Dataset columns (name: type, observed value range):

patch: string, lengths 17 to 31.2k
y: int64, 1 to 1
oldf: string, lengths 0 to 2.21M
idx: int64, 1 to 1
id: int64, 4.29k to 68.4k
msg: string, lengths 8 to 843
proj: categorical string, 212 distinct values
lang: categorical string, 9 distinct values
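The rows below are two sample records, with their fields listed in this column order. As a minimal sketch only, assuming the column names and types above (the record name ReviewRow and the String/long choices are illustrative, not part of the dataset's own definition), one row could be modeled in Java as:

// Hypothetical sketch: one dataset row as a Java record, mirroring the schema above.
public record ReviewRow(
    String patch,  // unified diff text
    long y,        // integer label column (1 in both sample rows)
    String oldf,   // full contents of the file before the patch
    long idx,      // row index column
    long id,       // numeric identifier
    String msg,    // review comment attached to the patch
    String proj,   // project name, e.g. SeleniumHQ-selenium
    String lang    // language tag, e.g. rb
) {}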
Row 1

patch:
@@ -19,13 +19,14 @@ package org.openqa.selenium.remote;

 import com.google.common.collect.ImmutableMap;

+import org.openqa.selenium.DeviceRotation;
 import org.openqa.selenium.Rotatable;
 import org.openqa.selenium.ScreenOrientation;

 import java.lang.reflect.Method;

 public class AddRotatable implements AugmenterProvider {
-
+
   public Class<?> getDescribedInterface() {
     return Rotatable.class;
   }
y: 1
oldf:
// Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

package org.openqa.selenium.remote;

import com.google.common.collect.ImmutableMap;

import org.openqa.selenium.Rotatable;
import org.openqa.selenium.ScreenOrientation;

import java.lang.reflect.Method;

public class AddRotatable implements AugmenterProvider {

  public Class<?> getDescribedInterface() {
    return Rotatable.class;
  }

  public InterfaceImplementation getImplementation(Object value) {
    return new InterfaceImplementation() {
      public Object invoke(ExecuteMethod executeMethod, Object self, Method method, Object... args) {
        if ("rotate".equals(method.getName())) {
          return executeMethod.execute(DriverCommand.SET_SCREEN_ORIENTATION,
              ImmutableMap.of("orientation", args[0]));
        } else if ("getOrientation".equals(method.getName())) {
          return ScreenOrientation.valueOf((String) executeMethod.execute(
              DriverCommand.GET_SCREEN_ORIENTATION, null));
        }
        return null;
      }
    };
  }
}
idx: 1
id: 13,511
msg: you don't need to create an enum for this, java allows you to switch on a string ;) (since java 7?)
proj: SeleniumHQ-selenium
lang: rb
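The review comment on this row suggests switching on a string rather than going through an enum, which Java has allowed since Java 7. As a minimal sketch only (not the committed change), the invoke dispatch from the oldf file above could be written with a string switch; ExecuteMethod, DriverCommand, ImmutableMap, and ScreenOrientation are the types already used in that file:

public Object invoke(ExecuteMethod executeMethod, Object self, Method method, Object... args) {
  // Illustrative rewrite of the if/else-if chain above: switch directly on the
  // method name (legal since Java 7) instead of introducing an enum for it.
  switch (method.getName()) {
    case "rotate":
      return executeMethod.execute(DriverCommand.SET_SCREEN_ORIENTATION,
          ImmutableMap.of("orientation", args[0]));
    case "getOrientation":
      return ScreenOrientation.valueOf((String) executeMethod.execute(
          DriverCommand.GET_SCREEN_ORIENTATION, null));
    default:
      return null;
  }
}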
Row 2

patch:
@@ -1955,15 +1955,7 @@ context 'Substitutions' do
   end

   test 'should passthrough math macro inside another passthrough' do
-    input = 'the text `asciimath:[x = y]` should be passed through as +literal+ text'
-    para = block_from_string input, attributes: { 'compat-mode' => '' }
-    assert_equal 'the text <code>asciimath:[x = y]</code> should be passed through as <code>literal</code> text', para.content
-
-    input = 'the text [x-]`asciimath:[x = y]` should be passed through as `literal` text'
-    para = block_from_string input
-    assert_equal 'the text <code>asciimath:[x = y]</code> should be passed through as <code>literal</code> text', para.content
-
-    input = 'the text `+asciimath:[x = y]+` should be passed through as `literal` text'
+    input = 'the text `++asciimath:[x = y]++` should be passed through as `literal` text'
     para = block_from_string input
     assert_equal 'the text <code>asciimath:[x = y]</code> should be passed through as <code>literal</code> text', para.content
   end
y: 1

oldf:
# frozen_string_literal: true require_relative 'test_helper' # TODO # - test negatives # - test role on every quote type context 'Substitutions' do BACKSLASH = ?\\ context 'Dispatcher' do test 'apply normal substitutions' do para = block_from_string("[blue]_http://asciidoc.org[AsciiDoc]_ & [red]*Ruby*\n&#167; Making +++<u>documentation</u>+++ together +\nsince (C) {inception_year}.") para.document.attributes['inception_year'] = '2012' result = para.apply_subs(para.source) assert_equal %{<em class="blue"><a href="http://asciidoc.org">AsciiDoc</a></em> &amp; <strong class="red">Ruby</strong>\n&#167; Making <u>documentation</u> together<br>\nsince &#169; 2012.}, result end test 'apply_subs should not modify string directly' do input = '<html> -- the root of all web' para = block_from_string input para_source = para.source result = para.apply_subs para_source assert_equal '&lt;html&gt;&#8201;&#8212;&#8201;the root of all web', result assert_equal input, para_source end test 'should not drop trailing blank lines when performing substitutions' do para = block_from_string %([%hardbreaks]\nthis\nis\n-> {program}) para.lines << '' para.lines << '' para.document.attributes['program'] = 'Asciidoctor' result = para.apply_subs(para.lines) assert_equal ['this<br>', 'is<br>', '&#8594; Asciidoctor<br>', '<br>', ''], result result = para.apply_subs(para.lines * "\n") assert_equal %(this<br>\nis<br>\n&#8594; Asciidoctor<br>\n<br>\n), result end test 'should expand subs passed to expand_subs' do para = block_from_string %({program}\n*bold*\n2 > 1) para.document.attributes['program'] = 'Asciidoctor' assert_equal [:specialcharacters], (para.expand_subs [:specialchars]) refute para.expand_subs([:none]) assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], (para.expand_subs [:normal]) end test 'apply_subs should allow the subs argument to be nil' do block = block_from_string %([pass]\n*raw*) result = block.apply_subs block.source, nil assert_equal '*raw*', result end end context 'Quotes' do test 'single-line double-quoted string' do para = block_from_string(%q{``a few quoted words''}, attributes: { 'compat-mode' => '' }) assert_equal '&#8220;a few quoted words&#8221;', para.sub_quotes(para.source) para = block_from_string(%q{"`a few quoted words`"}) assert_equal '&#8220;a few quoted words&#8221;', para.sub_quotes(para.source) para = block_from_string(%q{"`a few quoted words`"}, backend: 'docbook') assert_equal '<quote>a few quoted words</quote>', para.sub_quotes(para.source) end test 'escaped single-line double-quoted string' do para = block_from_string %(#{BACKSLASH}``a few quoted words''), attributes: { 'compat-mode' => '' } assert_equal %q(&#8216;`a few quoted words&#8217;'), para.sub_quotes(para.source) para = block_from_string %(#{BACKSLASH * 2}``a few quoted words''), attributes: { 'compat-mode' => '' } assert_equal %q(``a few quoted words''), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH}"`a few quoted words`")) assert_equal %q("`a few quoted words`"), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH * 2}"`a few quoted words`")) assert_equal %(#{BACKSLASH}"`a few quoted words`"), para.sub_quotes(para.source) end test 'multi-line double-quoted string' do para = block_from_string(%Q{``a few\nquoted words''}, attributes: { 'compat-mode' => '' }) assert_equal "&#8220;a few\nquoted words&#8221;", para.sub_quotes(para.source) para = block_from_string(%Q{"`a few\nquoted words`"}) assert_equal "&#8220;a few\nquoted 
words&#8221;", para.sub_quotes(para.source) end test 'double-quoted string with inline single quote' do para = block_from_string(%q{``Here's Johnny!''}, attributes: { 'compat-mode' => '' }) assert_equal %q{&#8220;Here's Johnny!&#8221;}, para.sub_quotes(para.source) para = block_from_string(%q{"`Here's Johnny!`"}) assert_equal %q{&#8220;Here's Johnny!&#8221;}, para.sub_quotes(para.source) end test 'double-quoted string with inline backquote' do para = block_from_string(%q{``Here`s Johnny!''}, attributes: { 'compat-mode' => '' }) assert_equal %q{&#8220;Here`s Johnny!&#8221;}, para.sub_quotes(para.source) para = block_from_string(%q{"`Here`s Johnny!`"}) assert_equal %q{&#8220;Here`s Johnny!&#8221;}, para.sub_quotes(para.source) end test 'double-quoted string around monospaced text' do para = block_from_string(%q("``E=mc^2^` is the solution!`")) assert_equal %q(&#8220;`E=mc<sup>2</sup>` is the solution!&#8221;), para.apply_subs(para.source); para = block_from_string(%q("```E=mc^2^`` is the solution!`")) assert_equal %q(&#8220;<code>E=mc<sup>2</sup></code> is the solution!&#8221;), para.apply_subs(para.source); end test 'single-line single-quoted string' do para = block_from_string(%q{`a few quoted words'}, attributes: { 'compat-mode' => '' }) assert_equal '&#8216;a few quoted words&#8217;', para.sub_quotes(para.source) para = block_from_string(%q{'`a few quoted words`'}) assert_equal '&#8216;a few quoted words&#8217;', para.sub_quotes(para.source) para = block_from_string(%q{'`a few quoted words`'}, backend: 'docbook') assert_equal '<quote>a few quoted words</quote>', para.sub_quotes(para.source) end test 'escaped single-line single-quoted string' do para = block_from_string(%(#{BACKSLASH}`a few quoted words'), attributes: { 'compat-mode' => '' }) assert_equal %(`a few quoted words'), para.sub_quotes(para.source) para = block_from_string(%(#{BACKSLASH}'`a few quoted words`')) assert_equal %('`a few quoted words`'), para.sub_quotes(para.source) end test 'multi-line single-quoted string' do para = block_from_string(%Q{`a few\nquoted words'}, attributes: { 'compat-mode' => '' }) assert_equal "&#8216;a few\nquoted words&#8217;", para.sub_quotes(para.source) para = block_from_string(%Q{'`a few\nquoted words`'}) assert_equal "&#8216;a few\nquoted words&#8217;", para.sub_quotes(para.source) end test 'single-quoted string with inline single quote' do para = block_from_string(%q{`That isn't what I did.'}, attributes: { 'compat-mode' => '' }) assert_equal %q{&#8216;That isn't what I did.&#8217;}, para.sub_quotes(para.source) para = block_from_string(%q{'`That isn't what I did.`'}) assert_equal %q{&#8216;That isn't what I did.&#8217;}, para.sub_quotes(para.source) end test 'single-quoted string with inline backquote' do para = block_from_string(%q{`Here`s Johnny!'}, attributes: { 'compat-mode' => '' }) assert_equal %q{&#8216;Here`s Johnny!&#8217;}, para.sub_quotes(para.source) para = block_from_string(%q{'`Here`s Johnny!`'}) assert_equal %q{&#8216;Here`s Johnny!&#8217;}, para.sub_quotes(para.source) end test 'single-line constrained marked string' do #para = block_from_string(%q{#a few words#}, attributes: { 'compat-mode' => '' }) #assert_equal 'a few words', para.sub_quotes(para.source) para = block_from_string(%q{#a few words#}) assert_equal '<mark>a few words</mark>', para.sub_quotes(para.source) end test 'escaped single-line constrained marked string' do para = block_from_string(%(#{BACKSLASH}#a few words#)) assert_equal '#a few words#', para.sub_quotes(para.source) end test 'multi-line constrained 
marked string' do #para = block_from_string(%Q{#a few\nwords#}, attributes: { 'compat-mode' => '' }) #assert_equal "a few\nwords", para.sub_quotes(para.source) para = block_from_string(%Q{#a few\nwords#}) assert_equal "<mark>a few\nwords</mark>", para.sub_quotes(para.source) end test 'constrained marked string should not match entity references' do para = block_from_string('111 #mark a# 222 "`quote a`" 333 #mark b# 444') assert_equal %(111 <mark>mark a</mark> 222 &#8220;quote a&#8221; 333 <mark>mark b</mark> 444), para.sub_quotes(para.source) end test 'single-line unconstrained marked string' do #para = block_from_string(%q{##--anything goes ##}, attributes: { 'compat-mode' => '' }) #assert_equal '--anything goes ', para.sub_quotes(para.source) para = block_from_string(%q{##--anything goes ##}) assert_equal '<mark>--anything goes </mark>', para.sub_quotes(para.source) end test 'escaped single-line unconstrained marked string' do para = block_from_string(%(#{BACKSLASH}#{BACKSLASH}##--anything goes ##)) assert_equal '##--anything goes ##', para.sub_quotes(para.source) end test 'multi-line unconstrained marked string' do #para = block_from_string(%Q{##--anything\ngoes ##}, attributes: { 'compat-mode' => '' }) #assert_equal "--anything\ngoes ", para.sub_quotes(para.source) para = block_from_string(%Q{##--anything\ngoes ##}) assert_equal "<mark>--anything\ngoes </mark>", para.sub_quotes(para.source) end test 'single-line constrained marked string with role' do para = block_from_string(%q{[statement]#a few words#}) assert_equal '<span class="statement">a few words</span>', para.sub_quotes(para.source) end test 'single-line constrained strong string' do para = block_from_string(%q{*a few strong words*}) assert_equal '<strong>a few strong words</strong>', para.sub_quotes(para.source) end test 'escaped single-line constrained strong string' do para = block_from_string(%(#{BACKSLASH}*a few strong words*)) assert_equal '*a few strong words*', para.sub_quotes(para.source) end test 'multi-line constrained strong string' do para = block_from_string(%Q{*a few\nstrong words*}) assert_equal "<strong>a few\nstrong words</strong>", para.sub_quotes(para.source) end test 'constrained strong string containing an asterisk' do para = block_from_string(%q{*bl*ck*-eye}) assert_equal '<strong>bl*ck</strong>-eye', para.sub_quotes(para.source) end test 'constrained strong string containing an asterisk and multibyte word chars' do para = block_from_string(%q{*黑*眼圈*}) assert_equal '<strong>黑*眼圈</strong>', para.sub_quotes(para.source) end test 'single-line constrained quote variation emphasized string' do para = block_from_string(%q{_a few emphasized words_}) assert_equal '<em>a few emphasized words</em>', para.sub_quotes(para.source) end test 'escaped single-line constrained quote variation emphasized string' do para = block_from_string(%(#{BACKSLASH}_a few emphasized words_)) assert_equal %q(_a few emphasized words_), para.sub_quotes(para.source) end test 'escaped single quoted string' do para = block_from_string(%(#{BACKSLASH}'a few emphasized words')) # NOTE the \' is replaced with ' by the :replacements substitution, later in the substitution pipeline assert_equal %(#{BACKSLASH}'a few emphasized words'), para.sub_quotes(para.source) end test 'multi-line constrained emphasized quote variation string' do para = block_from_string(%Q{_a few\nemphasized words_}) assert_equal "<em>a few\nemphasized words</em>", para.sub_quotes(para.source) end test 'single-quoted string containing an emphasized phrase' do para = 
block_from_string(%q{`I told him, 'Just go for it!''}, attributes: { 'compat-mode' => '' }) assert_equal '&#8216;I told him, <em>Just go for it!</em>&#8217;', para.sub_quotes(para.source) para = block_from_string(%q{'`I told him, 'Just go for it!'`'}) assert_equal %q(&#8216;I told him, 'Just go for it!'&#8217;), para.sub_quotes(para.source) end test 'escaped single-quotes inside emphasized words are restored' do para = block_from_string(%('Here#{BACKSLASH}'s Johnny!'), attributes: { 'compat-mode' => '' }) assert_equal %q(<em>Here's Johnny!</em>), para.apply_subs(para.source) para = block_from_string(%('Here#{BACKSLASH}'s Johnny!')) assert_equal %q('Here's Johnny!'), para.apply_subs(para.source) end test 'single-line constrained emphasized underline variation string' do para = block_from_string(%q{_a few emphasized words_}) assert_equal '<em>a few emphasized words</em>', para.sub_quotes(para.source) end test 'escaped single-line constrained emphasized underline variation string' do para = block_from_string(%(#{BACKSLASH}_a few emphasized words_)) assert_equal '_a few emphasized words_', para.sub_quotes(para.source) end test 'multi-line constrained emphasized underline variation string' do para = block_from_string(%Q{_a few\nemphasized words_}) assert_equal "<em>a few\nemphasized words</em>", para.sub_quotes(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'single-line constrained monospaced string' do para = block_from_string(%(`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' => '' }) assert_equal '<code>a few &lt;{monospaced}&gt; words</code>', para.apply_subs(para.source) para = block_from_string(%(`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) assert_equal '<code>a few &lt;monospaced&gt; words</code>', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'single-line constrained monospaced string with role' do para = block_from_string(%([input]`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' => '' }) assert_equal '<code class="input">a few &lt;{monospaced}&gt; words</code>', para.apply_subs(para.source) para = block_from_string(%([input]`a few <{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) assert_equal '<code class="input">a few &lt;monospaced&gt; words</code>', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}`a few <monospaced> words`), attributes: { 'compat-mode' => '' }) assert_equal '`a few &lt;monospaced&gt; words`', para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}`a few <monospaced> words`)) assert_equal '`a few &lt;monospaced&gt; words`', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped single-line constrained monospaced string with role' do para = block_from_string(%([input]#{BACKSLASH}`a few <monospaced> words`), attributes: { 'compat-mode' => '' }) assert_equal '[input]`a few &lt;monospaced&gt; words`', para.apply_subs(para.source) para = block_from_string(%([input]#{BACKSLASH}`a few <monospaced> words`)) assert_equal '[input]`a few &lt;monospaced&gt; words`', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced 
is handled as a passthrough test 'escaped role on single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}[input]`a few <monospaced> words`), attributes: { 'compat-mode' => '' }) assert_equal '[input]<code>a few &lt;monospaced&gt; words</code>', para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}[input]`a few <monospaced> words`)) assert_equal '[input]<code>a few &lt;monospaced&gt; words</code>', para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'escaped role on escaped single-line constrained monospaced string' do para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few <monospaced> words`), attributes: { 'compat-mode' => '' }) assert_equal %(#{BACKSLASH}[input]`a few &lt;monospaced&gt; words`), para.apply_subs(para.source) para = block_from_string(%(#{BACKSLASH}[input]#{BACKSLASH}`a few <monospaced> words`)) assert_equal %(#{BACKSLASH}[input]`a few &lt;monospaced&gt; words`), para.apply_subs(para.source) end # NOTE must use apply_subs because constrained monospaced is handled as a passthrough test 'multi-line constrained monospaced string' do para = block_from_string(%(`a few\n<{monospaced}> words`), attributes: { 'monospaced' => 'monospaced', 'compat-mode' => '' }) assert_equal "<code>a few\n&lt;{monospaced}&gt; words</code>", para.apply_subs(para.source) para = block_from_string(%(`a few\n<{monospaced}> words`), attributes: { 'monospaced' => 'monospaced' }) assert_equal "<code>a few\n&lt;monospaced&gt; words</code>", para.apply_subs(para.source) end test 'single-line unconstrained strong chars' do para = block_from_string(%q{**Git**Hub}) assert_equal '<strong>Git</strong>Hub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained strong chars' do para = block_from_string(%(#{BACKSLASH}**Git**Hub)) assert_equal '<strong>*Git</strong>*Hub', para.sub_quotes(para.source) end test 'multi-line unconstrained strong chars' do para = block_from_string(%Q{**G\ni\nt\n**Hub}) assert_equal "<strong>G\ni\nt\n</strong>Hub", para.sub_quotes(para.source) end test 'unconstrained strong chars with inline asterisk' do para = block_from_string(%q{**bl*ck**-eye}) assert_equal '<strong>bl*ck</strong>-eye', para.sub_quotes(para.source) end test 'unconstrained strong chars with role' do para = block_from_string(%q{Git[blue]**Hub**}) assert_equal %q{Git<strong class="blue">Hub</strong>}, para.sub_quotes(para.source) end # TODO this is not the same result as AsciiDoc, though I don't understand why AsciiDoc gets what it gets test 'escaped unconstrained strong chars with role' do para = block_from_string(%(Git#{BACKSLASH}[blue]**Hub**)) assert_equal %q{Git[blue]<strong>*Hub</strong>*}, para.sub_quotes(para.source) end test 'single-line unconstrained emphasized chars' do para = block_from_string(%q{__Git__Hub}) assert_equal '<em>Git</em>Hub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained emphasized chars' do para = block_from_string(%(#{BACKSLASH}__Git__Hub)) assert_equal '__Git__Hub', para.sub_quotes(para.source) end test 'escaped single-line unconstrained emphasized chars around word' do para = block_from_string(%(#{BACKSLASH}#{BACKSLASH}__GitHub__)) assert_equal '__GitHub__', para.sub_quotes(para.source) end test 'multi-line unconstrained emphasized chars' do para = block_from_string(%Q{__G\ni\nt\n__Hub}) assert_equal "<em>G\ni\nt\n</em>Hub", para.sub_quotes(para.source) end test 'unconstrained emphasis chars with role' do para = 
block_from_string(%q{[gray]__Git__Hub}) assert_equal %q{<em class="gray">Git</em>Hub}, para.sub_quotes(para.source) end test 'escaped unconstrained emphasis chars with role' do para = block_from_string(%(#{BACKSLASH}[gray]__Git__Hub)) assert_equal %q{[gray]__Git__Hub}, para.sub_quotes(para.source) end test 'single-line constrained monospaced chars' do para = block_from_string(%q{call +save()+ to persist the changes}, attributes: { 'compat-mode' => '' }) assert_equal 'call <code>save()</code> to persist the changes', para.sub_quotes(para.source) para = block_from_string(%q{call [x-]+save()+ to persist the changes}) assert_equal 'call <code>save()</code> to persist the changes', para.apply_subs(para.source) para = block_from_string(%q{call `save()` to persist the changes}) assert_equal 'call <code>save()</code> to persist the changes', para.sub_quotes(para.source) end test 'single-line constrained monospaced chars with role' do para = block_from_string(%q{call [method]+save()+ to persist the changes}, attributes: { 'compat-mode' => '' }) assert_equal 'call <code class="method">save()</code> to persist the changes', para.sub_quotes(para.source) para = block_from_string(%q{call [method x-]+save()+ to persist the changes}) assert_equal 'call <code class="method">save()</code> to persist the changes', para.apply_subs(para.source) para = block_from_string(%q{call [method]`save()` to persist the changes}) assert_equal 'call <code class="method">save()</code> to persist the changes', para.sub_quotes(para.source) end test 'escaped single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 'call +save()+ to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}`save()` to persist the changes)) assert_equal 'call `save()` to persist the changes', para.sub_quotes(para.source) end test 'escaped single-line constrained monospaced chars with role' do para = block_from_string(%(call [method]#{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 'call [method]+save()+ to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call [method]#{BACKSLASH}`save()` to persist the changes)) assert_equal 'call [method]`save()` to persist the changes', para.sub_quotes(para.source) end test 'escaped role on single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}[method]+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal 'call [method]<code>save()</code> to persist the changes', para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}[method]`save()` to persist the changes)) assert_equal 'call [method]<code>save()</code> to persist the changes', para.sub_quotes(para.source) end test 'escaped role on escaped single-line constrained monospaced chars' do para = block_from_string(%(call #{BACKSLASH}[method]#{BACKSLASH}+save()+ to persist the changes), attributes: { 'compat-mode' => '' }) assert_equal %(call #{BACKSLASH}[method]+save()+ to persist the changes), para.sub_quotes(para.source) para = block_from_string(%(call #{BACKSLASH}[method]#{BACKSLASH}`save()` to persist the changes)) assert_equal %(call #{BACKSLASH}[method]`save()` to persist the changes), para.sub_quotes(para.source) end test 'single-line unconstrained monospaced chars' do para = block_from_string(%q{Git++Hub++}, attributes: { 'compat-mode' => '' }) 
assert_equal 'Git<code>Hub</code>', para.sub_quotes(para.source) para = block_from_string(%q{Git[x-]++Hub++}) assert_equal 'Git<code>Hub</code>', para.apply_subs(para.source) para = block_from_string(%q{Git``Hub``}) assert_equal 'Git<code>Hub</code>', para.sub_quotes(para.source) end test 'escaped single-line unconstrained monospaced chars' do para = block_from_string(%(Git#{BACKSLASH}++Hub++), attributes: { 'compat-mode' => '' }) assert_equal 'Git+<code>Hub</code>+', para.sub_quotes(para.source) para = block_from_string(%(Git#{BACKSLASH * 2}++Hub++), attributes: { 'compat-mode' => '' }) assert_equal 'Git++Hub++', para.sub_quotes(para.source) para = block_from_string(%(Git#{BACKSLASH}``Hub``)) assert_equal 'Git``Hub``', para.sub_quotes(para.source) end test 'multi-line unconstrained monospaced chars' do para = block_from_string(%Q{Git++\nH\nu\nb++}, attributes: { 'compat-mode' => '' }) assert_equal "Git<code>\nH\nu\nb</code>", para.sub_quotes(para.source) para = block_from_string(%Q{Git[x-]++\nH\nu\nb++}) assert_equal %(Git<code>\nH\nu\nb</code>), para.apply_subs(para.source) para = block_from_string(%Q{Git``\nH\nu\nb``}) assert_equal "Git<code>\nH\nu\nb</code>", para.sub_quotes(para.source) end test 'single-line superscript chars' do para = block_from_string(%(x^2^ = x * x, e = mc^2^, there's a 1^st^ time for everything)) assert_equal %(x<sup>2</sup> = x * x, e = mc<sup>2</sup>, there\'s a 1<sup>st</sup> time for everything), para.sub_quotes(para.source) end test 'escaped single-line superscript chars' do para = block_from_string(%(x#{BACKSLASH}^2^ = x * x)) assert_equal 'x^2^ = x * x', para.sub_quotes(para.source) end test 'does not match superscript across whitespace' do para = block_from_string(%Q{x^(n\n-\n1)^}) assert_equal para.source, para.sub_quotes(para.source) end test 'does not match adjacent superscript chars' do para = block_from_string 'a ^^ b' assert_equal 'a ^^ b', para.sub_quotes(para.source) end test 'does not confuse superscript and links with blank window shorthand' do para = block_from_string(%Q{http://localhost[Text^] on the 21^st^ and 22^nd^}) assert_equal '<a href="http://localhost" target="_blank" rel="noopener">Text</a> on the 21<sup>st</sup> and 22<sup>nd</sup>', para.content end test 'single-line subscript chars' do para = block_from_string(%q{H~2~O}) assert_equal 'H<sub>2</sub>O', para.sub_quotes(para.source) end test 'escaped single-line subscript chars' do para = block_from_string(%(H#{BACKSLASH}~2~O)) assert_equal 'H~2~O', para.sub_quotes(para.source) end test 'does not match subscript across whitespace' do para = block_from_string(%Q{project~ view\non\nGitHub~}) assert_equal para.source, para.sub_quotes(para.source) end test 'does not match adjacent subscript chars' do para = block_from_string 'a ~~ b' assert_equal 'a ~~ b', para.sub_quotes(para.source) end test 'does not match subscript across distinct URLs' do para = block_from_string(%Q{http://www.abc.com/~def[DEF] and http://www.abc.com/~ghi[GHI]}) assert_equal para.source, para.sub_quotes(para.source) end test 'quoted text with role shorthand' do para = block_from_string(%q{[.white.red-background]#alert#}) assert_equal '<span class="white red-background">alert</span>', para.sub_quotes(para.source) end test 'quoted text with id shorthand' do para = block_from_string(%q{[#bond]#007#}) assert_equal '<span id="bond">007</span>', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand' do para = block_from_string(%q{[#bond.white.red-background]#007#}) assert_equal '<span id="bond" 
class="white red-background">007</span>', para.sub_quotes(para.source) end test 'quoted text with id and role shorthand using docbook backend' do para = block_from_string(%q{[#bond.white.red-background]#007#}, backend: 'docbook') assert_equal '<anchor xml:id="bond" xreflabel="007"/><phrase role="white red-background">007</phrase>', para.sub_quotes(para.source) end test 'should ignore attributes after comma' do para = block_from_string(%q{[red, foobar]#alert#}) assert_equal '<span class="red">alert</span>', para.sub_quotes(para.source) end test 'inline passthrough with id and role set using shorthand' do %w(#idname.rolename .rolename#idname).each do |attrlist| para = block_from_string %([#{attrlist}]+pass+) assert_equal '<span id="idname" class="rolename">pass</span>', para.content end end test 'should not assign role attribute if shorthand style has no roles' do para = block_from_string '[#idname]*blah*' assert_equal '<strong id="idname">blah</strong>', para.content end end context 'Macros' do test 'a single-line link macro should be interpreted as a link' do para = block_from_string('link:/home.html[]') assert_equal %q{<a href="/home.html" class="bare">/home.html</a>}, para.sub_macros(para.source) end test 'a single-line link macro with text should be interpreted as a link' do para = block_from_string('link:/home.html[Home]') assert_equal %q{<a href="/home.html">Home</a>}, para.sub_macros(para.source) end test 'a mailto macro should be interpreted as a mailto link' do para = block_from_string('mailto:[email protected][]') assert_equal %q{<a href="mailto:[email protected]">[email protected]</a>}, para.sub_macros(para.source) end test 'a mailto macro with text should be interpreted as a mailto link' do para = block_from_string('mailto:[email protected][Doc Writer]') assert_equal %q{<a href="mailto:[email protected]">Doc Writer</a>}, para.sub_macros(para.source) end test 'a mailto macro with text and subject should be interpreted as a mailto link' do para = block_from_string('mailto:[email protected][Doc Writer, Pull request]') assert_equal %q{<a href="mailto:[email protected]?subject=Pull+request">Doc Writer</a>}, para.sub_macros(para.source) end test 'a mailto macro with text, subject and body should be interpreted as a mailto link' do para = block_from_string('mailto:[email protected][Doc Writer, Pull request, Please accept my pull request]') assert_equal %q{<a href="mailto:[email protected]?subject=Pull+request&amp;body=Please+accept+my+pull+request">Doc Writer</a>}, para.sub_macros(para.source) end test 'a mailto macro with subject and body only should use e-mail as text' do para = block_from_string('mailto:[email protected][,Pull request,Please accept my pull request]') assert_equal %q{<a href="mailto:[email protected]?subject=Pull+request&amp;body=Please+accept+my+pull+request">[email protected]</a>}, para.sub_macros(para.source) end test 'should recognize inline email addresses' do %w( [email protected] [email protected] [email protected] [email protected] joe_bloggs@mail_server.com [email protected] [email protected] [email protected] [email protected] ).each do |input| para = block_from_string input assert_equal %(<a href="mailto:#{input}">#{input}</a>), (para.sub_macros para.source) end end test 'should recognize inline email address containing an ampersand' do para = block_from_string('bert&[email protected]') assert_equal %q{<a href="mailto:bert&amp;[email protected]">bert&amp;[email protected]</a>}, para.apply_subs(para.source) end test 'should recognize inline email address 
surrounded by angle brackets' do para = block_from_string('<[email protected]>') assert_equal %q{&lt;<a href="mailto:[email protected]">[email protected]</a>&gt;}, para.apply_subs(para.source) end test 'should ignore escaped inline email address' do para = block_from_string(%(#{BACKSLASH}[email protected])) assert_equal %q{[email protected]}, para.sub_macros(para.source) end test 'a single-line raw url should be interpreted as a link' do para = block_from_string('http://google.com') assert_equal %q{<a href="http://google.com" class="bare">http://google.com</a>}, para.sub_macros(para.source) end test 'a single-line raw url with text should be interpreted as a link' do para = block_from_string('http://google.com[Google]') assert_equal %q{<a href="http://google.com">Google</a>}, para.sub_macros(para.source) end test 'a multi-line raw url with text should be interpreted as a link' do para = block_from_string("http://google.com[Google\nHomepage]") assert_equal %{<a href="http://google.com">Google\nHomepage</a>}, para.sub_macros(para.source) end test 'a single-line raw url with attribute as text should be interpreted as a link with resolved attribute' do para = block_from_string("http://google.com[{google_homepage}]") para.document.attributes['google_homepage'] = 'Google Homepage' assert_equal %q{<a href="http://google.com">Google Homepage</a>}, para.sub_macros(para.sub_attributes(para.source)) end test 'should not resolve an escaped attribute in link text' do { 'http://google.com' => "http://google.com[#{BACKSLASH}{google_homepage}]", 'http://google.com?q=,' => "link:http://google.com?q=,[#{BACKSLASH}{google_homepage}]", }.each do |uri, macro| para = block_from_string macro para.document.attributes['google_homepage'] = 'Google Homepage' assert_equal %(<a href="#{uri}">{google_homepage}</a>), para.sub_macros(para.sub_attributes(para.source)) end end test 'a single-line escaped raw url should not be interpreted as a link' do para = block_from_string(%(#{BACKSLASH}http://google.com)) assert_equal %q{http://google.com}, para.sub_macros(para.source) end test 'a comma separated list of links should not include commas in links' do para = block_from_string('http://foo.com, http://bar.com, http://example.org') assert_equal %q{<a href="http://foo.com" class="bare">http://foo.com</a>, <a href="http://bar.com" class="bare">http://bar.com</a>, <a href="http://example.org" class="bare">http://example.org</a>}, para.sub_macros(para.source) end test 'a single-line image macro should be interpreted as an image' do para = block_from_string('image:tiger.png[]') assert_equal %{<span class="image"><img src="tiger.png" alt="tiger"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'should replace underscore and hyphen with space in generated alt text for an inline image' do para = block_from_string('image:tiger-with-family_1.png[]') assert_equal %{<span class="image"><img src="tiger-with-family_1.png" alt="tiger with family 1"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'a single-line image macro with text should be interpreted as an image with alt text' do para = block_from_string('image:tiger.png[Tiger]') assert_equal %{<span class="image"><img src="tiger.png" alt="Tiger"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'should encode special characters in alt text of inline image' do input = 'A tiger\'s "roar" is < a bear\'s "growl"' expected = 'A tiger&#8217;s &quot;roar&quot; is &lt; a bear&#8217;s &quot;growl&quot;' output = (convert_inline_string 
%(image:tiger-roar.png[#{input}])).gsub(/>\s+</, '><') assert_equal %(<span class="image"><img src="tiger-roar.png" alt="#{expected}"></span>), output end test 'an image macro with SVG image and text should be interpreted as an image with alt text' do para = block_from_string('image:tiger.svg[Tiger]') assert_equal %{<span class="image"><img src="tiger.svg" alt="Tiger"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an image macro with an interactive SVG image and alt text should be converted to an object element' do para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'images' }) assert_equal %{<span class="image"><object type="image/svg+xml" data="images/tiger.svg"><span class="alt">Tiger</span></object></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an image macro with an interactive SVG image, fallback and alt text should be converted to an object element' do para = block_from_string('image:tiger.svg[Tiger,fallback=tiger.png,opts=interactive]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'images' }) assert_equal %{<span class="image"><object type="image/svg+xml" data="images/tiger.svg"><img src="images/tiger.png" alt="Tiger"></object></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an image macro with an inline SVG image should be converted to an svg element' do para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'imagesdir' => 'fixtures', 'docdir' => testdir }) result = para.sub_macros(para.source).gsub(/>\s+</, '><') assert_match(/<svg\s[^>]*width="100px"[^>]*>/, result) refute_match(/<svg\s[^>]*width="500px"[^>]*>/, result) refute_match(/<svg\s[^>]*height="500px"[^>]*>/, result) refute_match(/<svg\s[^>]*style="width:500px;height:500px"[^>]*>/, result) end test 'an image macro with an inline SVG image should be converted to an svg element even when data-uri is set' do para = block_from_string('image:circle.svg[Tiger,100,opts=inline]', safe: Asciidoctor::SafeMode::SERVER, attributes: { 'data-uri' => '', 'imagesdir' => 'fixtures', 'docdir' => testdir }) assert_match(/<svg\s[^>]*width="100px">/, para.sub_macros(para.source).gsub(/>\s+</, '><')) end test 'an image macro with an SVG image should not use an object element when safe mode is secure' do para = block_from_string('image:tiger.svg[Tiger,opts=interactive]', attributes: { 'imagesdir' => 'images' }) assert_equal %{<span class="image"><img src="images/tiger.svg" alt="Tiger"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'a single-line image macro with text containing escaped square bracket should be interpreted as an image with alt text' do para = block_from_string(%(image:tiger.png[[Another#{BACKSLASH}] Tiger])) assert_equal %{<span class="image"><img src="tiger.png" alt="[Another] Tiger"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'a single-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions' do para = block_from_string('image:tiger.png[Tiger, 200, 100]') assert_equal %{<span class="image"><img src="tiger.png" alt="Tiger" width="200" height="100"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'a single-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions in docbook' do para = block_from_string 'image:tiger.png[Tiger, 200, 100]', 
backend: 'docbook' assert_equal %{<inlinemediaobject><imageobject><imagedata fileref="tiger.png" contentwidth="200" contentdepth="100"/></imageobject><textobject><phrase>Tiger</phrase></textobject></inlinemediaobject>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'a single-line image macro with text and link should be interpreted as a linked image with alt text' do para = block_from_string('image:tiger.png[Tiger, link="http://en.wikipedia.org/wiki/Tiger"]') assert_equal %{<span class="image"><a class="image" href="http://en.wikipedia.org/wiki/Tiger"><img src="tiger.png" alt="Tiger"></a></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'rel=noopener should be added to an image with a link that targets the _blank window' do para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=_blank]' assert_equal %{<span class="image"><a class="image" href="http://en.wikipedia.org/wiki/Tiger" target="_blank" rel="noopener"><img src="tiger.png" alt="Tiger"></a></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'rel=noopener should be added to an image with a link that targets a named window when the noopener option is set' do para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,window=name,opts=noopener]' assert_equal %{<span class="image"><a class="image" href="http://en.wikipedia.org/wiki/Tiger" target="name" rel="noopener"><img src="tiger.png" alt="Tiger"></a></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'rel=nofollow should be added to an image with a link when the nofollow option is set' do para = block_from_string 'image:tiger.png[Tiger,link=http://en.wikipedia.org/wiki/Tiger,opts=nofollow]' assert_equal %{<span class="image"><a class="image" href="http://en.wikipedia.org/wiki/Tiger" rel="nofollow"><img src="tiger.png" alt="Tiger"></a></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'a multi-line image macro with text and dimensions should be interpreted as an image with alt text and dimensions' do para = block_from_string(%(image:tiger.png[Another\nAwesome\nTiger, 200,\n100])) assert_equal %{<span class="image"><img src="tiger.png" alt="Another Awesome Tiger" width="200" height="100"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an inline image macro with a url target should be interpreted as an image' do para = block_from_string %(Beware of the image:http://example.com/images/tiger.png[tiger].) assert_equal %{Beware of the <span class="image"><img src="http://example.com/images/tiger.png" alt="tiger"></span>.}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an inline image macro with a float attribute should be interpreted as a floating image' do para = block_from_string %(image:http://example.com/images/tiger.png[tiger, float="right"] Beware of the tigers!) 
assert_equal %{<span class="image right"><img src="http://example.com/images/tiger.png" alt="tiger"></span> Beware of the tigers!}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'should prepend value of imagesdir attribute to inline image target if target is relative path' do para = block_from_string %(Beware of the image:tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the <span class="image"><img src="./images/tiger.png" alt="tiger"></span>.}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'should not prepend value of imagesdir attribute to inline image target if target is absolute path' do para = block_from_string %(Beware of the image:/tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the <span class="image"><img src="/tiger.png" alt="tiger"></span>.}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'should not prepend value of imagesdir attribute to inline image target if target is url' do para = block_from_string %(Beware of the image:http://example.com/images/tiger.png[tiger].), attributes: { 'imagesdir' => './images' } assert_equal %{Beware of the <span class="image"><img src="http://example.com/images/tiger.png" alt="tiger"></span>.}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'should match an inline image macro if target contains a space character' do para = block_from_string(%(Beware of the image:big cats.png[] around here.)) assert_equal %(Beware of the <span class="image"><img src="big%20cats.png" alt="big cats"></span> around here.), para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'should not match an inline image macro if target contains a newline character' do para = block_from_string(%(Fear not. There are no image:big\ncats.png[] around here.)) result = para.sub_macros(para.source) refute_includes result, '<img ' assert_includes result, %(image:big\ncats.png[]) end test 'should not match an inline image macro if target begins or ends with space character' do ['image: big cats.png[]', 'image:big cats.png []'].each do |input| para = block_from_string %(Fear not. There are no #{input} around here.) result = para.sub_macros(para.source) refute_includes result, '<img ' assert_includes result, input end end test 'should not detect a block image macro found inline' do para = block_from_string(%(Not an inline image macro image::tiger.png[].)) result = para.sub_macros(para.source) refute_includes result, '<img ' assert_includes result, 'image::tiger.png[]' end # NOTE this test verifies attributes get substituted eagerly in target of image in title test 'should substitute attributes in target of inline image in section title' do input = '== image:{iconsdir}/dot.gif[dot] Title' using_memory_logger do |logger| sect = block_from_string input, attributes: { 'data-uri' => '', 'iconsdir' => 'fixtures', 'docdir' => testdir }, safe: :server, catalog_assets: true assert 1, sect.document.catalog[:images].size assert_equal 'fixtures/dot.gif', sect.document.catalog[:images][0].to_s assert_nil sect.document.catalog[:images][0].imagesdir assert logger.empty? 
end end test 'an icon macro should be interpreted as an icon if icons are enabled' do para = block_from_string 'icon:github[]', attributes: { 'icons' => '' } assert_equal %{<span class="icon"><img src="./images/icons/github.png" alt="github"></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an icon macro should be interpreted as alt text if icons are disabled' do para = block_from_string 'icon:github[]' assert_equal %{<span class="icon">[github]</span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an icon macro should output alt text if icons are disabled and alt is given' do para = block_from_string 'icon:github[alt="GitHub"]' assert_equal %{<span class="icon">[GitHub]</span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an icon macro should be interpreted as a font-based icon when icons=font' do para = block_from_string 'icon:github[]', attributes: { 'icons' => 'font' } assert_equal %{<span class="icon"><i class="fa fa-github"></i></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an icon macro with a size should be interpreted as a font-based icon with a size when icons=font' do para = block_from_string 'icon:github[4x]', attributes: { 'icons' => 'font' } assert_equal %{<span class="icon"><i class="fa fa-github fa-4x"></i></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'an icon macro with a role and title should be interpreted as a font-based icon with a class and title when icons=font' do para = block_from_string 'icon:heart[role="red", title="Heart me"]', attributes: { 'icons' => 'font' } assert_equal %{<span class="icon red"><i class="fa fa-heart" title="Heart me"></i></span>}, para.sub_macros(para.source).gsub(/>\s+</, '><') end test 'a single-line footnote macro should be registered and output as a footnote' do para = block_from_string('Sentence text footnote:[An example footnote.].') assert_equal %(Sentence text <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>.), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_nil footnote.id assert_equal 'An example footnote.', footnote.text end test 'a multi-line footnote macro should be registered and output as a footnote without newline' do para = block_from_string("Sentence text footnote:[An example footnote\nwith wrapped text.].") assert_equal %(Sentence text <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>.), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_nil footnote.id assert_equal "An example footnote with wrapped text.", footnote.text end test 'an escaped closing square bracket in a footnote should be unescaped when converted' do para = block_from_string(%(footnote:[a #{BACKSLASH}] b].)) assert_equal %(<sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>.), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal "a ] b", footnote.text end test 'a footnote macro can be directly adjacent to preceding word' do para = block_from_string('Sentence textfootnote:[An example footnote.].') assert_equal %(Sentence 
text<sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>.), para.sub_macros(para.source) end test 'a footnote macro may contain an escaped backslash' do para = block_from_string("footnote:[\\]]\nfootnote:[a \\] b]\nfootnote:[a \\]\\] b]") para.sub_macros(para.source) assert_equal 3, para.document.catalog[:footnotes].size footnote1 = para.document.catalog[:footnotes][0] assert_equal ']', footnote1.text footnote2 = para.document.catalog[:footnotes][1] assert_equal 'a ] b', footnote2.text footnote3 = para.document.catalog[:footnotes][2] assert_equal 'a ]] b', footnote3.text end test 'a footnote macro may contain a link macro' do para = block_from_string('Share your code. footnote:[https://github.com[GitHub]]') assert_equal %(Share your code. <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote1 = para.document.catalog[:footnotes][0] assert_equal '<a href="https://github.com">GitHub</a>', footnote1.text end test 'a footnote macro may contain a plain URL' do para = block_from_string %(the JLine footnote:[https://github.com/jline/jline2]\nlibrary.) result = para.sub_macros para.source assert_equal %(the JLine <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>\nlibrary.), result assert_equal 1, para.document.catalog[:footnotes].size fn1 = para.document.catalog[:footnotes].first assert_equal '<a href="https://github.com/jline/jline2" class="bare">https://github.com/jline/jline2</a>', fn1.text end test 'a footnote macro followed by a semi-colon may contain a plain URL' do para = block_from_string %(the JLine footnote:[https://github.com/jline/jline2];\nlibrary.) 
result = para.sub_macros para.source assert_equal %(the JLine <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>;\nlibrary.), result assert_equal 1, para.document.catalog[:footnotes].size fn1 = para.document.catalog[:footnotes].first assert_equal '<a href="https://github.com/jline/jline2" class="bare">https://github.com/jline/jline2</a>', fn1.text end test 'a footnote macro may contain a shorthand xref' do # specialcharacters escaping is simulated para = block_from_string('text footnote:[&lt;&lt;_install,install&gt;&gt;]') doc = para.document doc.register :refs, ['_install', (Asciidoctor::Inline.new doc, :anchor, 'Install', type: :ref, target: '_install'), 'Install'] catalog = doc.catalog assert_equal %(text <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>), para.sub_macros(para.source) assert_equal 1, catalog[:footnotes].size footnote1 = catalog[:footnotes][0] assert_equal '<a href="#_install">install</a>', footnote1.text end test 'a footnote macro may contain an xref macro' do para = block_from_string('text footnote:[xref:_install[install]]') doc = para.document doc.register :refs, ['_install', (Asciidoctor::Inline.new doc, :anchor, 'Install', type: :ref, target: '_install'), 'Install'] catalog = doc.catalog assert_equal %(text <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>), para.sub_macros(para.source) assert_equal 1, catalog[:footnotes].size footnote1 = catalog[:footnotes][0] assert_equal '<a href="#_install">install</a>', footnote1.text end test 'a footnote macro may contain an anchor macro' do para = block_from_string('text footnote:[a [[b]] [[c\]\] d]') assert_equal %(text <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote1 = para.document.catalog[:footnotes][0] assert_equal 'a <a id="b"></a> [[c]] d', footnote1.text end test 'subsequent footnote macros with escaped URLs should be restored in DocBook' do input = 'foofootnote:[+http://example.com+]barfootnote:[+http://acme.com+]baz' result = convert_string_to_embedded input, doctype: 'inline', backend: 'docbook' assert_equal 'foo<footnote><simpara>http://example.com</simpara></footnote>bar<footnote><simpara>http://acme.com</simpara></footnote>baz', result end test 'should increment index of subsequent footnote macros' do para = block_from_string("Sentence text footnote:[An example footnote.]. Sentence text footnote:[Another footnote.].") assert_equal %(Sentence text <sup class="footnote">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>. 
Sentence text <sup class="footnote">[<a id="_footnoteref_2" class="footnote" href="#_footnotedef_2" title="View footnote.">2</a>]</sup>.), para.sub_macros(para.source) assert_equal 2, para.document.catalog[:footnotes].size footnote1 = para.document.catalog[:footnotes][0] assert_equal 1, footnote1.index assert_nil footnote1.id assert_equal "An example footnote.", footnote1.text footnote2 = para.document.catalog[:footnotes][1] assert_equal 2, footnote2.index assert_nil footnote2.id assert_equal "Another footnote.", footnote2.text end test 'a footnoteref macro with id and single-line text should be registered and output as a footnote' do para = block_from_string 'Sentence text footnoteref:[ex1, An example footnote.].', attributes: { 'compat-mode' => '' } assert_equal %(Sentence text <sup class="footnote" id="_footnote_ex1">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>.), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal 'An example footnote.', footnote.text end test 'a footnoteref macro with id and multi-line text should be registered and output as a footnote without newlines' do para = block_from_string "Sentence text footnoteref:[ex1, An example footnote\nwith wrapped text.].", attributes: { 'compat-mode' => '' } assert_equal %(Sentence text <sup class="footnote" id="_footnote_ex1">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>.), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal "An example footnote with wrapped text.", footnote.text end test 'a footnoteref macro with id should refer to footnoteref with same id' do para = block_from_string 'Sentence text footnoteref:[ex1, An example footnote.]. Sentence text footnoteref:[ex1].', attributes: { 'compat-mode' => '' } assert_equal %(Sentence text <sup class="footnote" id="_footnote_ex1">[<a id="_footnoteref_1" class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>. 
Sentence text <sup class="footnoteref">[<a class="footnote" href="#_footnotedef_1" title="View footnote.">1</a>]</sup>.), para.sub_macros(para.source) assert_equal 1, para.document.catalog[:footnotes].size footnote = para.document.catalog[:footnotes].first assert_equal 1, footnote.index assert_equal 'ex1', footnote.id assert_equal 'An example footnote.', footnote.text end test 'an unresolved footnote reference should produce a warning message' do input = 'Sentence text.footnote:ex1[]' using_memory_logger do |logger| para = block_from_string input para.sub_macros para.source assert_message logger, :WARN, 'invalid footnote reference: ex1' end end test 'using a footnoteref macro should generate a warning when compat mode is not enabled' do input = 'Sentence text.footnoteref:[fn1,Commentary on this sentence.]' using_memory_logger do |logger| para = block_from_string input para.sub_macros para.source assert_message logger, :WARN, 'found deprecated footnoteref macro: footnoteref:[fn1,Commentary on this sentence.]; use footnote macro with target instead' end end test 'inline footnote macro can be used to define and reference a footnote reference' do input = <<~'EOS' You can download the software from the product page.footnote:sub[Option only available if you have an active subscription.] You can also file a support request.footnote:sub[] If all else fails, you can give us a call.footnoteref:[sub] EOS using_memory_logger do |logger| output = convert_string_to_embedded input, attributes: { 'compat-mode' => '' } assert_css '#_footnotedef_1', output, 1 assert_css 'p a[href="#_footnotedef_1"]', output, 3 assert_css '#footnotes .footnote', output, 1 assert logger.empty? end end test 'should parse multiple footnote references in a single line' do input = 'notable text.footnote:id[about this [text\]], footnote:id[], footnote:id[]' output = convert_string_to_embedded input assert_xpath '(//p)[1]/sup[starts-with(@class,"footnote")]', output, 3 assert_xpath '(//p)[1]/sup[@class="footnote"]', output, 1 assert_xpath '(//p)[1]/sup[@class="footnoteref"]', output, 2 assert_xpath '(//p)[1]/sup[starts-with(@class,"footnote")]/a[@class="footnote"][text()="1"]', output, 3 assert_css '#footnotes .footnote', output, 1 end test 'should not resolve an inline footnote macro missing both id and text' do input = <<~'EOS' The footnote:[] macro can be used for defining and referencing footnotes. The footnoteref:[] macro is now deprecated. EOS output = convert_string_to_embedded input assert_includes output, 'The footnote:[] macro' assert_includes output, 'The footnoteref:[] macro' end test 'inline footnote macro can define a numeric id without conflicting with auto-generated ID' do input = 'You can download the software from the product page.footnote:1[Option only available if you have an active subscription.]' output = convert_string_to_embedded input assert_css '#_footnote_1', output, 1 assert_css 'p sup#_footnote_1', output, 1 assert_css 'p a#_footnoteref_1', output, 1 assert_css 'p a[href="#_footnotedef_1"]', output, 1 assert_css '#footnotes #_footnotedef_1', output, 1 end test 'inline footnote macro can define an id that uses any word characters in Unicode' do input = <<~'EOS' L'origine du mot forêt{blank}footnote:forêt[un massif forestier] est complexe. 
Qu'est-ce qu'une forêt ?{blank}footnote:forêt[] EOS output = convert_string_to_embedded input assert_css '#_footnote_forêt', output, 1 assert_css '#_footnotedef_1', output, 1 assert_xpath '//a[@class="footnote"][text()="1"]', output, 2 end test 'should be able to reference a bibliography entry in a footnote' do input = <<~'EOS' Choose a design pattern.footnote:[See <<gof>> to find a collection of design patterns.] [bibliography] == Bibliography * [[[gof]]] Erich Gamma, et al. _Design Patterns: Elements of Reusable Object-Oriented Software._ Addison-Wesley. 1994. EOS result = convert_string_to_embedded input assert_include '<a href="#_footnoteref_1">1</a>. See <a href="#gof">[gof]</a> to find a collection of design patterns.', result end test 'a single-line index term macro with a primary term should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Tigers]', '(((Tigers)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Tigers'], para.document.catalog[:indexterms].first end end test 'a single-line index term macro with primary and secondary terms should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Big cats, Tigers]', '(((Big cats, Tigers)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Big cats', 'Tigers'], para.document.catalog[:indexterms].first end end test 'a single-line index term macro with primary, secondary and tertiary terms should be registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[Big cats,Tigers , Panthera tigris]', '(((Big cats,Tigers , Panthera tigris)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Big cats', 'Tigers', 'Panthera tigris'], para.document.catalog[:indexterms].first end end test 'a multi-line index term macro should be compacted and registered as an index reference' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ["indexterm:[Panthera\ntigris]", "(((Panthera\ntigris)))"] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Panthera tigris'], para.document.catalog[:indexterms].first end end test 'should escape concealed index term if second bracket is preceded by a backslash' do input = %[National Institute of Science and Technology (#{BACKSLASH}((NIST)))] doc = document_from_string input, standalone: false output = doc.convert assert_xpath '//p[text()="National Institute of Science and Technology (((NIST)))"]', output, 1 #assert doc.catalog[:indexterms].empty? 
end test 'should only escape enclosing brackets if concealed index term is preceded by a backslash' do input = %[National Institute of Science and Technology #{BACKSLASH}(((NIST)))] doc = document_from_string input, standalone: false output = doc.convert assert_xpath '//p[text()="National Institute of Science and Technology (NIST)"]', output, 1 #term = doc.catalog[:indexterms].first #assert_equal 1, term.size #assert_equal 'NIST', term.first end test 'should not split index terms on commas inside of quoted terms' do inputs = [] inputs.push <<~'EOS' Tigers are big, scary cats. indexterm:[Tigers, "[Big\], scary cats"] EOS inputs.push <<~'EOS' Tigers are big, scary cats. (((Tigers, "[Big], scary cats"))) EOS inputs.each do |input| para = block_from_string input output = para.sub_macros(para.source) assert_equal input.lines.first, output #assert_equal 1, para.document.catalog[:indexterms].size #terms = para.document.catalog[:indexterms].first #assert_equal 2, terms.size #assert_equal 'Tigers', terms.first #assert_equal '[Big], scary cats', terms.last end end test 'normal substitutions are performed on an index term macro' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macros = ['indexterm:[*Tigers*]', '(((*Tigers*)))'] macros.each do |macro| para = block_from_string("#{sentence}#{macro}") output = para.apply_subs(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['<strong>Tigers</strong>'], para.document.catalog[:indexterms].first end end test 'registers multiple index term macros' do sentence = "The tiger (Panthera tigris) is the largest cat species." macros = "(((Tigers)))\n(((Animals,Cats)))" para = block_from_string("#{sentence}\n#{macros}") output = para.sub_macros(para.source) assert_equal sentence, output.rstrip #assert_equal 2, para.document.catalog[:indexterms].size #assert_equal ['Tigers'], para.document.catalog[:indexterms][0] #assert_equal ['Animals', 'Cats'], para.document.catalog[:indexterms][1] end test 'an index term macro with round bracket syntax may contain round brackets in term' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macro = '(((Tiger (Panthera tigris))))' para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Tiger (Panthera tigris)'], para.document.catalog[:indexterms].first end test 'visible shorthand index term macro should not consume trailing round bracket' do input = '(text with ((index term)))' expected = <<~'EOS'.chop (text with <indexterm> <primary>index term</primary> </indexterm>index term) EOS #expected_term = ['index term'] para = block_from_string input, backend: :docbook output = para.sub_macros para.source assert_equal expected, output #indexterms_table = para.document.catalog[:indexterms] #assert_equal 1, indexterms_table.size #assert_equal expected_term, indexterms_table[0] end test 'visible shorthand index term macro should not consume leading round bracket' do input = '(((index term)) for text)' expected = <<~'EOS'.chop (<indexterm> <primary>index term</primary> </indexterm>index term for text) EOS #expected_term = ['index term'] para = block_from_string input, backend: :docbook output = para.sub_macros para.source assert_equal expected, output #indexterms_table = para.document.catalog[:indexterms] #assert_equal 1, indexterms_table.size #assert_equal expected_term, indexterms_table[0] end test 'an 
index term macro with square bracket syntax may contain square brackets in term' do sentence = "The tiger (Panthera tigris) is the largest cat species.\n" macro = 'indexterm:[Tiger [Panthera tigris\\]]' para = block_from_string("#{sentence}#{macro}") output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['Tiger [Panthera tigris]'], para.document.catalog[:indexterms].first end test 'a single-line index term 2 macro should be registered as an index reference and retain term inline' do sentence = 'The tiger (Panthera tigris) is the largest cat species.' macros = ['The indexterm2:[tiger] (Panthera tigris) is the largest cat species.', 'The ((tiger)) (Panthera tigris) is the largest cat species.'] macros.each do |macro| para = block_from_string(macro) output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['tiger'], para.document.catalog[:indexterms].first end end test 'a multi-line index term 2 macro should be compacted and registered as an index reference and retain term inline' do sentence = 'The panthera tigris is the largest cat species.' macros = ["The indexterm2:[ panthera\ntigris ] is the largest cat species.", "The (( panthera\ntigris )) is the largest cat species."] macros.each do |macro| para = block_from_string(macro) output = para.sub_macros(para.source) assert_equal sentence, output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['panthera tigris'], para.document.catalog[:indexterms].first end end test 'registers multiple index term 2 macros' do sentence = "The ((tiger)) (Panthera tigris) is the largest ((cat)) species." para = block_from_string(sentence) output = para.sub_macros(para.source) assert_equal 'The tiger (Panthera tigris) is the largest cat species.', output #assert_equal 2, para.document.catalog[:indexterms].size #assert_equal ['tiger'], para.document.catalog[:indexterms][0] #assert_equal ['cat'], para.document.catalog[:indexterms][1] end test 'should escape visible index term if preceded by a backslash' do sentence = "The #{BACKSLASH}((tiger)) (Panthera tigris) is the largest #{BACKSLASH}((cat)) species." para = block_from_string(sentence) output = para.sub_macros(para.source) assert_equal 'The ((tiger)) (Panthera tigris) is the largest ((cat)) species.', output #assert para.document.catalog[:indexterms].empty? end test 'normal substitutions are performed on an index term 2 macro' do sentence = 'The ((*tiger*)) (Panthera tigris) is the largest cat species.' 
para = block_from_string sentence output = para.apply_subs(para.source) assert_equal 'The <strong>tiger</strong> (Panthera tigris) is the largest cat species.', output #assert_equal 1, para.document.catalog[:indexterms].size #assert_equal ['<strong>tiger</strong>'], para.document.catalog[:indexterms].first end test 'index term 2 macro with round bracket syntex should not interfer with index term macro with round bracket syntax' do sentence = "The ((panthera tigris)) is the largest cat species.\n(((Big cats,Tigers)))" para = block_from_string sentence output = para.sub_macros(para.source) assert_equal "The panthera tigris is the largest cat species.\n", output #terms = para.document.catalog[:indexterms] #assert_equal 2, terms.size #assert_equal ['panthera tigris'], terms[0] #assert_equal ['Big cats', 'Tigers'], terms[1] end test 'should parse visible shorthand index term with see and seealso' do sentence = '((Flash >> HTML 5)) has been supplanted by ((HTML 5 &> CSS 3 &> SVG)).' output = convert_string_to_embedded sentence, backend: 'docbook' indexterm_flash = <<~'EOS'.chop <indexterm> <primary>Flash</primary> <see>HTML 5</see> </indexterm> EOS indexterm_html5 = <<~'EOS'.chop <indexterm> <primary>HTML 5</primary> <seealso>CSS 3</seealso> <seealso>SVG</seealso> </indexterm> EOS assert_includes output, indexterm_flash assert_includes output, indexterm_html5 end test 'should parse concealed shorthand index term with see and seealso' do sentence = 'Flash(((Flash >> HTML 5))) has been supplanted by HTML 5(((HTML 5 &> CSS 3 &> SVG))).' output = convert_string_to_embedded sentence, backend: 'docbook' indexterm_flash = <<~'EOS'.chop <indexterm> <primary>Flash</primary> <see>HTML 5</see> </indexterm> EOS indexterm_html5 = <<~'EOS'.chop <indexterm> <primary>HTML 5</primary> <seealso>CSS 3</seealso> <seealso>SVG</seealso> </indexterm> EOS assert_includes output, indexterm_flash assert_includes output, indexterm_html5 end test 'should parse visible index term macro with see and seealso' do sentence = 'indexterm2:[Flash,see=HTML 5] has been supplanted by indexterm2:[HTML 5,see-also="CSS 3, SVG"].' output = convert_string_to_embedded sentence, backend: 'docbook' indexterm_flash = <<~'EOS'.chop <indexterm> <primary>Flash</primary> <see>HTML 5</see> </indexterm> EOS indexterm_html5 = <<~'EOS'.chop <indexterm> <primary>HTML 5</primary> <seealso>CSS 3</seealso> <seealso>SVG</seealso> </indexterm> EOS assert_includes output, indexterm_flash assert_includes output, indexterm_html5 end test 'should parse concealed index term macro with see and seealso' do sentence = 'Flashindexterm:[Flash,see=HTML 5] has been supplanted by HTML 5indexterm:[HTML 5,see-also="CSS 3, SVG"].' 
output = convert_string_to_embedded sentence, backend: 'docbook' indexterm_flash = <<~'EOS'.chop <indexterm> <primary>Flash</primary> <see>HTML 5</see> </indexterm> EOS indexterm_html5 = <<~'EOS'.chop <indexterm> <primary>HTML 5</primary> <seealso>CSS 3</seealso> <seealso>SVG</seealso> </indexterm> EOS assert_includes output, indexterm_flash assert_includes output, indexterm_html5 end context 'Button macro' do test 'btn macro' do para = block_from_string('btn:[Save]', attributes: { 'experimental' => '' }) assert_equal %q{<b class="button">Save</b>}, para.sub_macros(para.source) end test 'btn macro that spans multiple lines' do para = block_from_string(%(btn:[Rebase and\nmerge]), attributes: { 'experimental' => '' }) assert_equal %q{<b class="button">Rebase and merge</b>}, para.sub_macros(para.source) end test 'btn macro for docbook backend' do para = block_from_string('btn:[Save]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{<guibutton>Save</guibutton>}, para.sub_macros(para.source) end end context 'Keyboard macro' do test 'kbd macro with single key' do para = block_from_string('kbd:[F3]', attributes: { 'experimental' => '' }) assert_equal %q{<kbd>F3</kbd>}, para.sub_macros(para.source) end test 'kbd macro with single backslash key' do para = block_from_string("kbd:[#{BACKSLASH} ]", attributes: { 'experimental' => '' }) assert_equal %q(<kbd>\</kbd>), para.sub_macros(para.source) end test 'kbd macro with single key, docbook backend' do para = block_from_string('kbd:[F3]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{<keycap>F3</keycap>}, para.sub_macros(para.source) end test 'kbd macro with key combination' do para = block_from_string('kbd:[Ctrl+Shift+T]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>T</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination that spans multiple lines' do para = block_from_string(%(kbd:[Ctrl +\nT]), attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>T</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination, docbook backend' do para = block_from_string('kbd:[Ctrl+Shift+T]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{<keycombo><keycap>Ctrl</keycap><keycap>Shift</keycap><keycap>T</keycap></keycombo>}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by pluses with spaces' do para = block_from_string('kbd:[Ctrl + Shift + T]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>T</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commas' do para = block_from_string('kbd:[Ctrl,Shift,T]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>T</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commas with spaces' do para = block_from_string('kbd:[Ctrl, Shift, T]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>Shift</kbd>+<kbd>T</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination delimited by plus containing a comma key' do para = block_from_string('kbd:[Ctrl+,]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>,</kbd></span>}, 
para.sub_macros(para.source) end test 'kbd macro with key combination delimited by commas containing a plus key' do para = block_from_string('kbd:[Ctrl, +, Shift]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>+</kbd>+<kbd>Shift</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination where last key matches plus delimiter' do para = block_from_string('kbd:[Ctrl + +]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>+</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination where last key matches comma delimiter' do para = block_from_string('kbd:[Ctrl, ,]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>,</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination containing escaped bracket' do para = block_from_string('kbd:[Ctrl + \]]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>]</kbd></span>}, para.sub_macros(para.source) end test 'kbd macro with key combination ending in backslash' do para = block_from_string("kbd:[Ctrl + #{BACKSLASH} ]", attributes: { 'experimental' => '' }) assert_equal %q(<span class="keyseq"><kbd>Ctrl</kbd>+<kbd>\\</kbd></span>), para.sub_macros(para.source) end test 'kbd macro looks for delimiter beyond first character' do para = block_from_string('kbd:[,te]', attributes: { 'experimental' => '' }) assert_equal %q(<kbd>,te</kbd>), para.sub_macros(para.source) end test 'kbd macro restores trailing delimiter as key value' do para = block_from_string('kbd:[te,]', attributes: { 'experimental' => '' }) assert_equal %q(<kbd>te,</kbd>), para.sub_macros(para.source) end end context 'Menu macro' do test 'should process menu using macro sytnax' do para = block_from_string('menu:File[]', attributes: { 'experimental' => '' }) assert_equal %q{<b class="menuref">File</b>}, para.sub_macros(para.source) end test 'should process menu for docbook backend' do para = block_from_string('menu:File[]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{<guimenu>File</guimenu>}, para.sub_macros(para.source) end test 'should process multiple menu macros in same line' do para = block_from_string('menu:File[] and menu:Edit[]', attributes: { 'experimental' => '' }) assert_equal '<b class="menuref">File</b> and <b class="menuref">Edit</b>', para.sub_macros(para.source) end test 'should process menu with menu item using macro syntax' do para = block_from_string('menu:File[Save As&#8230;]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">File</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Save As&#8230;</b></span>}, para.sub_macros(para.source) end test 'should process menu macro that spans multiple lines' do input = %(menu:Preferences[Compile\non\nSave]) para = block_from_string input, attributes: { 'experimental' => '' } assert_equal %(<span class="menuseq"><b class="menu">Preferences</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Compile\non\nSave</b></span>), para.sub_macros(para.source) end test 'should unescape escaped closing bracket in menu macro' do input = 'menu:Preferences[Compile [on\\] Save]' para = block_from_string input, attributes: { 'experimental' => '' } assert_equal %q(<span class="menuseq"><b class="menu">Preferences</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Compile [on] Save</b></span>), 
para.sub_macros(para.source) end test 'should process menu with menu item using macro syntax when fonts icons are enabled' do para = block_from_string('menu:Tools[More Tools &gt; Extensions]', attributes: { 'experimental' => '', 'icons' => 'font' }) assert_equal %q{<span class="menuseq"><b class="menu">Tools</b>&#160;<i class="fa fa-angle-right caret"></i> <b class="submenu">More Tools</b>&#160;<i class="fa fa-angle-right caret"></i> <b class="menuitem">Extensions</b></span>}, para.sub_macros(para.source) end test 'should process menu with menu item for docbook backend' do para = block_from_string('menu:File[Save As&#8230;]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{<menuchoice><guimenu>File</guimenu> <guimenuitem>Save As&#8230;</guimenuitem></menuchoice>}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using macro syntax' do para = block_from_string('menu:Tools[Project &gt; Build]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">Tools</b>&#160;<b class="caret">&#8250;</b> <b class="submenu">Project</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Build</b></span>}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu for docbook backend' do para = block_from_string('menu:Tools[Project &gt; Build]', backend: 'docbook', attributes: { 'experimental' => '' }) assert_equal %q{<menuchoice><guimenu>Tools</guimenu> <guisubmenu>Project</guisubmenu> <guimenuitem>Build</guimenuitem></menuchoice>}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using macro syntax and comma delimiter' do para = block_from_string('menu:Tools[Project, Build]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">Tools</b>&#160;<b class="caret">&#8250;</b> <b class="submenu">Project</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Build</b></span>}, para.sub_macros(para.source) end test 'should process menu with menu item using inline syntax' do para = block_from_string('"File &gt; Save As&#8230;"', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">File</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Save As&#8230;</b></span>}, para.sub_macros(para.source) end test 'should process menu with menu item in submenu using inline syntax' do para = block_from_string('"Tools &gt; Project &gt; Build"', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">Tools</b>&#160;<b class="caret">&#8250;</b> <b class="submenu">Project</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Build</b></span>}, para.sub_macros(para.source) end test 'inline menu syntax should not match closing quote of XML attribute' do para = block_from_string('<span class="xmltag">&lt;node&gt;</span><span class="classname">r</span>', attributes: { 'experimental' => '' }) assert_equal %q{<span class="xmltag">&lt;node&gt;</span><span class="classname">r</span>}, para.sub_macros(para.source) end test 'should process menu macro with items containing multibyte characters' do para = block_from_string('menu:视图[放大, 重置]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">视图</b>&#160;<b class="caret">&#8250;</b> <b class="submenu">放大</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">重置</b></span>}, para.sub_macros(para.source) end test 'should process inline menu with items containing 
multibyte characters' do para = block_from_string('"视图 &gt; 放大 &gt; 重置"', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">视图</b>&#160;<b class="caret">&#8250;</b> <b class="submenu">放大</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">重置</b></span>}, para.sub_macros(para.source) end test 'should process a menu macro with a target that begins with a character reference' do para = block_from_string('menu:&#8942;[More Tools, Extensions]', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">&#8942;</b>&#160;<b class="caret">&#8250;</b> <b class="submenu">More Tools</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Extensions</b></span>}, para.sub_macros(para.source) end test 'should not process a menu macro with a target that ends with a space' do input = 'menu:foo [bar] menu:File[Save]' para = block_from_string input, attributes: { 'experimental' => '' } result = para.sub_macros para.source assert_xpath '/span[@class="menuseq"]', result, 1 assert_xpath '//b[@class="menu"][text()="File"]', result, 1 end test 'should process an inline menu that begins with a character reference' do para = block_from_string('"&#8942; &gt; More Tools &gt; Extensions"', attributes: { 'experimental' => '' }) assert_equal %q{<span class="menuseq"><b class="menu">&#8942;</b>&#160;<b class="caret">&#8250;</b> <b class="submenu">More Tools</b>&#160;<b class="caret">&#8250;</b> <b class="menuitem">Extensions</b></span>}, para.sub_macros(para.source) end end end context 'Passthroughs' do test 'collect inline triple plus passthroughs' do para = block_from_string('+++<code>inline code</code>+++') result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal '<code>inline code</code>', passthroughs[0][:text] assert_empty passthroughs[0][:subs] end test 'collect multi-line inline triple plus passthroughs' do para = block_from_string("+++<code>inline\ncode</code>+++") result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal "<code>inline\ncode</code>", passthroughs[0][:text] assert_empty passthroughs[0][:subs] end test 'collect inline double dollar passthroughs' do para = block_from_string('$$<code>{code}</code>$$') result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal '<code>{code}</code>', passthroughs[0][:text] assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect inline double plus passthroughs' do para = block_from_string('++<code>{code}</code>++') result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal '<code>{code}</code>', passthroughs[0][:text] assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'should not crash if role on passthrough is enclosed in quotes' do %W( ['role']#{BACKSLASH}++This++++++++++++ 
['role']#{BACKSLASH}+++++++++This++++++++++++ ).each do |input| para = block_from_string input assert_includes para.content, %(<span class="'role'">) end end test 'should allow inline double plus passthrough to be escaped using backslash' do para = block_from_string("you need to replace `int a = n#{BACKSLASH}++;` with `int a = ++n;`!") result = para.apply_subs para.source assert_equal 'you need to replace <code>int a = n++;</code> with <code>int a = ++n;</code>!', result end test 'should allow inline double plus passthrough with attributes to be escaped using backslash' do para = block_from_string("=[attrs]#{BACKSLASH}#{BACKSLASH}++text++") result = para.apply_subs para.source assert_equal '=[attrs]++text++', result end test 'collect multi-line inline double dollar passthroughs' do para = block_from_string("$$<code>\n{code}\n</code>$$") result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal "<code>\n{code}\n</code>", passthroughs[0][:text] assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect multi-line inline double plus passthroughs' do para = block_from_string("++<code>\n{code}\n</code>++") result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal "<code>\n{code}\n</code>", passthroughs[0][:text] assert_equal [:specialcharacters], passthroughs[0][:subs] end test 'collect passthroughs from inline pass macro' do para = block_from_string(%Q{pass:specialcharacters,quotes[<code>['code'\\]</code>]}) result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal %q{<code>['code']</code>}, passthroughs[0][:text] assert_equal [:specialcharacters, :quotes], passthroughs[0][:subs] end test 'collect multi-line passthroughs from inline pass macro' do para = block_from_string(%Q{pass:specialcharacters,quotes[<code>['more\ncode'\\]</code>]}) result = para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal Asciidoctor::Substitutors::PASS_START + '0' + Asciidoctor::Substitutors::PASS_END, result assert_equal 1, passthroughs.size assert_equal %Q{<code>['more\ncode']</code>}, passthroughs[0][:text] assert_equal [:specialcharacters, :quotes], passthroughs[0][:subs] end test 'should find and replace placeholder duplicated by substitution' do input = %q(+first passthrough+ followed by link:$$http://example.com/__u_no_format_me__$$[] with passthrough) result = convert_inline_string input assert_equal 'first passthrough followed by <a href="http://example.com/__u_no_format_me__" class="bare">http://example.com/__u_no_format_me__</a> with passthrough', result end test 'resolves sub shorthands on inline pass macro' do para = block_from_string 'pass:q,a[*<{backend}>*]' result = para.extract_passthroughs para.source passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size assert_equal [:quotes, :attributes], passthroughs[0][:subs] result = para.restore_passthroughs result assert_equal '<strong><html5></strong>', result end test 
'inline pass macro supports incremental subs' do para = block_from_string 'pass:n,-a[<{backend}>]' result = para.extract_passthroughs para.source passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size result = para.restore_passthroughs result assert_equal '&lt;{backend}&gt;', result end test 'should not recognize pass macro with invalid subsitution list' do [',', '42', 'a,'].each do |subs| para = block_from_string %(pass:#{subs}[foobar]) result = para.extract_passthroughs para.source assert_equal %(pass:#{subs}[foobar]), result end end test 'should allow content of inline pass macro to be empty' do para = block_from_string 'pass:[]' result = para.extract_passthroughs para.source passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size assert_equal '', para.restore_passthroughs(result) end # NOTE placeholder is surrounded by text to prevent reader from stripping trailing boundary char (unique to test scenario) test 'restore inline passthroughs without subs' do para = block_from_string("some #{Asciidoctor::Substitutors::PASS_START}" + '0' + "#{Asciidoctor::Substitutors::PASS_END} to study") para.extract_passthroughs '' passthroughs = para.instance_variable_get :@passthroughs passthroughs[0] = { text: '<code>inline code</code>', subs: [] } result = para.restore_passthroughs(para.source) assert_equal "some <code>inline code</code> to study", result end # NOTE placeholder is surrounded by text to prevent reader from stripping trailing boundary char (unique to test scenario) test 'restore inline passthroughs with subs' do para = block_from_string("some #{Asciidoctor::Substitutors::PASS_START}" + '0' + "#{Asciidoctor::Substitutors::PASS_END} to study in the #{Asciidoctor::Substitutors::PASS_START}" + '1' + "#{Asciidoctor::Substitutors::PASS_END} programming language") para.extract_passthroughs '' passthroughs = para.instance_variable_get :@passthroughs passthroughs[0] = { text: '<code>{code}</code>', subs: [:specialcharacters] } passthroughs[1] = { text: '{language}', subs: [:specialcharacters] } result = para.restore_passthroughs(para.source) assert_equal 'some &lt;code&gt;{code}&lt;/code&gt; to study in the {language} programming language', result end test 'should restore nested passthroughs' do result = convert_inline_string %q(+Sometimes you feel pass:q[`mono`].+ Sometimes you +$$don't$$+.) assert_equal %q(Sometimes you feel <code>mono</code>. Sometimes you don't.), result end test 'should not fail to restore remaining passthroughs after processing inline passthrough with macro substitution' do input = 'pass:m[.] pass:[.]' assert_equal '. .', (convert_inline_string input) end test 'should honor role on double plus passthrough' do result = convert_inline_string 'Print the version using [var]++{asciidoctor-version}++.' 
assert_equal 'Print the version using <span class="var">{asciidoctor-version}</span>.', result end test 'complex inline passthrough macro' do text_to_escape = %q{[(] <'basic form'> <'logical operator'> <'basic form'> [)]} para = block_from_string %($$#{text_to_escape}$$) para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size assert_equal text_to_escape, passthroughs[0][:text] text_to_escape_escaped = %q{[(\] <'basic form'> <'logical operator'> <'basic form'> [)\]} para = block_from_string %(pass:specialcharacters[#{text_to_escape_escaped}]) para.extract_passthroughs(para.source) passthroughs = para.instance_variable_get :@passthroughs assert_equal 1, passthroughs.size assert_equal text_to_escape, passthroughs[0][:text] end test 'inline pass macro with a composite sub' do para = block_from_string %(pass:verbatim[<{backend}>]) assert_equal '&lt;{backend}&gt;', para.content end context 'Math macros' do test 'should passthrough text in asciimath macro and surround with AsciiMath delimiters' do input = 'asciimath:[x/x={(1,if x!=0),(text{undefined},if x=0):}]' para = block_from_string input assert_equal '\$x/x={(1,if x!=0),(text{undefined},if x=0):}\$', para.content end test 'should not recognize asciimath macro with no content' do input = 'asciimath:[]' para = block_from_string input assert_equal 'asciimath:[]', para.content end test 'should perform specialcharacters subs on asciimath macro content in html backend by default' do input = 'asciimath:[a < b]' para = block_from_string input assert_equal '\$a &lt; b\$', para.content end test 'should convert contents of asciimath macro to MathML in DocBook output if asciimath gem is available' do asciimath_available = !(Asciidoctor::Helpers.require_library 'asciimath', true, :ignore).nil? input = 'asciimath:[a < b]' expected = '<inlineequation><mml:math xmlns:mml="http://www.w3.org/1998/Math/MathML"><mml:mi>a</mml:mi><mml:mo>&lt;</mml:mo><mml:mi>b</mml:mi></mml:math></inlineequation>' using_memory_logger do |logger| para = block_from_string input, backend: :docbook actual = para.content if asciimath_available assert_equal expected, actual assert_equal :loaded, para.document.converter.instance_variable_get(:@asciimath_status) else assert_message logger, :WARN, 'optional gem \'asciimath\' is not available. Functionality disabled.' assert_equal :unavailable, para.document.converter.instance_variable_get(:@asciimath_status) end end end test 'should not perform specialcharacters subs on asciimath macro content in Docbook output if asciimath gem not available' do asciimath_available = !(Asciidoctor::Helpers.require_library 'asciimath', true, :ignore).nil? 
input = 'asciimath:[a < b]' para = block_from_string input, backend: :docbook para.document.converter.instance_variable_set :@asciimath_status, :unavailable if asciimath_available old_asciimath = ::AsciiMath Object.send :remove_const, 'AsciiMath' end assert_equal '<inlineequation><mathphrase><![CDATA[a < b]]></mathphrase></inlineequation>', para.content ::AsciiMath = old_asciimath if asciimath_available end test 'should honor explicit subslist on asciimath macro' do input = 'asciimath:attributes[{expr}]' para = block_from_string input, attributes: { 'expr' => 'x != 0' } assert_equal '\$x != 0\$', para.content end test 'should passthrough text in latexmath macro and surround with LaTeX math delimiters' do input = 'latexmath:[C = \alpha + \beta Y^{\gamma} + \epsilon]' para = block_from_string input assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end test 'should strip legacy LaTeX math delimiters around latexmath content if present' do input = 'latexmath:[$C = \alpha + \beta Y^{\gamma} + \epsilon$]' para = block_from_string input assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end test 'should not recognize latexmath macro with no content' do input = 'latexmath:[]' para = block_from_string input assert_equal 'latexmath:[]', para.content end test 'should unescape escaped square bracket in equation' do input = 'latexmath:[\sqrt[3\]{x}]' para = block_from_string input assert_equal '\(\sqrt[3]{x}\)', para.content end test 'should perform specialcharacters subs on latexmath macro in html backend by default' do input = 'latexmath:[a < b]' para = block_from_string input assert_equal '\(a &lt; b\)', para.content end test 'should not perform specialcharacters subs on latexmath macro content in docbook backend by default' do input = 'latexmath:[a < b]' para = block_from_string input, backend: :docbook assert_equal '<inlineequation><alt><![CDATA[a < b]]></alt><mathphrase><![CDATA[a < b]]></mathphrase></inlineequation>', para.content end test 'should honor explicit subslist on latexmath macro' do input = 'latexmath:attributes[{expr}]' para = block_from_string input, attributes: { 'expr' => '\sqrt{4} = 2' } assert_equal '\(\sqrt{4} = 2\)', para.content end test 'should passthrough math macro inside another passthrough' do input = 'the text `asciimath:[x = y]` should be passed through as +literal+ text' para = block_from_string input, attributes: { 'compat-mode' => '' } assert_equal 'the text <code>asciimath:[x = y]</code> should be passed through as <code>literal</code> text', para.content input = 'the text [x-]`asciimath:[x = y]` should be passed through as `literal` text' para = block_from_string input assert_equal 'the text <code>asciimath:[x = y]</code> should be passed through as <code>literal</code> text', para.content input = 'the text `+asciimath:[x = y]+` should be passed through as `literal` text' para = block_from_string input assert_equal 'the text <code>asciimath:[x = y]</code> should be passed through as <code>literal</code> text', para.content end test 'should support attrlist on a literal monospace phrase' do input = '[.baz]`+foo--bar+`' para = block_from_string input assert_equal '<code class="baz">foo--bar</code>', para.content end test 'should not process an escaped passthrough macro inside a monospaced phrase' do input = 'use the `\pass:c[]` macro' para = block_from_string input assert_equal 'use the <code>pass:c[]</code> macro', para.content end test 'should not process an escaped passthrough macro inside a monospaced phrase with 
attributes' do input = 'use the [syntax]`\pass:c[]` macro' para = block_from_string input assert_equal 'use the <code class="syntax">pass:c[]</code> macro', para.content end test 'should honor an escaped single plus passthrough inside a monospaced phrase' do input = 'use `\+{author}+` to show an attribute reference' para = block_from_string input assert_equal 'use <code>+{author}+</code> to show an attribute reference', para.content end test 'should not recognize stem macro with no content' do input = 'stem:[]' para = block_from_string input assert_equal input, para.content end test 'should passthrough text in stem macro and surround with AsciiMath delimiters if stem attribute is asciimath, empty, or not set' do [ {}, { 'stem' => '' }, { 'stem' => 'asciimath' }, { 'stem' => 'bogus' }, ].each do |attributes| input = 'stem:[x/x={(1,if x!=0),(text{undefined},if x=0):}]' para = block_from_string input, attributes: attributes assert_equal '\$x/x={(1,if x!=0),(text{undefined},if x=0):}\$', para.content end end test 'should passthrough text in stem macro and surround with LaTeX math delimiters if stem attribute is latexmath, latex, or tex' do [ { 'stem' => 'latexmath' }, { 'stem' => 'latex' }, { 'stem' => 'tex' }, ].each do |attributes| input = 'stem:[C = \alpha + \beta Y^{\gamma} + \epsilon]' para = block_from_string input, attributes: attributes assert_equal '\(C = \alpha + \beta Y^{\gamma} + \epsilon\)', para.content end end test 'should apply substitutions specified on stem macro' do ['stem:c,a[sqrt(x) <=> {solve-for-x}]', 'stem:n,-r[sqrt(x) <=> {solve-for-x}]'].each do |input| para = block_from_string input, attributes: { 'stem' => 'asciimath', 'solve-for-x' => '13' } assert_equal '\$sqrt(x) &lt;=&gt; 13\$', para.content end end test 'should not recognize stem macro with invalid substitution list' do [',', '42', 'a,'].each do |subs| input = %(stem:#{subs}[x^2]) para = block_from_string input, attributes: { 'stem' => 'asciimath' } assert_equal %(stem:#{subs}[x^2]), para.content end end end end context 'Replacements' do test 'unescapes XML entities' do para = block_from_string '< &quot; &there4; &#34; &#x22; >' assert_equal '&lt; &quot; &there4; &#34; &#x22; &gt;', para.apply_subs(para.source) end test 'replaces arrows' do para = block_from_string '<- -> <= => \<- \-> \<= \=>' assert_equal '&#8592; &#8594; &#8656; &#8658; &lt;- -&gt; &lt;= =&gt;', para.apply_subs(para.source) end test 'replaces dashes' do input = <<~'EOS' -- foo foo--bar foo\--bar foo -- bar foo \-- bar stuff in between -- foo stuff in between foo -- stuff in between foo -- EOS expected = <<~'EOS'.chop &#8201;&#8212;&#8201;foo foo&#8212;&#8203;bar foo--bar foo&#8201;&#8212;&#8201;bar foo -- bar stuff in between&#8201;&#8212;&#8201;foo stuff in between foo&#8201;&#8212;&#8201;stuff in between foo&#8201;&#8212;&#8201; EOS para = block_from_string input assert_equal expected, para.sub_replacements(para.source) end test 'replaces dashes between multibyte word characters' do para = block_from_string %(富--巴) expected = '富&#8212;&#8203;巴' assert_equal expected, para.sub_replacements(para.source) end test 'replaces marks' do para = block_from_string '(C) (R) (TM) \(C) \(R) \(TM)' assert_equal '&#169; &#174; &#8482; (C) (R) (TM)', para.sub_replacements(para.source) end test 'preserves entity references' do input = '&amp; &#169; &#10004; &#128512; &#x2022; &#x1f600;' result = convert_inline_string input assert_equal input, result end test 'only preserves named entities with two or more letters' do input = '&amp; &a; &gt;' result = 
convert_inline_string input assert_equal '&amp; &amp;a; &gt;', result end test 'replaces punctuation' do para = block_from_string %(John's Hideout is the Whites`' place... foo\\'bar) assert_equal "John&#8217;s Hideout is the Whites&#8217; place&#8230;&#8203; foo'bar", para.sub_replacements(para.source) end test 'should replace right single quote marks' do given = [ %(`'Twas the night), %(a `'57 Chevy!), %(the whites`' place), %(the whites`'.), %(the whites`'--where the wild things are), %(the whites`'\nhave), %(It's Mary`'s little lamb.), %(consecutive single quotes '' are not modified), %(he is 6' tall), %(\\`') ] expected = [ %(&#8217;Twas the night), %(a &#8217;57 Chevy!), %(the whites&#8217; place), %(the whites&#8217;.), %(the whites&#8217;--where the wild things are), %(the whites&#8217;\nhave), %(It&#8217;s Mary&#8217;s little lamb.), %(consecutive single quotes '' are not modified), %(he is 6' tall), %(`') ] given.size.times do |i| para = block_from_string given[i] assert_equal expected[i], para.sub_replacements(para.source) end end end context 'Post replacements' do test 'line break inserted after line with line break character' do para = block_from_string("First line +\nSecond line") result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line<br>', result.first end test 'line break inserted after line wrap with hardbreaks enabled' do para = block_from_string("First line\nSecond line", attributes: { 'hardbreaks' => '' }) result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line<br>', result.first end test 'line break character stripped from end of line with hardbreaks enabled' do para = block_from_string("First line +\nSecond line", attributes: { 'hardbreaks' => '' }) result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line<br>', result.first end test 'line break not inserted for single line with hardbreaks enabled' do para = block_from_string('First line', attributes: { 'hardbreaks' => '' }) result = para.apply_subs para.lines, (para.expand_subs :post_replacements) assert_equal 'First line', result.first end end context 'Resolve subs' do test 'should resolve subs for block' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph block.attributes['subs'] = 'quotes,normal' block.commit_subs assert_equal [:quotes, :specialcharacters, :attributes, :replacements, :macros, :post_replacements], block.subs end test 'should resolve specialcharacters sub as highlight for source block when source highlighter is coderay' do doc = empty_document attributes: { 'source-highlighter' => 'coderay' }, parse: true block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.commit_subs assert_equal [:highlight], block.subs end test 'should resolve specialcharacters sub as highlight for source block when source highlighter is pygments' do doc = empty_document attributes: { 'source-highlighter' => 'pygments' }, parse: true block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.commit_subs assert_equal [:highlight], block.subs end if ENV['PYGMENTS_VERSION'] test 'should not replace specialcharacters sub with highlight for source block when source highlighter is not set' do doc = empty_document parse: 
true block = Asciidoctor::Block.new doc, :listing, content_model: :verbatim block.style = 'source' block.attributes['subs'] = 'specialcharacters' block.attributes['language'] = 'ruby' block.commit_subs assert_equal [:specialcharacters], block.subs end test 'should not use subs if subs option passed to block constructor is nil' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: nil, attributes: { 'subs' => 'quotes' } assert_empty block.subs block.commit_subs assert_empty block.subs end test 'should not use subs if subs option passed to block constructor is empty array' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: [], attributes: { 'subs' => 'quotes' } assert_empty block.subs block.commit_subs assert_empty block.subs end test 'should use subs from subs option passed to block constructor' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: [:specialcharacters], attributes: { 'subs' => 'quotes' } assert_equal [:specialcharacters], block.subs block.commit_subs assert_equal [:specialcharacters], block.subs end test 'should use subs from subs attribute if subs option is not passed to block constructor' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', attributes: { 'subs' => 'quotes' } assert_empty block.subs # in this case, we have to call commit_subs to resolve the subs block.commit_subs assert_equal [:quotes], block.subs end test 'should use subs from subs attribute if subs option passed to block constructor is :default' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: :default, attributes: { 'subs' => 'quotes' } assert_equal [:quotes], block.subs block.commit_subs assert_equal [:quotes], block.subs end test 'should use built-in subs if subs option passed to block constructor is :default and subs attribute is absent' do doc = empty_document parse: true block = Asciidoctor::Block.new doc, :paragraph, source: '*bold* _italic_', subs: :default assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], block.subs block.commit_subs assert_equal [:specialcharacters, :quotes, :attributes, :replacements, :macros, :post_replacements], block.subs end end end
1
6,692
What is this obscure syntax anyway? o.O
asciidoctor-asciidoctor
rb
@@ -756,6 +756,10 @@ class SPRegion(PyRegion):
 
     return spec
 
+  def getAlgorithm(self):
+    return self._sfdr
+
+
   def getParameter(self, parameterName, index=-1):
     """
       Get the value of a NodeSpec parameter. Most parameters are handled
1
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import os import numpy from nupic.bindings.math import GetNTAReal from nupic.bindings.algorithms import SpatialPooler as CPPSpatialPooler from nupic.research.spatial_pooler import SpatialPooler as PYSpatialPooler import nupic.research.fdrutilities as fdru from nupic.support import getArgumentDescriptions from PyRegion import PyRegion def getDefaultSPImp(): """ Return the default spatial pooler implementation for this region. """ return 'cpp' def getSPClass(spatialImp): """ Return the class corresponding to the given spatialImp string """ if spatialImp == 'py': return PYSpatialPooler elif spatialImp == 'cpp': return CPPSpatialPooler else: raise RuntimeError("Invalid spatialImp '%s'. Legal values are: 'py', " "'cpp'" % (spatialImp)) def _buildArgs(f, self=None, kwargs={}): """ Get the default arguments from the function and assign as instance vars. Return a list of 3-tuples with (name, description, defaultValue) for each argument to the function. Assigns all arguments to the function as instance variables of SPRegion. If the argument was not provided, uses the default value. Pops any values from kwargs that go to the function. """ # Get the name, description, and default value for each argument argTuples = getArgumentDescriptions(f) argTuples = argTuples[1:] # Remove 'self' # Get the names of the parameters to our own constructor and remove them # Check for _originial_init first, because if LockAttributesMixin is used, # __init__'s signature will be just (self, *args, **kw), but # _original_init is created with the original signature #init = getattr(self, '_original_init', self.__init__) init = SPRegion.__init__ ourArgNames = [t[0] for t in getArgumentDescriptions(init)] # Also remove a few other names that aren't in our constructor but are # computed automatically (e.g. numberOfCols for the TP) # TODO: where does numberOfCols come into SPRegion? 
ourArgNames += [ 'numberOfCols', ] for argTuple in argTuples[:]: if argTuple[0] in ourArgNames: argTuples.remove(argTuple) # Build the dictionary of arguments if self: for argTuple in argTuples: argName = argTuple[0] if argName in kwargs: # Argument was provided argValue = kwargs.pop(argName) else: # Argument was not provided; use the default value if there is one, and # raise an exception otherwise if len(argTuple) == 2: # No default value raise TypeError("Must provide value for '%s'" % argName) argValue = argTuple[2] # Set as an instance variable if 'self' was passed in setattr(self, argName, argValue) return argTuples def _getAdditionalSpecs(spatialImp, kwargs={}): """Build the additional specs in three groups (for the inspector) Use the type of the default argument to set the Spec type, defaulting to 'Byte' for None and complex types Determines the spatial parameters based on the selected implementation. It defaults to SpatialPooler. """ typeNames = {int: 'UInt32', float: 'Real32', str: 'Byte', bool: 'bool', tuple: 'tuple'} def getArgType(arg): t = typeNames.get(type(arg), 'Byte') count = 0 if t == 'Byte' else 1 if t == 'tuple': t = typeNames.get(type(arg[0]), 'Byte') count = len(arg) if t == 'bool': t = 'UInt32' return (t, count) def getConstraints(arg): t = typeNames.get(type(arg), 'Byte') if t == 'Byte': return 'multiple' elif t == 'bool': return 'bool' else: return '' # Get arguments from spatial pooler constructors, figure out types of # variables and populate spatialSpec. SpatialClass = getSPClass(spatialImp) sArgTuples = _buildArgs(SpatialClass.__init__) spatialSpec = {} for argTuple in sArgTuples: d = dict( description=argTuple[1], accessMode='ReadWrite', dataType=getArgType(argTuple[2])[0], count=getArgType(argTuple[2])[1], constraints=getConstraints(argTuple[2])) spatialSpec[argTuple[0]] = d # Add special parameters that weren't handled automatically # Spatial parameters only! spatialSpec.update(dict( columnCount=dict( description='Total number of columns (coincidences).', accessMode='Read', dataType='UInt32', count=1, constraints=''), inputWidth=dict( description='Size of inputs to the SP.', accessMode='Read', dataType='UInt32', count=1, constraints=''), spInputNonZeros=dict( description='The indices of the non-zero inputs to the spatial pooler', accessMode='Read', dataType='UInt32', count=0, constraints=''), spOutputNonZeros=dict( description='The indices of the non-zero outputs from the spatial pooler', accessMode='Read', dataType='UInt32', count=0, constraints=''), spOverlapDistribution=dict( description="""The overlaps between the active output coincidences and the input. The overlap amounts for each coincidence are sorted from highest to lowest. """, accessMode='Read', dataType='Real32', count=0, constraints=''), sparseCoincidenceMatrix=dict( description='The coincidences, as a SparseMatrix', accessMode='Read', dataType='Byte', count=0, constraints=''), denseOutput=dict( description='Score for each coincidence.', accessMode='Read', dataType='Real32', count=0, constraints=''), spLearningStatsStr=dict( description="""String representation of dictionary containing a number of statistics related to learning.""", accessMode='Read', dataType='Byte', count=0, constraints='handle'), spatialImp=dict( description="""Which spatial pooler implementation to use. Set to either 'py', or 'cpp'. 
The 'cpp' implementation is optimized for speed in C++.""", accessMode='ReadWrite', dataType='Byte', count=0, constraints='enum: py, cpp'), )) # The last group is for parameters that aren't specific to spatial pooler otherSpec = dict( learningMode=dict( description='1 if the node is learning (default 1).', accessMode='ReadWrite', dataType='UInt32', count=1, constraints='bool'), inferenceMode=dict( description='1 if the node is inferring (default 0).', accessMode='ReadWrite', dataType='UInt32', count=1, constraints='bool'), anomalyMode=dict( description='1 if an anomaly score is being computed', accessMode='ReadWrite', dataType='UInt32', count=1, constraints='bool'), topDownMode=dict( description='1 if the node should do top down compute on the next call ' 'to compute into topDownOut (default 0).', accessMode='ReadWrite', dataType='UInt32', count=1, constraints='bool'), activeOutputCount=dict( description='Number of active elements in bottomUpOut output.', accessMode='Read', dataType='UInt32', count=1, constraints=''), logPathInput=dict( description='Optional name of input log file. If set, every input vector' ' will be logged to this file.', accessMode='ReadWrite', dataType='Byte', count=0, constraints=''), logPathOutput=dict( description='Optional name of output log file. If set, every output vector' ' will be logged to this file.', accessMode='ReadWrite', dataType='Byte', count=0, constraints=''), logPathOutputDense=dict( description='Optional name of output log file. If set, every output vector' ' will be logged to this file as a dense vector.', accessMode='ReadWrite', dataType='Byte', count=0, constraints=''), ) return spatialSpec, otherSpec class SPRegion(PyRegion): """ SPRegion is designed to implement the spatial pooler compute for a given HTM level. Uses the SpatialPooler class to do most of the work. This node has just one SpatialPooler instance for the enitire level and does *not* support the concept of "baby nodes" within it. Automatic parameter handling: Parameter names, default values, and descriptions are retrieved automatically from SpatialPooler. Thus, there are only a few hardcoded arguments in __init__, and the rest are passed to the appropriate underlying class. The NodeSpec is mostly built automatically from these parameters, too. If you add a parameter to SpatialPooler, it will be exposed through SPRegion automatically as if it were in SPRegion.__init__, with the right default value. Add an entry in the __init__ docstring for it too, and that will be brought into the NodeSpec. SPRegion will maintain the parameter as its own instance variable and also pass it to SpatialPooler. If the parameter is changed, SPRegion will propagate the change. If you want to do something different with the parameter, add it as an argument into SPRegion.__init__, which will override all the default handling. """ def __init__(self, columnCount, # Number of columns in the SP, a required parameter inputWidth, # Size of inputs to the SP, a required parameter spatialImp=getDefaultSPImp(), #'py', 'cpp' **kwargs): if columnCount <= 0 or inputWidth <=0: raise TypeError("Parameters columnCount and inputWidth must be > 0") # Pull out the spatial arguments automatically # These calls whittle down kwargs and create instance variables of SPRegion self.SpatialClass = getSPClass(spatialImp) sArgTuples = _buildArgs(self.SpatialClass.__init__, self, kwargs) # Make a list of automatic spatial arg names for later use self._spatialArgNames = [t[0] for t in sArgTuples] # Learning and SP parameters. 
# By default we start out in stage learn with inference disabled self.learningMode = True self.inferenceMode = False self.anomalyMode = False self.topDownMode = False self.columnCount = columnCount self.inputWidth = inputWidth PyRegion.__init__(self, **kwargs) # Initialize all non-persistent base members, as well as give # derived class an opportunity to do the same. self._loaded = False self._initializeEphemeralMembers() # Debugging support, used in _conditionalBreak self.breakPdb = False self.breakKomodo = False # Defaults for all other parameters self.logPathInput = '' self.logPathOutput = '' self.logPathOutputDense = '' self._fpLogSPInput = None self._fpLogSP = None self._fpLogSPDense = None # # Variables set up in initInNetwork() # # Spatial instance self._sfdr = None # Spatial pooler's bottom-up output value: hang on to this output for # top-down inference and for debugging self._spatialPoolerOutput = None # Spatial pooler's bottom-up input: hang on to this for supporting the # spInputNonZeros parameter self._spatialPoolerInput = None ############################################################################# # # Initialization code # ############################################################################# def _initializeEphemeralMembers(self): """ Initialize all ephemeral data members, and give the derived class the opportunity to do the same by invoking the virtual member _initEphemerals(), which is intended to be overridden. NOTE: this is used by both __init__ and __setstate__ code paths. """ for attrName in self._getEphemeralMembersBase(): if attrName != "_loaded": if hasattr(self, attrName): if self._loaded: # print self.__class__.__name__, "contains base class member '%s' " \ # "after loading." % attrName # TODO: Re-enable warning or turn into error in a future release. pass else: print self.__class__.__name__, "contains base class member '%s'" % \ attrName if not self._loaded: for attrName in self._getEphemeralMembersBase(): if attrName != "_loaded": # if hasattr(self, attrName): # import pdb; pdb.set_trace() assert not hasattr(self, attrName) else: assert hasattr(self, attrName) # Profiling information self._profileObj = None self._iterations = 0 # Let derived class initialize ephemerals self._initEphemerals() self._checkEphemeralMembers() def initialize(self, dims, splitterMaps): """""" # Zero out the spatial output in case it is requested self._spatialPoolerOutput = numpy.zeros(self.columnCount, dtype=GetNTAReal()) # Zero out the rfInput in case it is requested self._spatialPoolerInput = numpy.zeros((1,self.inputWidth), dtype=GetNTAReal()) # Allocate the spatial pooler self._allocateSpatialFDR(None) def _allocateSpatialFDR(self, rfInput): """Allocate the spatial pooler instance.""" if self._sfdr: return # Retrieve the necessary extra arguments that were handled automatically autoArgs = dict((name, getattr(self, name)) for name in self._spatialArgNames) # Instantiate the spatial pooler class. 
if ( (self.SpatialClass == CPPSpatialPooler) or (self.SpatialClass == PYSpatialPooler) ): autoArgs['columnDimensions'] = [self.columnCount] autoArgs['inputDimensions'] = [self.inputWidth] autoArgs['potentialRadius'] = self.inputWidth self._sfdr = self.SpatialClass( **autoArgs ) ############################################################################# # # Core compute methods: learning, inference, and prediction # ############################################################################# def compute(self, inputs, outputs): """ Run one iteration of SPRegion's compute, profiling it if requested. The guts of the compute are contained in the _compute() call so that we can profile it if requested. """ # Uncomment this to find out who is generating divide by 0, or other numpy warnings # numpy.seterr(divide='raise', invalid='raise', over='raise') # Modify this line to turn on profiling for a given node. The results file # ('hotshot.stats') will be sensed and printed out by the vision framework's # RunInference.py script at the end of inference. # Also uncomment the hotshot import at the top of this file. if False and self.learningMode \ and self._iterations > 0 and self._iterations <= 10: import hotshot if self._iterations == 10: print "\n Collecting and sorting internal node profiling stats generated by hotshot..." stats = hotshot.stats.load("hotshot.stats") stats.strip_dirs() stats.sort_stats('time', 'calls') stats.print_stats() if self._profileObj is None: print "\n Preparing to capture profile using hotshot..." if os.path.exists('hotshot.stats'): # There is an old hotshot stats profile left over, remove it. os.remove('hotshot.stats') self._profileObj = hotshot.Profile("hotshot.stats", 1, 1) # filename, lineevents, linetimings self._profileObj.runcall(self._compute, *[inputs, outputs]) else: self._compute(inputs, outputs) def _compute(self, inputs, outputs): """ Run one iteration of SPRegion's compute """ #if self.topDownMode and (not 'topDownIn' in inputs): # raise RuntimeError("The input topDownIn must be linked in if " # "topDownMode is True") if self._sfdr is None: raise RuntimeError("Spatial pooler has not been initialized") if not self.topDownMode: # # BOTTOM-UP compute # self._iterations += 1 # Get our inputs into numpy arrays buInputVector = inputs['bottomUpIn'] resetSignal = False if 'resetIn' in inputs: assert len(inputs['resetIn']) == 1 resetSignal = inputs['resetIn'][0] != 0 # Perform inference and/or learning rfOutput = self._doBottomUpCompute( rfInput = buInputVector.reshape((1,buInputVector.size)), resetSignal = resetSignal ) outputs['bottomUpOut'][:] = rfOutput.flat else: # # TOP-DOWN inference # topDownIn = inputs.get('topDownIn',None) spatialTopDownOut, temporalTopDownOut = self._doTopDownInfer(topDownIn) outputs['spatialTopDownOut'][:] = spatialTopDownOut if temporalTopDownOut is not None: outputs['temporalTopDownOut'][:] = temporalTopDownOut # OBSOLETE outputs['anomalyScore'][:] = 0 # Write the bottom up out to our node outputs only if we are doing inference #print "SPRegion input: ", buInputVector.nonzero()[0] #print "SPRegion output: ", rfOutput.nonzero()[0] def _doBottomUpCompute(self, rfInput, resetSignal): """ Do one iteration of inference and/or learning and return the result Parameters: -------------------------------------------- rfInput: Input vector. Shape is: (1, inputVectorLen). 
resetSignal: True if reset is asserted """ # Conditional compute break self._conditionalBreak() # Save the rfInput for the spInputNonZeros parameter self._spatialPoolerInput = rfInput.reshape(-1) assert(rfInput.shape[0] == 1) # Run inference using the spatial pooler. We learn on the coincidences only # if we are in learning mode and trainingStep is set appropriately. # Run SFDR bottom-up compute and cache output in self._spatialPoolerOutput inputVector = numpy.array(rfInput[0]).astype('uint32') outputVector = numpy.zeros(self._sfdr.getNumColumns()).astype('uint32') self._sfdr.compute(inputVector, self.learningMode, outputVector) self._spatialPoolerOutput[:] = outputVector[:] # Direct logging of SP outputs if requested if self._fpLogSP: output = self._spatialPoolerOutput.reshape(-1) outputNZ = output.nonzero()[0] outStr = " ".join(["%d" % int(token) for token in outputNZ]) print >>self._fpLogSP, output.size, outStr # Direct logging of SP inputs if self._fpLogSPInput: output = rfInput.reshape(-1) outputNZ = output.nonzero()[0] outStr = " ".join(["%d" % int(token) for token in outputNZ]) print >>self._fpLogSPInput, output.size, outStr return self._spatialPoolerOutput def _doTopDownInfer(self, topDownInput = None): """ Do one iteration of top-down inference. Parameters: -------------------------------------------- tdInput: Top-down input retval: (spatialTopDownOut, temporalTopDownOut) spatialTopDownOut is the top down output computed only from the SP, using it's current bottom-up output. temporalTopDownOut is the top down output computed from the topDown in of the level above us. """ return None, None ############################################################################# # # Region API support methods: getSpec, getParameter, and setParameter # ############################################################################# @classmethod def getBaseSpec(cls): """Return the base Spec for SPRegion. Doesn't include the spatial, temporal and other parameters """ spec = dict( description=SPRegion.__doc__, singleNodeOnly=True, inputs=dict( bottomUpIn=dict( description="""The input vector.""", dataType='Real32', count=0, required=True, regionLevel=False, isDefaultInput=True, requireSplitterMap=False), resetIn=dict( description="""A boolean flag that indicates whether or not the input vector received in this compute cycle represents the start of a new temporal sequence.""", dataType='Real32', count=1, required=False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), topDownIn=dict( description="""The top-down input signal, generated from feedback from upper levels""", dataType='Real32', count=0, required = False, regionLevel=True, isDefaultInput=False, requireSplitterMap=False), ), outputs=dict( bottomUpOut=dict( description="""The output signal generated from the bottom-up inputs from lower levels.""", dataType='Real32', count=0, regionLevel=True, isDefaultOutput=True), topDownOut=dict( description="""The top-down output signal, generated from feedback from upper levels""", dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), spatialTopDownOut = dict( description="""The top-down output, generated only from the current SP output. 
This can be used to evaluate how well the SP is representing the inputs independent of the TP.""", dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), temporalTopDownOut = dict( description="""The top-down output, generated only from the current TP output feedback down through the SP.""", dataType='Real32', count=0, regionLevel=True, isDefaultOutput=False), anomalyScore = dict( description="""The score for how 'anomalous' (i.e. rare) this spatial input pattern is. Higher values are increasingly rare""", dataType='Real32', count=1, regionLevel=True, isDefaultOutput=False), ), parameters=dict( breakPdb=dict( description='Set to 1 to stop in the pdb debugger on the next compute', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'), breakKomodo=dict( description='Set to 1 to stop in the Komodo debugger on the next compute', dataType='UInt32', count=1, constraints='bool', defaultValue=0, accessMode='ReadWrite'), ), ) return spec @classmethod def getSpec(cls): """Return the Spec for SPRegion. The parameters collection is constructed based on the parameters specified by the variosu components (spatialSpec, temporalSpec and otherSpec) """ spec = cls.getBaseSpec() s, o = _getAdditionalSpecs(spatialImp=getDefaultSPImp()) spec['parameters'].update(s) spec['parameters'].update(o) return spec def getParameter(self, parameterName, index=-1): """ Get the value of a NodeSpec parameter. Most parameters are handled automatically by PyRegion's parameter get mechanism. The ones that need special treatment are explicitly handled here. """ if parameterName == 'activeOutputCount': return self.columnCount elif parameterName == 'spatialPoolerInput': return list(self._spatialPoolerInput.reshape(-1)) elif parameterName == 'spatialPoolerOutput': return list(self._spatialPoolerOutput) elif parameterName == 'spNumActiveOutputs': return len(self._spatialPoolerOutput.nonzero()[0]) elif parameterName == 'spOutputNonZeros': return [len(self._spatialPoolerOutput)] + \ list(self._spatialPoolerOutput.nonzero()[0]) elif parameterName == 'spInputNonZeros': import pdb; pdb.set_trace() return [len(self._spatialPoolerInput)] + \ list(self._spatialPoolerInput.nonzero()[0]) elif parameterName == 'spLearningStatsStr': try: return str(self._sfdr.getLearningStats()) except: return str(dict()) else: return PyRegion.getParameter(self, parameterName, index) def setParameter(self, parameterName, index, parameterValue): """ Set the value of a Spec parameter. Most parameters are handled automatically by PyRegion's parameter set mechanism. The ones that need special treatment are explicitly handled here. 
""" if parameterName in self._spatialArgNames: setattr(self._sfdr, parameterName, parameterValue) elif parameterName == "logPathInput": self.logPathInput = parameterValue # Close any existing log file if self._fpLogSPInput: self._fpLogSPInput.close() self._fpLogSPInput = None # Open a new log file if parameterValue: self._fpLogSPInput = open(self.logPathInput, 'w') elif parameterName == "logPathOutput": self.logPathOutput = parameterValue # Close any existing log file if self._fpLogSP: self._fpLogSP.close() self._fpLogSP = None # Open a new log file if parameterValue: self._fpLogSP = open(self.logPathOutput, 'w') elif parameterName == "logPathOutputDense": self.logPathOutputDense = parameterValue # Close any existing log file if self._fpLogSPDense: self._fpLogSPDense.close() self._fpLogSPDense = None # Open a new log file if parameterValue: self._fpLogSPDense = open(self.logPathOutputDense, 'w') elif hasattr(self, parameterName): setattr(self, parameterName, parameterValue) else: raise Exception('Unknown parameter: ' + parameterName) ############################################################################# # # Methods to support serialization # ############################################################################# def __getstate__(self): """ Return serializable state. This function will return a version of the __dict__ with all "ephemeral" members stripped out. "Ephemeral" members are defined as those that do not need to be (nor should be) stored in any kind of persistent file (e.g., NuPIC network XML file.) """ state = self.__dict__.copy() # We only want to serialize a single spatial/temporal FDR if they're cloned for ephemeralMemberName in self._getEphemeralMembersAll(): state.pop(ephemeralMemberName, None) return state def __setstate__(self, state): """ Set the state of ourself from a serialized state. """ self.__dict__.update(state) self._loaded = True # Backwards compatibility if not hasattr(self, "SpatialClass"): self.SpatialClass = self._sfdr.__class__ # Initialize all non-persistent base members, as well as give # derived class an opportunity to do the same. self._initializeEphemeralMembers() self._allocateSpatialFDR(None) def _initEphemerals(self): """ Initialize all ephemerals used by derived classes. """ if hasattr(self, '_sfdr') and self._sfdr: self._spatialPoolerOutput = numpy.zeros(self.columnCount, dtype=GetNTAReal()) else: self._spatialPoolerOutput = None # Will be filled in initInNetwork # Direct logging support (faster than node watch) self._fpLogSPInput = None self._fpLogSP = None self._fpLogSPDense = None self.logPathInput = "" self.logPathOutput = "" self.logPathOutputDense = "" def _getEphemeralMembers(self): """ Callback that returns a list of all "ephemeral" members (i.e., data members that should not and/or cannot be pickled.) """ return ['_spatialPoolerOutput', '_fpLogSP', '_fpLogSPDense', 'logPathInput', 'logPathOutput', 'logPathOutputDense' ] def _getEphemeralMembersBase(self): """ Returns list of all ephemeral members. """ return [ '_loaded', '_profileObj', '_iterations', ] def _getEphemeralMembersAll(self): """ Returns a concatenated list of both the standard base class ephemeral members, as well as any additional ephemeral members (e.g., file handles, etc.). 
""" return self._getEphemeralMembersBase() + self._getEphemeralMembers() def _checkEphemeralMembers(self): for attrName in self._getEphemeralMembersBase(): if not hasattr(self, attrName): print "Missing base class member:", attrName for attrName in self._getEphemeralMembers(): if not hasattr(self, attrName): print "Missing derived class member:", attrName for attrName in self._getEphemeralMembersBase(): assert hasattr(self, attrName) for attrName in self._getEphemeralMembers(): assert hasattr(self, attrName), "Node missing attr '%s'." % attrName ############################################################################# # # Misc. code # ############################################################################# def _conditionalBreak(self): if self.breakKomodo: import dbgp.client; dbgp.client.brk() if self.breakPdb: import pdb; pdb.set_trace() ############################################################################# # # NuPIC 2 Support # These methods are required by NuPIC 2 # ############################################################################# def getOutputElementCount(self, name): if name == 'bottomUpOut': return self.columnCount elif name == 'spatialTopDownOut' or name == 'temporalTopDownOut' or \ name == 'topDownOut': return self.inputWidth else: raise Exception("Invalid output name specified") # TODO: as a temporary hack, getParameterArrayCount checks to see if there's a # variable, private or not, with that name. If so, it attempts to return the # length of that variable. def getParameterArrayCount(self, name, index): p = self.getParameter(name) if (not hasattr(p, '__len__')): raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name) return len(p) # TODO: as a temporary hack, getParameterArray checks to see if there's a # variable, private or not, with that name. If so, it returns the value of the # variable. def getParameterArray(self, name, index, a): p = self.getParameter(name) if (not hasattr(p, '__len__')): raise Exception("Attempt to access parameter '%s' as an array but it is not an array" % name) if len(p) > 0: a[:] = p[:]
1
20,130
I think we should have one for the CLA Classifier, and KNNClassifier as well. And how about RecordSensor?
numenta-nupic
py
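The SPRegion docstring in the row above describes its automatic parameter handling: argument names and default values are read straight off SpatialPooler's constructor and turned into Spec entries (_buildArgs / _getAdditionalSpecs). Below is a minimal, hypothetical Python sketch of that introspection pattern; FakePooler and build_arg_specs are invented names used only for illustration and are not part of NuPIC.

import inspect

class FakePooler(object):
    """Stand-in constructor; only the keyword arguments and their defaults matter here."""
    def __init__(self, numActiveColumnsPerInhArea=10, potentialPct=0.5, globalInhibition=True):
        pass

def build_arg_specs(ctor):
    """Map each keyword argument of `ctor` to a small spec dict derived from its default."""
    specs = {}
    for name, param in inspect.signature(ctor).parameters.items():
        if name == 'self' or param.default is inspect.Parameter.empty:
            continue  # skip positional/required arguments; they have no default to expose
        specs[name] = {'defaultValue': param.default,
                       'dataType': type(param.default).__name__}
    return specs

if __name__ == '__main__':
    for name, spec in build_arg_specs(FakePooler.__init__).items():
        print(name, spec)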
@@ -0,0 +1,9 @@
+package com.fsck.k9.fragment;
+import android.database.Cursor;
+import android.view.ContextMenu;
+/**
+ Created by Kamil Rajtar on 05.11.17. */
+
+public interface ICursorContextMenuSupplier{
+	void getCursorMenu(ContextMenu menu,Cursor cursor);
+}
1
1
16,339
Missing new line.
k9mail-k-9
java
@@ -17,6 +17,9 @@ const populateModelSymbol = require('../symbols').populateModelSymbol;
 const schemaMixedSymbol = require('../../schema/symbols').schemaMixedSymbol;
 
 module.exports = function getModelsMapForPopulate(model, docs, options) {
+  if (model.schema.base.options.strictPopulate == false && options.strictPopulate == null) {
+    options.strictPopulate = model.schema.base.options.strictPopulate;
+  }
   let doc;
   const len = docs.length;
   const map = [];
1
'use strict'; const MongooseError = require('../../error/index'); const SkipPopulateValue = require('./SkipPopulateValue'); const get = require('../get'); const getDiscriminatorByValue = require('../discriminator/getDiscriminatorByValue'); const getConstructorName = require('../getConstructorName'); const getSchemaTypes = require('./getSchemaTypes'); const getVirtual = require('./getVirtual'); const lookupLocalFields = require('./lookupLocalFields'); const mpath = require('mpath'); const modelNamesFromRefPath = require('./modelNamesFromRefPath'); const utils = require('../../utils'); const modelSymbol = require('../symbols').modelSymbol; const populateModelSymbol = require('../symbols').populateModelSymbol; const schemaMixedSymbol = require('../../schema/symbols').schemaMixedSymbol; module.exports = function getModelsMapForPopulate(model, docs, options) { let doc; const len = docs.length; const map = []; const modelNameFromQuery = options.model && options.model.modelName || options.model; let schema; let refPath; let modelNames; const available = {}; const modelSchema = model.schema; // Populating a nested path should always be a no-op re: #9073. // People shouldn't do this, but apparently they do. if (options._localModel != null && options._localModel.schema.nested[options.path]) { return []; } const _virtualRes = getVirtual(model.schema, options.path); const virtual = _virtualRes == null ? null : _virtualRes.virtual; if (virtual != null) { return _virtualPopulate(model, docs, options, _virtualRes); } let allSchemaTypes = getSchemaTypes(model, modelSchema, null, options.path); allSchemaTypes = Array.isArray(allSchemaTypes) ? allSchemaTypes : [allSchemaTypes].filter(v => v != null); if (allSchemaTypes.length <= 0 && options.strictPopulate !== false && options._localModel != null) { return new MongooseError('Cannot populate path `' + options.path + '` because it is not in your schema. Set the `strictPopulate` option ' + 'to false to override.'); } for (let i = 0; i < len; i++) { doc = docs[i]; let justOne = null; const docSchema = doc != null && doc.$__ != null ? 
doc.$__schema : modelSchema; schema = getSchemaTypes(model, docSchema, doc, options.path); // Special case: populating a path that's a DocumentArray unless // there's an explicit `ref` or `refPath` re: gh-8946 if (schema != null && schema.$isMongooseDocumentArray && schema.options.ref == null && schema.options.refPath == null) { continue; } const isUnderneathDocArray = schema && schema.$isUnderneathDocArray; if (isUnderneathDocArray && get(options, 'options.sort') != null) { return new MongooseError('Cannot populate with `sort` on path ' + options.path + ' because it is a subproperty of a document array'); } modelNames = null; let isRefPath = false; let normalizedRefPath = null; let schemaOptions = null; if (Array.isArray(schema)) { const schemasArray = schema; for (const _schema of schemasArray) { let _modelNames; let res; try { res = _getModelNames(doc, _schema, modelNameFromQuery, model); _modelNames = res.modelNames; isRefPath = isRefPath || res.isRefPath; normalizedRefPath = normalizedRefPath || res.refPath; justOne = res.justOne; } catch (error) { return error; } if (isRefPath && !res.isRefPath) { continue; } if (!_modelNames) { continue; } modelNames = modelNames || []; for (const modelName of _modelNames) { if (modelNames.indexOf(modelName) === -1) { modelNames.push(modelName); } } } } else { try { const res = _getModelNames(doc, schema, modelNameFromQuery, model); modelNames = res.modelNames; isRefPath = res.isRefPath; normalizedRefPath = normalizedRefPath || res.refPath; justOne = res.justOne; schemaOptions = get(schema, 'options.populate', null); } catch (error) { return error; } if (!modelNames) { continue; } } const data = {}; const localField = options.path; const foreignField = '_id'; // `justOne = null` means we don't know from the schema whether the end // result should be an array or a single doc. This can result from // populating a POJO using `Model.populate()` if ('justOne' in options && options.justOne !== void 0) { justOne = options.justOne; } else if (schema && !schema[schemaMixedSymbol]) { // Skip Mixed types because we explicitly don't do casting on those. if (options.path.endsWith('.' + schema.path) || options.path === schema.path) { justOne = Array.isArray(schema) ? schema.every(schema => !schema.$isMongooseArray) : !schema.$isMongooseArray; } } if (!modelNames) { continue; } data.isVirtual = false; data.justOne = justOne; data.localField = localField; data.foreignField = foreignField; // Get local fields const ret = _getLocalFieldValues(doc, localField, model, options, null, schema); const id = String(utils.getValue(foreignField, doc)); options._docs[id] = Array.isArray(ret) ? 
ret.slice() : ret; let match = get(options, 'match', null); const hasMatchFunction = typeof match === 'function'; if (hasMatchFunction) { match = match.call(doc, doc); } data.match = match; data.hasMatchFunction = hasMatchFunction; data.isRefPath = isRefPath; if (isRefPath) { const embeddedDiscriminatorModelNames = _findRefPathForDiscriminators(doc, modelSchema, data, options, normalizedRefPath, ret); modelNames = embeddedDiscriminatorModelNames || modelNames; } try { addModelNamesToMap(model, map, available, modelNames, options, data, ret, doc, schemaOptions); } catch (err) { return err; } } return map; function _getModelNames(doc, schema, modelNameFromQuery, model) { let modelNames; let isRefPath = false; let justOne = null; if (schema && schema.caster) { schema = schema.caster; } if (schema && schema.$isSchemaMap) { schema = schema.$__schemaType; } const ref = schema && schema.options && schema.options.ref; refPath = schema && schema.options && schema.options.refPath; if (schema != null && schema[schemaMixedSymbol] && !ref && !refPath && !modelNameFromQuery) { return { modelNames: null }; } if (modelNameFromQuery) { modelNames = [modelNameFromQuery]; // query options } else if (refPath != null) { if (typeof refPath === 'function') { const subdocPath = options.path.slice(0, options.path.length - schema.path.length - 1); const vals = mpath.get(subdocPath, doc, lookupLocalFields); const subdocsBeingPopulated = Array.isArray(vals) ? utils.array.flatten(vals) : (vals ? [vals] : []); modelNames = new Set(); for (const subdoc of subdocsBeingPopulated) { refPath = refPath.call(subdoc, subdoc, options.path); modelNamesFromRefPath(refPath, doc, options.path, modelSchema, options._queryProjection). forEach(name => modelNames.add(name)); } modelNames = Array.from(modelNames); } else { modelNames = modelNamesFromRefPath(refPath, doc, options.path, modelSchema, options._queryProjection); } isRefPath = true; } else { let ref; let refPath; let schemaForCurrentDoc; let discriminatorValue; let modelForCurrentDoc = model; const discriminatorKey = model.schema.options.discriminatorKey; if (!schema && discriminatorKey && (discriminatorValue = utils.getValue(discriminatorKey, doc))) { // `modelNameForFind` is the discriminator value, so we might need // find the discriminated model name const discriminatorModel = getDiscriminatorByValue(model.discriminators, discriminatorValue) || model; if (discriminatorModel != null) { modelForCurrentDoc = discriminatorModel; } else { try { modelForCurrentDoc = model.db.model(discriminatorValue); } catch (error) { return error; } } schemaForCurrentDoc = modelForCurrentDoc.schema._getSchema(options.path); if (schemaForCurrentDoc && schemaForCurrentDoc.caster) { schemaForCurrentDoc = schemaForCurrentDoc.caster; } } else { schemaForCurrentDoc = schema; } if (schemaForCurrentDoc != null) { justOne = !schemaForCurrentDoc.$isMongooseArray && !schemaForCurrentDoc._arrayPath; } if ((ref = get(schemaForCurrentDoc, 'options.ref')) != null) { if (schemaForCurrentDoc != null && typeof ref === 'function' && options.path.endsWith('.' + schemaForCurrentDoc.path)) { // Ensure correct context for ref functions: subdoc, not top-level doc. See gh-8469 modelNames = new Set(); const subdocPath = options.path.slice(0, options.path.length - schemaForCurrentDoc.path.length - 1); const vals = mpath.get(subdocPath, doc, lookupLocalFields); const subdocsBeingPopulated = Array.isArray(vals) ? utils.array.flatten(vals) : (vals ? 
[vals] : []); for (const subdoc of subdocsBeingPopulated) { modelNames.add(handleRefFunction(ref, subdoc)); } if (subdocsBeingPopulated.length === 0) { modelNames = [handleRefFunction(ref, doc)]; } else { modelNames = Array.from(modelNames); } } else { ref = handleRefFunction(ref, doc); modelNames = [ref]; } } else if ((schemaForCurrentDoc = get(schema, 'options.refPath')) != null) { isRefPath = true; if (typeof refPath === 'function') { const subdocPath = options.path.slice(0, options.path.length - schemaForCurrentDoc.path.length - 1); const vals = mpath.get(subdocPath, doc, lookupLocalFields); const subdocsBeingPopulated = Array.isArray(vals) ? utils.array.flatten(vals) : (vals ? [vals] : []); modelNames = new Set(); for (const subdoc of subdocsBeingPopulated) { refPath = refPath.call(subdoc, subdoc, options.path); modelNamesFromRefPath(refPath, doc, options.path, modelSchema, options._queryProjection). forEach(name => modelNames.add(name)); } modelNames = Array.from(modelNames); } else { modelNames = modelNamesFromRefPath(refPath, doc, options.path, modelSchema, options._queryProjection); } } } if (!modelNames) { return { modelNames: modelNames, justOne: justOne, isRefPath: isRefPath, refPath: refPath }; } if (!Array.isArray(modelNames)) { modelNames = [modelNames]; } return { modelNames: modelNames, justOne: justOne, isRefPath: isRefPath, refPath: refPath }; } }; /*! * ignore */ function _virtualPopulate(model, docs, options, _virtualRes) { const map = []; const available = {}; const virtual = _virtualRes.virtual; for (const doc of docs) { let modelNames = null; const data = {}; // localField and foreignField let localField; const virtualPrefix = _virtualRes.nestedSchemaPath ? _virtualRes.nestedSchemaPath + '.' : ''; if (typeof virtual.options.localField === 'function') { localField = virtualPrefix + virtual.options.localField.call(doc, doc); } else if (Array.isArray(virtual.options.localField)) { localField = virtual.options.localField.map(field => virtualPrefix + field); } else { localField = virtualPrefix + virtual.options.localField; } data.count = virtual.options.count; if (virtual.options.skip != null && !options.hasOwnProperty('skip')) { options.skip = virtual.options.skip; } if (virtual.options.limit != null && !options.hasOwnProperty('limit')) { options.limit = virtual.options.limit; } if (virtual.options.perDocumentLimit != null && !options.hasOwnProperty('perDocumentLimit')) { options.perDocumentLimit = virtual.options.perDocumentLimit; } let foreignField = virtual.options.foreignField; if (!localField || !foreignField) { return new MongooseError('If you are populating a virtual, you must set the ' + 'localField and foreignField options'); } if (typeof localField === 'function') { localField = localField.call(doc, doc); } if (typeof foreignField === 'function') { foreignField = foreignField.call(doc); } data.isRefPath = false; // `justOne = null` means we don't know from the schema whether the end // result should be an array or a single doc. 
This can result from // populating a POJO using `Model.populate()` let justOne = null; if ('justOne' in options && options.justOne !== void 0) { justOne = options.justOne; } if (virtual.options.refPath) { modelNames = modelNamesFromRefPath(virtual.options.refPath, doc, options.path); justOne = !!virtual.options.justOne; data.isRefPath = true; } else if (virtual.options.ref) { let normalizedRef; if (typeof virtual.options.ref === 'function' && !virtual.options.ref[modelSymbol]) { normalizedRef = virtual.options.ref.call(doc, doc); } else { normalizedRef = virtual.options.ref; } justOne = !!virtual.options.justOne; // When referencing nested arrays, the ref should be an Array // of modelNames. if (Array.isArray(normalizedRef)) { modelNames = normalizedRef; } else { modelNames = [normalizedRef]; } } data.isVirtual = true; data.virtual = virtual; data.justOne = justOne; // `match` let match = get(options, 'match', null) || get(data, 'virtual.options.match', null) || get(data, 'virtual.options.options.match', null); let hasMatchFunction = typeof match === 'function'; if (hasMatchFunction) { match = match.call(doc, doc); } if (Array.isArray(localField) && Array.isArray(foreignField) && localField.length === foreignField.length) { match = Object.assign({}, match); for (let i = 1; i < localField.length; ++i) { match[foreignField[i]] = convertTo_id(mpath.get(localField[i], doc, lookupLocalFields), model.schema); hasMatchFunction = true; } localField = localField[0]; foreignField = foreignField[0]; } data.localField = localField; data.foreignField = foreignField; data.match = match; data.hasMatchFunction = hasMatchFunction; // Get local fields const ret = _getLocalFieldValues(doc, localField, model, options, virtual); try { addModelNamesToMap(model, map, available, modelNames, options, data, ret, doc); } catch (err) { return err; } } return map; } /*! * ignore */ function addModelNamesToMap(model, map, available, modelNames, options, data, ret, doc, schemaOptions) { // `PopulateOptions#connection`: if the model is passed as a string, the // connection matters because different connections have different models. const connection = options.connection != null ? options.connection : model.db; if (modelNames == null) { return; } let k = modelNames.length; while (k--) { const modelName = modelNames[k]; if (modelName == null) { continue; } let Model; if (options.model && options.model[modelSymbol]) { Model = options.model; } else if (modelName[modelSymbol]) { Model = modelName; } else { try { Model = connection.model(modelName); } catch (err) { if (ret !== void 0) { throw err; } Model = null; } } let ids = ret; const flat = Array.isArray(ret) ? utils.array.flatten(ret) : []; if (data.isRefPath && Array.isArray(ret) && flat.length === modelNames.length) { ids = flat.filter((val, i) => modelNames[i] === modelName); } const perDocumentLimit = options.perDocumentLimit == null ? get(options, 'options.perDocumentLimit', null) : options.perDocumentLimit; if (!available[modelName] || perDocumentLimit != null) { const currentOptions = { model: Model }; if (data.isVirtual && get(data.virtual, 'options.options')) { currentOptions.options = utils.clone(data.virtual.options.options); } else if (schemaOptions != null) { currentOptions.options = Object.assign({}, schemaOptions); } utils.merge(currentOptions, options); // Used internally for checking what model was used to populate this // path. 
options[populateModelSymbol] = Model; available[modelName] = { model: Model, options: currentOptions, match: data.hasMatchFunction ? [data.match] : data.match, docs: [doc], ids: [ids], allIds: [ret], localField: new Set([data.localField]), foreignField: new Set([data.foreignField]), justOne: data.justOne, isVirtual: data.isVirtual, virtual: data.virtual, count: data.count, [populateModelSymbol]: Model }; map.push(available[modelName]); } else { available[modelName].localField.add(data.localField); available[modelName].foreignField.add(data.foreignField); available[modelName].docs.push(doc); available[modelName].ids.push(ids); available[modelName].allIds.push(ret); if (data.hasMatchFunction) { available[modelName].match.push(data.match); } } } } /*! * ignore */ function handleRefFunction(ref, doc) { if (typeof ref === 'function' && !ref[modelSymbol]) { return ref.call(doc, doc); } return ref; } /*! * ignore */ function _getLocalFieldValues(doc, localField, model, options, virtual, schema) { // Get Local fields const localFieldPathType = model.schema._getPathType(localField); const localFieldPath = localFieldPathType === 'real' ? model.schema.path(localField) : localFieldPathType.schema; const localFieldGetters = localFieldPath && localFieldPath.getters ? localFieldPath.getters : []; const _populateOptions = get(options, 'options', {}); const getters = 'getters' in _populateOptions ? _populateOptions.getters : get(virtual, 'options.getters', false); if (localFieldGetters.length > 0 && getters) { const hydratedDoc = (doc.$__ != null) ? doc : model.hydrate(doc); const localFieldValue = utils.getValue(localField, doc); if (Array.isArray(localFieldValue)) { const localFieldHydratedValue = utils.getValue(localField.split('.').slice(0, -1), hydratedDoc); return localFieldValue.map((localFieldArrVal, localFieldArrIndex) => localFieldPath.applyGetters(localFieldArrVal, localFieldHydratedValue[localFieldArrIndex])); } else { return localFieldPath.applyGetters(localFieldValue, hydratedDoc); } } else { return convertTo_id(mpath.get(localField, doc, lookupLocalFields), schema); } } /*! * Retrieve the _id of `val` if a Document or Array of Documents. * * @param {Array|Document|Any} val * @return {Array|Document|Any} */ function convertTo_id(val, schema) { if (val != null && val.$__ != null) { return val._id; } if (val != null && val._id != null && (schema == null || !schema.$isSchemaMap)) { return val._id; } if (Array.isArray(val)) { const rawVal = val.__array != null ? val.__array : val; for (let i = 0; i < rawVal.length; ++i) { if (rawVal[i] != null && rawVal[i].$__ != null) { rawVal[i] = rawVal[i]._id; } } if (val.isMongooseArray && val.$schema()) { return val.$schema()._castForPopulate(val, val.$parent()); } return [].concat(val); } // `populate('map')` may be an object if populating on a doc that hasn't // been hydrated yet if (getConstructorName(val) === 'Object' && // The intent here is we should only flatten the object if we expect // to get a Map in the end. Avoid doing this for mixed types. (schema == null || schema[schemaMixedSymbol] == null)) { const ret = []; for (const key of Object.keys(val)) { ret.push(val[key]); } return ret; } // If doc has already been hydrated, e.g. `doc.populate('map')` // then `val` will already be a map if (val instanceof Map) { return Array.from(val.values()); } return val; } /*! * ignore */ function _findRefPathForDiscriminators(doc, modelSchema, data, options, normalizedRefPath, ret) { // Re: gh-8452. 
Embedded discriminators may not have `refPath`, so clear // out embedded discriminator docs that don't have a `refPath` on the // populated path. if (!data.isRefPath || normalizedRefPath == null) { return; } const pieces = normalizedRefPath.split('.'); let cur = ''; let modelNames = void 0; for (let i = 0; i < pieces.length; ++i) { const piece = pieces[i]; cur = cur + (cur.length === 0 ? '' : '.') + piece; const schematype = modelSchema.path(cur); if (schematype != null && schematype.$isMongooseArray && schematype.caster.discriminators != null && Object.keys(schematype.caster.discriminators).length > 0) { const subdocs = utils.getValue(cur, doc); const remnant = options.path.substr(cur.length + 1); const discriminatorKey = schematype.caster.schema.options.discriminatorKey; modelNames = []; for (const subdoc of subdocs) { const discriminatorName = utils.getValue(discriminatorKey, subdoc); const discriminator = schematype.caster.discriminators[discriminatorName]; const discriminatorSchema = discriminator && discriminator.schema; if (discriminatorSchema == null) { continue; } const _path = discriminatorSchema.path(remnant); if (_path == null || _path.options.refPath == null) { const docValue = utils.getValue(data.localField.substr(cur.length + 1), subdoc); ret.forEach((v, i) => { if (v === docValue) { ret[i] = SkipPopulateValue(v); } }); continue; } const modelName = utils.getValue(pieces.slice(i + 1).join('.'), subdoc); modelNames.push(modelName); } } } return modelNames; }
1
14,848
`model.schema.base.options.strictPopulate != null`. Why hardcoding false? Also, `model.base.options` should work and be shorter. Finally, any way to move this up to the `populate()` function in `model.js` that calls `getModelsMapForPopulate()` and shallow clone the object before modifying? A bit random to modify this option here.
Automattic-mongoose
js
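The review comment in the row above suggests testing the base-level option for any non-null value, resolving the default up in populate() in model.js, and shallow-cloning the options object before modifying it, rather than mutating the caller's options deep inside getModelsMapForPopulate(). A rough Python sketch of that clone-then-default shape follows; the function name and dict-based options are stand-ins used only to illustrate the pattern, not Mongoose's actual internals.

def resolve_populate_options(options, base_options):
    """Return a copy of `options` with strictPopulate defaulted from the
    base-level setting, leaving the caller's dict untouched."""
    resolved = dict(options)  # shallow clone instead of in-place mutation
    if resolved.get('strictPopulate') is None and base_options.get('strictPopulate') is not None:
        resolved['strictPopulate'] = base_options['strictPopulate']
    return resolved

# The default is applied once, up front, before the helper that builds the model map runs.
opts = resolve_populate_options({'path': 'author'}, {'strictPopulate': False})
assert opts['strictPopulate'] is False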
@@ -35,10 +35,10 @@ namespace Nethermind.Core.Test
             ISpecProvider specProvider = new CustomSpecProvider((0, releaseSpec));
             BlockValidator blockValidator = new BlockValidator(txValidator, AlwaysValidHeaderValidator.Instance, AlwaysValidOmmersValidator.Instance, specProvider, LimboLogs.Instance);
 
-            bool noiseRemoved = blockValidator.ValidateSuggestedBlock(Build.A.Block.TestObject);
+            bool noiseRemoved = blockValidator.ValidateSuggestedBlock(Builders.Build.A.Block.TestObject);
             Assert.True(noiseRemoved);
 
-            bool result = blockValidator.ValidateSuggestedBlock(Build.A.Block.WithOmmers(Build.A.BlockHeader.TestObject).TestObject);
+            bool result = blockValidator.ValidateSuggestedBlock(Builders.Build.A.Block.WithOmmers(Builders.Build.A.BlockHeader.TestObject).TestObject);
             Assert.False(result);
         }
     }
1
// Copyright (c) 2018 Demerzel Solutions Limited // This file is part of the Nethermind library. // // The Nethermind library is free software: you can redistribute it and/or modify // it under the terms of the GNU Lesser General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // The Nethermind library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with the Nethermind. If not, see <http://www.gnu.org/licenses/>. using Nethermind.Blockchain.Validators; using Nethermind.Core.Specs; using Nethermind.Specs; using Nethermind.Core.Test.Builders; using Nethermind.Logging; using NUnit.Framework; namespace Nethermind.Core.Test { [TestFixture] public class BlockValidatorTests { [Test] public void When_more_uncles_than_allowed_returns_false() { TxValidator txValidator = new TxValidator(ChainId.MainNet); ReleaseSpec releaseSpec = new ReleaseSpec(); releaseSpec.MaximumUncleCount = 0; ISpecProvider specProvider = new CustomSpecProvider((0, releaseSpec)); BlockValidator blockValidator = new BlockValidator(txValidator, AlwaysValidHeaderValidator.Instance, AlwaysValidOmmersValidator.Instance, specProvider, LimboLogs.Instance); bool noiseRemoved = blockValidator.ValidateSuggestedBlock(Build.A.Block.TestObject); Assert.True(noiseRemoved); bool result = blockValidator.ValidateSuggestedBlock(Build.A.Block.WithOmmers(Build.A.BlockHeader.TestObject).TestObject); Assert.False(result); } } }
1
23,095
We now have Prepare.A/Build.A/Builders.Build.A - what are the differences?
NethermindEth-nethermind
.cs
@@ -51,6 +51,12 @@ module Asciidoctor
       append_newline ? %(#{str}#{LF}) : str
     end
 
+    def preserve_backslash str
+      str = str.
+        gsub(/\\/, '\\e')
+      str
+    end
+
     def skip_with_warning node, name = nil
      warn %(asciidoctor: WARNING: converter missing for #{name || node.node_name} node in manpage backend)
      nil
1
module Asciidoctor # A built-in {Converter} implementation that generates the man page (troff) format. # # The output follows the groff man page definition while also trying to be # consistent with the output produced by the a2x tool from AsciiDoc Python. # # See http://www.gnu.org/software/groff/manual/html_node/Man-usage.html#Man-usage class Converter::ManPageConverter < Converter::BuiltIn LF = "\n" TAB = "\t" ETAB = ' ' * 8 # Converts HTML entity references back to their original form, escapes # special man characters and strips trailing whitespace. # # Optional features: # * fold each endline into a single space # * append a newline def manify str, opts = {} append_newline = opts[:append_newline] preserve_space = opts.fetch :preserve_space, true str = preserve_space ? str.gsub(TAB, ETAB) : str.tr_s(%(#{LF}#{TAB} ), ' ') str = str. gsub(/^\.$/, '\\\&.'). # a lone . is also used in troff to indicate paragraph continuation with visual separator gsub(/^\\\./, '\\\&.'). # line beginning with \. shouldn't be mistaken for troff macro gsub(/\\$/, '\\(rs'). # a literal backslash at the end of a line gsub(/^\.((?:URL|MTO) ".*?" ".*?" )( |[^\s]*)(.*?)( *)$/, ".\\1\"\\2\"#{LF}\\c\\3"). # quote last URL argument gsub(/(?:\A\n|\n *(\n))^\.(URL|MTO) /, "\\1\.\\2 "). # strip blank lines in source that precede a URL gsub('-', '\\-'). gsub('&lt;', '<'). gsub('&gt;', '>'). gsub('&#169;', '\\(co'). # copyright sign gsub('&#174;', '\\(rg'). # registered sign gsub('&#8482;', '\\(tm'). # trademark sign gsub('&#8201;', ' '). # thin space gsub('&#8211;', '\\(en'). # en-dash gsub(/&#8212(?:;&#8203;)?/, '\\(em'). # em-dash gsub('&#8216;', '\\(oq'). # left single quotation mark gsub('&#8217;', '\\(cq'). # right single quotation mark gsub('&#8220;', '\\(lq'). # left double quotation mark gsub('&#8221;', '\\(rq'). # right double quotation mark gsub(/&#8230;(?:&#8203;)?/, '...'). # horizontal ellipsis gsub('&#8592;', '\\(<-'). # leftwards arrow gsub('&#8594;', '\\(->'). # rightwards arrow gsub('&#8656;', '\\(lA'). # leftwards double arrow gsub('&#8658;', '\\(rA'). # rightwards double arrow gsub('&#8203;', '\:'). # zero width space gsub('\'', '\\(aq'). # apostrophe-quote gsub(/<\/?BOUNDARY>/, '').# artificial boundary rstrip # strip trailing space append_newline ? %(#{str}#{LF}) : str end def skip_with_warning node, name = nil warn %(asciidoctor: WARNING: converter missing for #{name || node.node_name} node in manpage backend) nil end def document node unless node.attr? 'mantitle' raise 'asciidoctor: ERROR: doctype must be set to manpage when using manpage backend' end mantitle = node.attr 'mantitle' manvolnum = node.attr 'manvolnum', '1' manname = node.attr 'manname', mantitle result = [%('\\" t .\\" Title: #{mantitle} .\\" Author: #{(node.attr? 'authors') ? (node.attr 'authors') : '[see the "AUTHORS" section]'} .\\" Generator: Asciidoctor #{node.attr 'asciidoctor-version'} .\\" Date: #{docdate = node.attr 'docdate'} .\\" Manual: #{manual = (node.attr? 'manmanual') ? (node.attr 'manmanual') : '\ \&'} .\\" Source: #{source = (node.attr? 'mansource') ? 
(node.attr 'mansource') : '\ \&'} .\\" Language: English .\\")] # TODO add document-level setting to disable capitalization of manname result << %(.TH "#{manify manname.upcase}" "#{manvolnum}" "#{docdate}" "#{manify source}" "#{manify manual}") # define portability settings # see http://bugs.debian.org/507673 # see http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html result << '.ie \n(.g .ds Aq \(aq' result << '.el .ds Aq \'' # set sentence_space_size to 0 to prevent extra space between sentences separated by a newline # the alternative is to add \& at the end of the line result << '.ss \n[.ss] 0' # disable hyphenation result << '.nh' # disable justification (adjust text to left margin only) result << '.ad l' # define URL macro for portability # see http://web.archive.org/web/20060102165607/http://people.debian.org/~branden/talks/wtfm/wtfm.pdf # # Use: .URL "http://www.debian.org" "Debian" "." # # * First argument: the URL # * Second argument: text to be hyperlinked # * Third (optional) argument: text that needs to immediately trail # the hyperlink without intervening whitespace result << '.de URL \\\\$2 \\(laURL: \\\\$1 \\(ra\\\\$3 .. .if \n[.g] .mso www.tmac .LINKSTYLE blue R < >' unless node.noheader if node.attr? 'manpurpose' result << %(.SH "#{node.attr 'manname-title'}" #{manify mantitle} \\- #{manify node.attr 'manpurpose'}) end end result << node.content # QUESTION should NOTES come after AUTHOR(S)? if node.footnotes? && !(node.attr? 'nofootnotes') result << '.SH "NOTES"' result.concat(node.footnotes.map {|fn| %(#{fn.index}. #{fn.text}) }) end # FIXME detect single author and use appropriate heading; itemize the authors if multiple if node.attr? 'authors' result << %(.SH "AUTHOR(S)" .sp \\fB#{node.attr 'authors'}\\fP .RS 4 Author(s). .RE) end result * LF end # NOTE embedded doesn't really make sense in the manpage backend def embedded node result = [node.content] if node.footnotes? && !(node.attr? 'nofootnotes') result << '.SH "NOTES"' result.concat(node.footnotes.map {|fn| %(#{fn.index}. #{fn.text}) }) end # QUESTION should we add an AUTHOR(S) section? result * LF end def section node slevel = node.level # QUESTION should the check for slevel be done in section? slevel = 1 if slevel == 0 && node.special result = [] if slevel > 1 macro = 'SS' # QUESTION why captioned title? why not for slevel == 1? stitle = node.captioned_title else macro = 'SH' stitle = node.title.upcase end result << %(.#{macro} "#{manify stitle}" #{node.content}) result * LF end def admonition node result = [] result << %(.if n \\{\\ .sp .\\} .RS 4 .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .ps +1 .B #{node.caption}#{node.title? ? "\\fP #{manify node.title}" : nil} .ps -1 .br #{resolve_content node} .sp .5v .RE) result * LF end alias :audio :skip_with_warning def colist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? result << %(.TS tab\(:\); r lw\(\\n\(.lu*75u/100u\).) node.items.each_with_index do |item, index| result << %(\\fB(#{index + 1})\\fP\\h'-2n':T{ #{manify item.text} T}) end result << '.TE' result * LF end # TODO implement title for dlist # TODO implement horizontal (if it makes sense) def dlist node result = [] counter = 0 node.items.each do |terms, dd| counter += 1 case node.style when 'qanda' result << %(.sp #{counter}. #{manify([*terms].map {|dt| dt.text }.join ' ')} .RS 4) else result << %(.sp #{manify([*terms].map {|dt| dt.text }.join ', ')} .RS 4) end if dd result << (manify dd.text) if dd.text? result << dd.content if dd.blocks? 
end result << '.RE' end result * LF end def example node result = [] result << %(.sp .B #{manify node.captioned_title} .br) if node.title? result << %(.RS 4 #{resolve_content node} .RE) result * LF end def floating_title node %(.SS "#{manify node.title}") end alias :image :skip_with_warning def listing node result = [] result << %(.sp .B #{manify node.captioned_title} .br) if node.title? result << %(.sp .if n \\{\\ .RS 4 .\\} .nf #{manify node.content} .fi .if n \\{\\ .RE .\\}) result * LF end def literal node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? result << %(.sp .if n \\{\\ .RS 4 .\\} .nf #{manify node.content} .fi .if n \\{\\ .RE .\\}) result * LF end def olist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? node.items.each_with_index do |item, idx| result << %(.sp .RS 4 .ie n \\{\\ \\h'-04' #{idx + 1}.\\h'+01'\\c .\\} .el \\{\\ .sp -1 .IP " #{idx + 1}." 4.2 .\\} #{manify item.text}) result << item.content if item.blocks? result << '.RE' end result * LF end def open node case node.style when 'abstract', 'partintro' resolve_content node else node.content end end # TODO use Page Control https://www.gnu.org/software/groff/manual/html_node/Page-Control.html#Page-Control alias :page_break :skip def paragraph node if node.title? %(.sp .B #{manify node.title} .br #{manify node.content}) else %(.sp #{manify node.content}) end end alias :preamble :content def quote node result = [] if node.title? result << %(.sp .in +.3i .B #{manify node.title} .br .in) end attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil attribution_line = (node.attr? 'attribution') ? %(#{attribution_line}\\\(em #{node.attr 'attribution'}) : nil result << %(.in +.3i .ll -.3i .nf #{resolve_content node} .fi .br .in .ll) if attribution_line result << %(.in +.5i .ll -.5i #{attribution_line} .in .ll) end result * LF end alias :sidebar :skip_with_warning def stem node title_element = node.title? ? %(.sp .B #{manify node.title} .br) : nil open, close = BLOCK_MATH_DELIMITERS[node.style.to_sym] unless ((equation = node.content).start_with? open) && (equation.end_with? close) equation = %(#{open}#{equation}#{close}) end %(#{title_element}#{equation}) end # FIXME: The reason this method is so complicated is because we are not # receiving empty(marked) cells when there are colspans or rowspans. This # method has to create a map of all cells and in the case of rowspans # create empty cells as placeholders of the span. # To fix this, asciidoctor needs to provide an API to tell the user if a # given cell is being used as a colspan or rowspan. def table node result = [] if node.title? result << %(.sp .it 1 an-trap .nr an-no-space-flag 1 .nr an-break-flag 1 .br .B #{manify node.captioned_title}) end result << '.TS allbox tab(:);' row_header = [] row_text = [] row_index = 0 [:head, :body, :foot].each do |tsec| node.rows[tsec].each do |row| row_header[row_index] ||= [] row_text[row_index] ||= [] # result << LF # l left-adjusted # r right-adjusted # c centered-adjusted # n numerical align # a alphabetic align # s spanned # ^ vertically spanned remaining_cells = row.size row.each_with_index do |cell, cell_index| remaining_cells -= 1 row_header[row_index][cell_index] ||= [] # Add an empty cell if this is a rowspan cell if row_header[row_index][cell_index] == ['^t'] row_text[row_index] << %(T{#{LF}.sp#{LF}T}:) end row_text[row_index] << %(T{#{LF}.sp#{LF}) cell_halign = (cell.attr 'halign', 'left')[0..0] if tsec == :head if row_header[row_index].empty? 
|| row_header[row_index][cell_index].empty? row_header[row_index][cell_index] << %(#{cell_halign}tB) else row_header[row_index][cell_index + 1] ||= [] row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) end row_text[row_index] << %(#{cell.text}#{LF}) elsif tsec == :body if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index] << %(#{cell_halign}t) else row_header[row_index][cell_index + 1] ||= [] row_header[row_index][cell_index + 1] << %(#{cell_halign}t) end case cell.style when :asciidoc cell_content = cell.content when :verse, :literal cell_content = cell.text else cell_content = cell.content.join end row_text[row_index] << %(#{cell_content}#{LF}) elsif tsec == :foot if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index] << %(#{cell_halign}tB) else row_header[row_index][cell_index + 1] ||= [] row_header[row_index][cell_index + 1] << %(#{cell_halign}tB) end row_text[row_index] << %(#{cell.text}#{LF}) end if cell.colspan && cell.colspan > 1 (cell.colspan - 1).times do |i| if row_header[row_index].empty? || row_header[row_index][cell_index].empty? row_header[row_index][cell_index + i] << 'st' else row_header[row_index][cell_index + 1 + i] ||= [] row_header[row_index][cell_index + 1 + i] << 'st' end end end if cell.rowspan && cell.rowspan > 1 (cell.rowspan - 1).times do |i| row_header[row_index + 1 + i] ||= [] if row_header[row_index + 1 + i].empty? || row_header[row_index + 1 + i][cell_index].empty? row_header[row_index + 1 + i][cell_index] ||= [] row_header[row_index + 1 + i][cell_index] << '^t' else row_header[row_index + 1 + i][cell_index + 1] ||= [] row_header[row_index + 1 + i][cell_index + 1] << '^t' end end end if remaining_cells >= 1 row_text[row_index] << 'T}:' else row_text[row_index] << %(T}#{LF}) end end row_index += 1 end end #row_header.each do |row| # result << LF # row.each_with_index do |cell, i| # result << (cell.join ' ') # result << ' ' if row.size > i + 1 # end #end # FIXME temporary fix to get basic table to display result << LF result << row_header.first.map {|r| 'lt'}.join(' ') result << %(.#{LF}) row_text.each do |row| result << row.join end result << %(.TE#{LF}.sp) result.join end def thematic_break node '.sp .ce \l\'\n(.lu*25u/100u\(ap\'' end alias :toc :skip def ulist node result = [] result << %(.sp .B #{manify node.title} .br) if node.title? node.items.map {|item| result << %[.sp .RS 4 .ie n \\{\\ \\h'-04'\\(bu\\h'+03'\\c .\\} .el \\{\\ .sp -1 .IP \\(bu 2.3 .\\} #{manify item.text}] result << item.content if item.blocks? result << '.RE' } result * LF end # FIXME git uses [verse] for the synopsis; detect this special case def verse node result = [] if node.title? result << %(.sp .B #{manify node.title} .br) end attribution_line = (node.attr? 'citetitle') ? %(#{node.attr 'citetitle'} ) : nil attribution_line = (node.attr? 'attribution') ? %(#{attribution_line}\\\(em #{node.attr 'attribution'}) : nil result << %(.sp .nf #{manify node.content} .fi .br) if attribution_line result << %(.in +.5i .ll -.5i #{attribution_line} .in .ll) end result * LF end def video node start_param = (node.attr? 'start', nil, false) ? %(&start=#{node.attr 'start'}) : nil end_param = (node.attr? 'end', nil, false) ? 
%(&end=#{node.attr 'end'}) : nil %(.sp #{manify node.captioned_title} (video) <#{node.media_uri(node.attr 'target')}#{start_param}#{end_param}>) end def inline_anchor node target = node.target case node.type when :link if (text = node.text) == target text = nil else text = text.gsub '"', '\\(dq' end if target.start_with? 'mailto:' macro = 'MTO' target = target[7..-1].sub('@', '\\(at') else macro = 'URL' end %(#{LF}.#{macro} "#{target}" "#{text}" ) when :xref refid = (node.attr 'refid') || target node.text || (node.document.references[:ids][refid] || %([#{refid}])) when :ref, :bibref # These are anchor points, which shouldn't be visual '' else warn %(asciidoctor: WARNING: unknown anchor type: #{node.type.inspect}) end end def inline_break node %(#{node.text} .br) end def inline_button node %(\\fB[\\ #{node.text}\\ ]\\fP) end def inline_callout node %[\\fB(#{node.text})\\fP] end # TODO supposedly groff has footnotes, but we're in search of an example def inline_footnote node if (index = node.attr 'index') %([#{index}]) elsif node.type == :xref %([#{node.text}]) end end def inline_image node # NOTE alt should always be set alt_text = (node.attr? 'alt') ? (node.attr 'alt') : node.target (node.attr? 'link') ? %([#{alt_text}] <#{node.attr 'link'}>) : %([#{alt_text}]) end def inline_indexterm node node.type == :visible ? node.text : '' end def inline_kbd node if (keys = node.attr 'keys').size == 1 keys[0] else keys.join '\ +\ ' end end def inline_menu node caret = '\ \(fc\ ' menu = node.attr 'menu' if !(submenus = node.attr 'submenus').empty? submenu_path = submenus.map {|item| %(\\fI#{item}\\fP) }.join caret %(\\fI#{menu}\\fP#{caret}#{submenu_path}#{caret}\\fI#{node.attr 'menuitem'}\\fP) elsif (menuitem = node.attr 'menuitem') %(\\fI#{menu}#{caret}#{menuitem}\\fP) else %(\\fI#{menu}\\fP) end end # NOTE use fake <BOUNDARY> element to prevent creating artificial word boundaries def inline_quoted node case node.type when :emphasis %[\\fI<BOUNDARY>#{node.text}</BOUNDARY>\\fP] when :strong %[\\fB<BOUNDARY>#{node.text}</BOUNDARY>\\fP] when :monospaced %[\\f[CR]<BOUNDARY>#{node.text}</BOUNDARY>\\fP] when :single %[\\(oq<BOUNDARY>#{node.text}</BOUNDARY>\\(cq] when :double %[\\(lq<BOUNDARY>#{node.text}</BOUNDARY>\\(rq] else node.text end end def resolve_content node node.content_model == :compound ? node.content : %(.sp#{LF}#{manify node.content}) end end end
1
5,130
Could you reduce this to one line and change the first argument to a single-quoted backslash string? Perhaps we can add this as an option named `:preserve_backslash` to the manify method. That way, we don't need an extra method in the first two cases. Obviously, we still need it for the inline case.
asciidoctor-asciidoctor
rb
@@ -49,10 +49,10 @@ var _ = Context("with initialized Felix, etcd datastore, 3 workloads", func() { defaultProfile := api.NewProfile() defaultProfile.Name = "default" defaultProfile.Spec.LabelsToApply = map[string]string{"default": ""} - defaultProfile.Spec.EgressRules = []api.Rule{{Action: "allow"}} + defaultProfile.Spec.EgressRules = []api.Rule{{Action: api.Allow}} defaultProfile.Spec.IngressRules = []api.Rule{{ - Action: "allow", - Source: api.EntityRule{Tag: "default"}, + Action: api.Allow, + Source: api.EntityRule{Selector: "default == ''"}, }} _, err := client.Profiles().Create(utils.Ctx, defaultProfile, utils.NoOptions) Expect(err).NotTo(HaveOccurred())
1
// +build fvtests // Copyright (c) 2017 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package fv_test import ( "strconv" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/projectcalico/felix/fv/containers" "github.com/projectcalico/felix/fv/utils" "github.com/projectcalico/felix/fv/workload" api "github.com/projectcalico/libcalico-go/lib/apis/v2" client "github.com/projectcalico/libcalico-go/lib/clientv2" ) // So that we can say 'HaveConnectivityTo' without the 'workload.' prefix... var HaveConnectivityTo = workload.HaveConnectivityTo var _ = Context("with initialized Felix, etcd datastore, 3 workloads", func() { var ( etcd *containers.Container felix *containers.Container client client.Interface w [3]*workload.Workload ) BeforeEach(func() { felix, etcd, client = containers.StartSingleNodeEtcdTopology() // Install a default profile that allows workloads with this profile to talk to each // other, in the absence of any Policy. defaultProfile := api.NewProfile() defaultProfile.Name = "default" defaultProfile.Spec.LabelsToApply = map[string]string{"default": ""} defaultProfile.Spec.EgressRules = []api.Rule{{Action: "allow"}} defaultProfile.Spec.IngressRules = []api.Rule{{ Action: "allow", Source: api.EntityRule{Tag: "default"}, }} _, err := client.Profiles().Create(utils.Ctx, defaultProfile, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) // Create three workloads, using that profile. 
for ii := range w { iiStr := strconv.Itoa(ii) w[ii] = workload.Run(felix, "w"+iiStr, "cali1"+iiStr, "10.65.0.1"+iiStr, "8055", "tcp") w[ii].Configure(client) } }) AfterEach(func() { if CurrentGinkgoTestDescription().Failed { felix.Exec("iptables-save", "-c") felix.Exec("ip", "r") } for ii := range w { w[ii].Stop() } felix.Stop() if CurrentGinkgoTestDescription().Failed { etcd.Exec("etcdctl", "ls", "--recursive", "/") } etcd.Stop() }) It("full connectivity to and from workload 0", func() { Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[2]).To(HaveConnectivityTo(w[0])) Expect(w[0]).To(HaveConnectivityTo(w[1])) Expect(w[0]).To(HaveConnectivityTo(w[2])) }) Context("with ingress-only restriction for workload 0", func() { BeforeEach(func() { policy := api.NewNetworkPolicy() policy.Namespace = "fv" policy.Name = "policy-1" allowFromW1 := api.Rule{ Action: "allow", Source: api.EntityRule{ Selector: w[1].NameSelector(), }, } policy.Spec.IngressRules = []api.Rule{allowFromW1} policy.Spec.Selector = w[0].NameSelector() _, err := client.NetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) }) It("only w1 can connect into w0, but egress from w0 is unrestricted", func() { Eventually(w[2], "10s", "1s").ShouldNot(HaveConnectivityTo(w[0])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[0]).To(HaveConnectivityTo(w[1])) Expect(w[0]).To(HaveConnectivityTo(w[1])) }) }) Context("with egress-only restriction for workload 0", func() { BeforeEach(func() { policy := api.NewNetworkPolicy() policy.Namespace = "fv" policy.Name = "policy-1" allowToW1 := api.Rule{ Action: "allow", Destination: api.EntityRule{ Selector: w[1].NameSelector(), }, } policy.Spec.EgressRules = []api.Rule{allowToW1} policy.Spec.Selector = w[0].NameSelector() _, err := client.NetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) }) It("ingress to w0 is unrestricted, but w0 can only connect out to w1", func() { Eventually(w[0], "10s", "1s").ShouldNot(HaveConnectivityTo(w[2])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[2]).To(HaveConnectivityTo(w[0])) Expect(w[0]).To(HaveConnectivityTo(w[1])) }) }) Context("with ingress rules and types [ingress,egress]", func() { BeforeEach(func() { policy := api.NewNetworkPolicy() policy.Namespace = "fv" policy.Name = "policy-1" allowFromW1 := api.Rule{ Action: "allow", Source: api.EntityRule{ Selector: w[1].NameSelector(), }, } policy.Spec.IngressRules = []api.Rule{allowFromW1} policy.Spec.Selector = w[0].NameSelector() policy.Spec.Types = []api.PolicyType{api.PolicyTypeIngress, api.PolicyTypeEgress} _, err := client.NetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) }) It("only w1 can connect into w0, and all egress from w0 is denied", func() { Eventually(w[2], "10s", "1s").ShouldNot(HaveConnectivityTo(w[0])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[0]).NotTo(HaveConnectivityTo(w[1])) Expect(w[0]).NotTo(HaveConnectivityTo(w[2])) }) }) Context("with an egress deny rule", func() { var policy *api.NetworkPolicy BeforeEach(func() { policy = api.NewNetworkPolicy() policy.Namespace = "fv" policy.Name = "policy-1" allowFromW1 := api.Rule{ Action: "allow", Source: api.EntityRule{ Selector: w[1].NameSelector(), }, } policy.Spec.IngressRules = []api.Rule{allowFromW1} policy.Spec.EgressRules = []api.Rule{{Action: "deny"}} policy.Spec.Selector = w[0].NameSelector() }) JustBeforeEach(func() { _, err := client.NetworkPolicies().Create(utils.Ctx, policy, utils.NoOptions) 
Expect(err).NotTo(HaveOccurred()) }) Describe("and types [ingress] (i.e. disabling the egress rule)", func() { BeforeEach(func() { policy.Spec.Types = []api.PolicyType{api.PolicyTypeIngress} }) It("only w1 can connect into w0, and all egress from w0 is allowed", func() { Eventually(w[2], "10s", "1s").ShouldNot(HaveConnectivityTo(w[0])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[0]).To(HaveConnectivityTo(w[1])) Expect(w[0]).To(HaveConnectivityTo(w[2])) }) }) Describe("and types [ingress, egress]", func() { BeforeEach(func() { policy.Spec.Types = []api.PolicyType{api.PolicyTypeIngress, api.PolicyTypeEgress} }) It("only w1 can connect into w0, and all egress from w0 is blocked", func() { Eventually(w[2], "10s", "1s").ShouldNot(HaveConnectivityTo(w[0])) Expect(w[1]).To(HaveConnectivityTo(w[0])) Expect(w[0]).NotTo(HaveConnectivityTo(w[1])) Expect(w[0]).NotTo(HaveConnectivityTo(w[2])) }) }) }) })
1
15,865
I think `has(default)` is the preferred way of doing this.
projectcalico-felix
c
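The review comment above suggests `has(default)` in place of the equality selector used in the patch. A minimal sketch of what that rule could look like, assuming the same libcalico-go v2 API types the patch already uses; only the selector string differs from the diff:

// Sketch only, not taken from the PR: default profile with a has() selector
// instead of the equality form "default == ''" shown in the patch above.
defaultProfile := api.NewProfile()
defaultProfile.Name = "default"
defaultProfile.Spec.LabelsToApply = map[string]string{"default": ""}
defaultProfile.Spec.EgressRules = []api.Rule{{Action: api.Allow}}
defaultProfile.Spec.IngressRules = []api.Rule{{
	Action: api.Allow,
	Source: api.EntityRule{Selector: "has(default)"},
}}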
@@ -235,7 +235,7 @@ public class BaseRewriteManifests extends SnapshotProducer<RewriteManifests> imp keptManifests.add(manifest); } else { rewrittenManifests.add(manifest); - try (ManifestReader reader = ManifestFiles.read(manifest, ops.io(), ops.current().specsById()) + try (ManifestReader<DataFile> reader = ManifestFiles.read(manifest, ops.io(), ops.current().specsById()) .select(Arrays.asList("*"))) { reader.liveEntries().forEach( entry -> appendEntry(entry, clusterByFunc.apply(entry.file()), manifest.partitionSpecId())
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg; import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; import org.apache.iceberg.exceptions.RuntimeIOException; import org.apache.iceberg.exceptions.ValidationException; import org.apache.iceberg.io.InputFile; import org.apache.iceberg.io.OutputFile; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.relocated.com.google.common.collect.Iterables; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.util.Pair; import org.apache.iceberg.util.Tasks; import org.apache.iceberg.util.ThreadPools; import static org.apache.iceberg.TableProperties.MANIFEST_TARGET_SIZE_BYTES; import static org.apache.iceberg.TableProperties.MANIFEST_TARGET_SIZE_BYTES_DEFAULT; import static org.apache.iceberg.TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED; import static org.apache.iceberg.TableProperties.SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT; public class BaseRewriteManifests extends SnapshotProducer<RewriteManifests> implements RewriteManifests { private static final String KEPT_MANIFESTS_COUNT = "manifests-kept"; private static final String CREATED_MANIFESTS_COUNT = "manifests-created"; private static final String REPLACED_MANIFESTS_COUNT = "manifests-replaced"; private static final String PROCESSED_ENTRY_COUNT = "entries-processed"; private final TableOperations ops; private final Map<Integer, PartitionSpec> specsById; private final long manifestTargetSizeBytes; private final boolean snapshotIdInheritanceEnabled; private final Set<ManifestFile> deletedManifests = Sets.newHashSet(); private final List<ManifestFile> addedManifests = Lists.newArrayList(); private final List<ManifestFile> rewrittenAddedManifests = Lists.newArrayList(); private final Collection<ManifestFile> keptManifests = new ConcurrentLinkedQueue<>(); private final Collection<ManifestFile> newManifests = new ConcurrentLinkedQueue<>(); private final Set<ManifestFile> rewrittenManifests = Sets.newConcurrentHashSet(); private final Map<Object, WriterWrapper> writers = Maps.newConcurrentMap(); private final AtomicLong entryCount = new AtomicLong(0); private Function<DataFile, Object> clusterByFunc; private Predicate<ManifestFile> predicate; private final 
SnapshotSummary.Builder summaryBuilder = SnapshotSummary.builder(); BaseRewriteManifests(TableOperations ops) { super(ops); this.ops = ops; this.specsById = ops.current().specsById(); this.manifestTargetSizeBytes = ops.current().propertyAsLong(MANIFEST_TARGET_SIZE_BYTES, MANIFEST_TARGET_SIZE_BYTES_DEFAULT); this.snapshotIdInheritanceEnabled = ops.current() .propertyAsBoolean(SNAPSHOT_ID_INHERITANCE_ENABLED, SNAPSHOT_ID_INHERITANCE_ENABLED_DEFAULT); } @Override protected RewriteManifests self() { return this; } @Override protected String operation() { return DataOperations.REPLACE; } @Override public RewriteManifests set(String property, String value) { summaryBuilder.set(property, value); return this; } @Override protected Map<String, String> summary() { int createdManifestsCount = newManifests.size() + addedManifests.size() + rewrittenAddedManifests.size(); summaryBuilder.set(CREATED_MANIFESTS_COUNT, String.valueOf(createdManifestsCount)); summaryBuilder.set(KEPT_MANIFESTS_COUNT, String.valueOf(keptManifests.size())); summaryBuilder.set(REPLACED_MANIFESTS_COUNT, String.valueOf(rewrittenManifests.size() + deletedManifests.size())); summaryBuilder.set(PROCESSED_ENTRY_COUNT, String.valueOf(entryCount.get())); return summaryBuilder.build(); } @Override public RewriteManifests clusterBy(Function<DataFile, Object> func) { this.clusterByFunc = func; return this; } @Override public RewriteManifests rewriteIf(Predicate<ManifestFile> pred) { this.predicate = pred; return this; } @Override public RewriteManifests deleteManifest(ManifestFile manifest) { deletedManifests.add(manifest); return this; } @Override public RewriteManifests addManifest(ManifestFile manifest) { Preconditions.checkArgument(!manifest.hasAddedFiles(), "Cannot add manifest with added files"); Preconditions.checkArgument(!manifest.hasDeletedFiles(), "Cannot add manifest with deleted files"); Preconditions.checkArgument( manifest.snapshotId() == null || manifest.snapshotId() == -1, "Snapshot id must be assigned during commit"); Preconditions.checkArgument(manifest.sequenceNumber() == -1, "Sequence must be assigned during commit"); if (snapshotIdInheritanceEnabled && manifest.snapshotId() == null) { addedManifests.add(manifest); } else { // the manifest must be rewritten with this update's snapshot ID ManifestFile copiedManifest = copyManifest(manifest); rewrittenAddedManifests.add(copiedManifest); } return this; } private ManifestFile copyManifest(ManifestFile manifest) { TableMetadata current = ops.current(); InputFile toCopy = ops.io().newInputFile(manifest.path()); OutputFile newFile = newManifestOutput(); return ManifestFiles.copyRewriteManifest( current.formatVersion(), toCopy, specsById, newFile, snapshotId(), summaryBuilder); } @Override public List<ManifestFile> apply(TableMetadata base) { List<ManifestFile> currentManifests = base.currentSnapshot().dataManifests(); Set<ManifestFile> currentManifestSet = ImmutableSet.copyOf(currentManifests); validateDeletedManifests(currentManifestSet); if (requiresRewrite(currentManifestSet)) { performRewrite(currentManifests); } else { keepActiveManifests(currentManifests); } validateFilesCounts(); Iterable<ManifestFile> newManifestsWithMetadata = Iterables.transform( Iterables.concat(newManifests, addedManifests, rewrittenAddedManifests), manifest -> GenericManifestFile.copyOf(manifest).withSnapshotId(snapshotId()).build()); // put new manifests at the beginning List<ManifestFile> apply = Lists.newArrayList(); Iterables.addAll(apply, newManifestsWithMetadata); 
apply.addAll(keptManifests); apply.addAll(base.currentSnapshot().deleteManifests()); return apply; } private boolean requiresRewrite(Set<ManifestFile> currentManifests) { if (clusterByFunc == null) { // manifests are deleted and added directly so don't perform a rewrite return false; } if (rewrittenManifests.size() == 0) { // nothing yet processed so perform a full rewrite return true; } // if any processed manifest is not in the current manifest list, perform a full rewrite return rewrittenManifests.stream().anyMatch(manifest -> !currentManifests.contains(manifest)); } private void keepActiveManifests(List<ManifestFile> currentManifests) { // keep any existing manifests as-is that were not processed keptManifests.clear(); currentManifests.stream() .filter(manifest -> !rewrittenManifests.contains(manifest) && !deletedManifests.contains(manifest)) .forEach(manifest -> keptManifests.add(manifest)); } private void reset() { cleanUncommitted(newManifests, ImmutableSet.of()); entryCount.set(0); keptManifests.clear(); rewrittenManifests.clear(); newManifests.clear(); writers.clear(); } private void performRewrite(List<ManifestFile> currentManifests) { reset(); List<ManifestFile> remainingManifests = currentManifests.stream() .filter(manifest -> !deletedManifests.contains(manifest)) .collect(Collectors.toList()); try { Tasks.foreach(remainingManifests) .executeWith(ThreadPools.getWorkerPool()) .run(manifest -> { if (predicate != null && !predicate.test(manifest)) { keptManifests.add(manifest); } else { rewrittenManifests.add(manifest); try (ManifestReader reader = ManifestFiles.read(manifest, ops.io(), ops.current().specsById()) .select(Arrays.asList("*"))) { reader.liveEntries().forEach( entry -> appendEntry(entry, clusterByFunc.apply(entry.file()), manifest.partitionSpecId()) ); } catch (IOException x) { throw new RuntimeIOException(x); } } }); } finally { Tasks.foreach(writers.values()).executeWith(ThreadPools.getWorkerPool()).run(writer -> writer.close()); } } private void validateDeletedManifests(Set<ManifestFile> currentManifests) { // directly deleted manifests must be still present in the current snapshot deletedManifests.stream() .filter(manifest -> !currentManifests.contains(manifest)) .findAny() .ifPresent(manifest -> { throw new ValidationException("Manifest is missing: %s", manifest.path()); }); } private void validateFilesCounts() { Iterable<ManifestFile> createdManifests = Iterables.concat(newManifests, addedManifests, rewrittenAddedManifests); int createdManifestsFilesCount = activeFilesCount(createdManifests); Iterable<ManifestFile> replacedManifests = Iterables.concat(rewrittenManifests, deletedManifests); int replacedManifestsFilesCount = activeFilesCount(replacedManifests); if (createdManifestsFilesCount != replacedManifestsFilesCount) { throw new ValidationException( "Replaced and created manifests must have the same number of active files: %d (new), %d (old)", createdManifestsFilesCount, replacedManifestsFilesCount); } } private int activeFilesCount(Iterable<ManifestFile> manifests) { int activeFilesCount = 0; for (ManifestFile manifest : manifests) { Preconditions.checkNotNull(manifest.addedFilesCount(), "Missing file counts in %s", manifest.path()); Preconditions.checkNotNull(manifest.existingFilesCount(), "Missing file counts in %s", manifest.path()); activeFilesCount += manifest.addedFilesCount(); activeFilesCount += manifest.existingFilesCount(); } return activeFilesCount; } private void appendEntry(ManifestEntry<DataFile> entry, Object key, int partitionSpecId) { 
Preconditions.checkNotNull(entry, "Manifest entry cannot be null"); Preconditions.checkNotNull(key, "Key cannot be null"); WriterWrapper writer = getWriter(key, partitionSpecId); writer.addEntry(entry); entryCount.incrementAndGet(); } private WriterWrapper getWriter(Object key, int partitionSpecId) { return writers.computeIfAbsent( Pair.of(key, partitionSpecId), k -> new WriterWrapper(specsById.get(partitionSpecId))); } @Override protected void cleanUncommitted(Set<ManifestFile> committed) { cleanUncommitted(newManifests, committed); // clean up only rewrittenAddedManifests as they are always owned by the table // don't clean up addedManifests as they are added to the manifest list and are not compacted cleanUncommitted(rewrittenAddedManifests, committed); } private void cleanUncommitted(Iterable<ManifestFile> manifests, Set<ManifestFile> committedManifests) { for (ManifestFile manifest : manifests) { if (!committedManifests.contains(manifest)) { deleteFile(manifest.path()); } } } long getManifestTargetSizeBytes() { return manifestTargetSizeBytes; } class WriterWrapper { private final PartitionSpec spec; private ManifestWriter<DataFile> writer; WriterWrapper(PartitionSpec spec) { this.spec = spec; } synchronized void addEntry(ManifestEntry<DataFile> entry) { if (writer == null) { writer = newManifestWriter(spec); } else if (writer.length() >= getManifestTargetSizeBytes()) { close(); writer = newManifestWriter(spec); } writer.existing(entry); } synchronized void close() { if (writer != null) { try { writer.close(); newManifests.add(writer.toManifestFile()); } catch (IOException x) { throw new RuntimeIOException(x); } } } } }
1
20,466
Does this mean we can only do rewrites if there are no `DeleteFile`s?
apache-iceberg
java
@@ -96,10 +96,12 @@ class InstallTest(QuiltTestCase): """ table_data, table_hash = self.make_table_data() file_data, file_hash = self.make_file_data() - contents, contents_hash = self.make_contents(table=table_hash, file=file_hash) + contents, contents_hash = self.make_contents(table=table_hash, + file=file_hash) self._mock_tag('foo/bar', 'latest', contents_hash, team='qux') - self._mock_package('foo/bar', contents_hash, '', contents, [table_hash, file_hash], team='qux') + self._mock_package('foo/bar', contents_hash, '', + contents, [table_hash, file_hash], team='qux') self._mock_s3(table_hash, table_data) self._mock_s3(file_hash, file_data)
1
""" Tests for the install command. """ import hashlib import json import os import time import requests import responses from six import assertRaisesRegex from six.moves import urllib from ..tools import command from ..tools.const import HASH_TYPE from ..tools.core import ( decode_node, encode_node, hash_contents, FileNode, GroupNode, PackageFormat, TableNode, RootNode, ) from ..tools.package import Package from ..tools.store import PackageStore from ..tools.util import gzip_compress from .utils import QuiltTestCase class InstallTest(QuiltTestCase): """ Unit tests for quilt install. """ @classmethod def make_table_data(cls, string="table"): table_data = (string * 10).encode('utf-8') h = hashlib.new(HASH_TYPE) h.update(table_data) table_hash = h.hexdigest() return table_data, table_hash @classmethod def make_file_data(cls, string="file"): file_data = (string * 10).encode('utf-8') h = hashlib.new(HASH_TYPE) h.update(file_data) file_hash = h.hexdigest() return file_data, file_hash @classmethod def make_contents(cls, **args): contents = RootNode(dict( group=GroupNode(dict([ (key, TableNode([val], PackageFormat.default.value) if 'table' in key else FileNode([val])) for key, val in args.items()] )) )) return contents, hash_contents(contents) def test_install_latest(self): """ Install the latest update of a package. """ table_data, table_hash = self.make_table_data() file_data, file_hash = self.make_file_data() contents, contents_hash = self.make_contents(table=table_hash, file=file_hash) self._mock_tag('foo/bar', 'latest', contents_hash) self._mock_package('foo/bar', contents_hash, '', contents, [table_hash, file_hash]) self._mock_s3(table_hash, table_data) self._mock_s3(file_hash, file_data) command.install('foo/bar') teststore = PackageStore(self._store_dir) with open(os.path.join(teststore.package_path(None, 'foo', 'bar'), Package.CONTENTS_DIR, contents_hash)) as fd: file_contents = json.load(fd, object_hook=decode_node) assert file_contents == contents with open(teststore.object_path(objhash=table_hash), 'rb') as fd: contents = fd.read() assert contents == table_data with open(teststore.object_path(objhash=file_hash), 'rb') as fd: contents = fd.read() assert contents == file_data def test_install_team_latest(self): """ Install the latest team update of a package. 
""" table_data, table_hash = self.make_table_data() file_data, file_hash = self.make_file_data() contents, contents_hash = self.make_contents(table=table_hash, file=file_hash) self._mock_tag('foo/bar', 'latest', contents_hash, team='qux') self._mock_package('foo/bar', contents_hash, '', contents, [table_hash, file_hash], team='qux') self._mock_s3(table_hash, table_data) self._mock_s3(file_hash, file_data) command.install('qux:foo/bar') teststore = PackageStore(self._store_dir) with open(os.path.join(teststore.package_path('qux', 'foo', 'bar'), Package.CONTENTS_DIR, contents_hash)) as fd: file_contents = json.load(fd, object_hook=decode_node) assert file_contents == contents with open(teststore.object_path(objhash=table_hash), 'rb') as fd: contents = fd.read() assert contents == table_data with open(teststore.object_path(objhash=file_hash), 'rb') as fd: contents = fd.read() assert contents == file_data def test_short_hashes(self): """ Test various functions that use short hashes """ table_data, table_hash = self.make_table_data() file_data, file_hash = self.make_file_data() contents, contents_hash = self.make_contents(table=table_hash, file=file_hash) self._mock_log('foo/bar', contents_hash) self._mock_tag('foo/bar', 'mytag', contents_hash[0:6], cmd=responses.PUT) command.tag_add('foo/bar', 'mytag', contents_hash[0:6]) self._mock_version('foo/bar', '1.0', contents_hash[0:6], cmd=responses.PUT) command.version_add('foo/bar', '1.0', contents_hash[0:6], force=True) def test_install_subpackage(self): """ Install a part of a package. """ table_data, table_hash = self.make_table_data() contents, contents_hash = self.make_contents(table=table_hash) self._mock_tag('foo/bar', 'latest', contents_hash) self._mock_package('foo/bar', contents_hash, 'group/table', contents, [table_hash]) self._mock_s3(table_hash, table_data) command.install('foo/bar/group/table') teststore = PackageStore(self._store_dir) with open(os.path.join(teststore.package_path(None, 'foo', 'bar'), Package.CONTENTS_DIR, contents_hash)) as fd: file_contents = json.load(fd, object_hook=decode_node) assert file_contents == contents with open(teststore.object_path(objhash=table_hash), 'rb') as fd: contents = fd.read() assert contents == table_data def validate_file(self, user, package, contents_hash, contents, table_hash, table_data): teststore = PackageStore(self._store_dir) with open(os.path.join(teststore.package_path(None, user, package), Package.CONTENTS_DIR, contents_hash), 'r') as fd: file_contents = json.load(fd, object_hook=decode_node) assert file_contents == contents with open(teststore.object_path(objhash=table_hash), 'rb') as fd: contents = fd.read() assert contents == table_data def getmtime(self, user, package, contents_hash): teststore = PackageStore(self._store_dir) return os.path.getmtime(os.path.join(teststore.package_path(None, user, package), Package.CONTENTS_DIR, contents_hash)) def test_install_dependencies(self): """ Install multiple packages via requirements file """ table_data1, table_hash1 = self.make_table_data('table1') contents1, contents_hash1 = self.make_contents(table1=table_hash1) self._mock_tag('foo/bar', 'latest', contents_hash1) self._mock_package('foo/bar', contents_hash1, '', contents1, [table_hash1]) self._mock_s3(table_hash1, table_data1) table_data2, table_hash2 = self.make_table_data('table2') contents2, contents_hash2 = self.make_contents(table2=table_hash2) self._mock_tag('baz/bat', 'nexttag', contents_hash2) self._mock_package('baz/bat', contents_hash2, '', contents2, [table_hash2]) 
self._mock_s3(table_hash2, table_data2) table_data3, table_hash3 = self.make_table_data('table3') contents3, contents_hash3 = self.make_contents(table3=table_hash3) self._mock_version('usr1/pkga', 'v1', contents_hash3) self._mock_package('usr1/pkga', contents_hash3, '', contents3, [table_hash3]) self._mock_s3(table_hash3, table_data3) table_data4, table_hash4 = self.make_table_data('table4') contents4, contents_hash4 = self.make_contents(table4=table_hash4) self._mock_tag('usr2/pkgb', 'latest', contents_hash4) self._mock_package('usr2/pkgb', contents_hash4, '', contents4, [table_hash4]) self._mock_s3(table_hash4, table_data4) table_data5, table_hash5 = self.make_table_data('table5') contents5, contents_hash5 = self.make_contents(table5=table_hash5) self._mock_log('usr3/pkgc', contents_hash5) self._mock_package('usr3/pkgc', contents_hash5, '', contents5, [table_hash5]) self._mock_s3(table_hash5, table_data5) table_data6, table_hash6 = self.make_table_data('table6') contents6, contents_hash6 = self.make_contents(table6=table_hash6) self._mock_tag('danWebster/sgRNAs', 'latest', contents_hash6) self._mock_package('danWebster/sgRNAs', contents_hash6, 'libraries/brunello', contents6, [table_hash6]) self._mock_s3(table_hash6, table_data6) # inline test of quilt.yml command.install(''' packages: - foo/bar:t:latest # comment - baz/bat:t:nexttag - usr1/pkga:version:v1 - usr2/pkgb - usr3/pkgc:h:SHORTHASH5 - danWebster/sgRNAs/libraries/brunello # subpath '''.replace('SHORTHASH5', contents_hash5[0:8])) # short hash self.validate_file('foo', 'bar', contents_hash1, contents1, table_hash1, table_data1) self.validate_file('baz','bat', contents_hash2, contents2, table_hash2, table_data2) self.validate_file('usr1','pkga', contents_hash3, contents3, table_hash3, table_data3) self.validate_file('usr2','pkgb', contents_hash4, contents4, table_hash4, table_data4) self.validate_file('usr3','pkgc', contents_hash5, contents5, table_hash5, table_data5) self.validate_file('danWebster', 'sgRNAs', contents_hash6, contents6, table_hash6, table_data6) # check that installation happens in the order listed in quilt.yml assert (self.getmtime('foo','bar', contents_hash1) <= self.getmtime('baz','bat', contents_hash2) <= self.getmtime('usr1','pkga', contents_hash3) <= self.getmtime('usr2','pkgb', contents_hash4) <= self.getmtime('usr3','pkgc', contents_hash5) <= self.getmtime('danWebster', 'sgRNAs', contents_hash6)) # test reading from file table_data7, table_hash7 = self.make_table_data('table7') contents7, contents_hash7 = self.make_contents(table7=table_hash7) self._mock_tag('usr4/pkgd', 'latest', contents_hash7) self._mock_package('usr4/pkgd', contents_hash7, '', contents7, [table_hash7]) self._mock_s3(table_hash7, table_data7) with open('tmp_quilt.yml', 'w') as fd: fd.write("packages:\n- usr4/pkgd") fd.close() command.install('@tmp_quilt.yml') def test_bad_install_dependencies(self): """ Install multiple packages via requirements file """ table_data1, table_hash1 = self.make_table_data('table1') contents1, contents_hash1 = self.make_contents(table1=table_hash1) # missing/malformed requests with assertRaisesRegex(self, command.CommandException, "package name is empty"): command.install(" ") with assertRaisesRegex(self, command.CommandException, "file not found: quilt.yml"): command.install("@quilt.yml") with assertRaisesRegex(self, command.CommandException, "Specify package as"): command.install("packages:\n") with assertRaisesRegex(self, command.CommandException, "Specify package as"): command.install("packages:\n- foo") 
with assertRaisesRegex(self, command.CommandException, "Invalid versioninfo"): command.install("packages:\n- foo/bar:xxx:bar") with assertRaisesRegex(self, Exception, "No such file or directory"): self.validate_file('foo', 'bar', contents_hash1, contents1, table_hash1, table_data1) def test_quilt_yml_unknown_hash(self): table_data1, table_hash1 = self.make_table_data('table1') contents1, contents_hash1 = self.make_contents(table1=table_hash1) self._mock_log('akarve/sales', contents_hash1) with assertRaisesRegex(self, command.CommandException, "Invalid hash"): command.install("packages:\n- akarve/sales:h:123456") def test_quilt_yml_unknown_tag(self): table_data1, table_hash1 = self.make_table_data('table1') contents1, contents_hash1 = self.make_contents(table1=table_hash1) self._mock_tag('akarve/sales', 'unknown', contents_hash1, status=404, message='Tag unknown does not exist') with assertRaisesRegex(self, command.CommandException, "Tag unknown does not exist"): command.install("packages:\n- akarve/sales:t:unknown") def test_quilt_yml_unknown_version(self): table_data1, table_hash1 = self.make_table_data('table1') contents1, contents_hash1 = self.make_contents(table1=table_hash1) self._mock_version('akarve/sales', '99.99', contents_hash1, status=404, message='Version 99.99 does not exist') with assertRaisesRegex(self, command.CommandException, "Version 99.99 does not exist"): command.install("packages:\n- akarve/sales:v:99.99") def test_quilt_yml_unknown_subpath(self): table_data1, table_hash1 = self.make_table_data('table1') contents1, contents_hash1 = self.make_contents(table1=table_hash1) self._mock_tag('baz/bat', 'latest', contents_hash1) self._mock_package('baz/bat', contents_hash1, 'badsubpath', contents1, [table_hash1], status=404, message='Invalid subpath') with assertRaisesRegex(self, command.CommandException, "Invalid subpath"): command.install("packages:\n- baz/bat/badsubpath") def test_bad_contents_hash(self): """ Test that a package with a bad contents hash fails installation. """ tabledata = b'Bad package' h = hashlib.new(HASH_TYPE) h.update(tabledata) obj_hash = h.hexdigest() contents = GroupNode(dict( foo=GroupNode(dict( bar=TableNode([obj_hash], PackageFormat.default.value) )) )) contents_hash = 'e867010701edc0b1c8be177e02a93aa3cb1342bb1123046e1f6b40e428c6048e' self._mock_tag('foo/bar', 'latest', contents_hash) self._mock_package('foo/bar', contents_hash, '', contents, [obj_hash]) with assertRaisesRegex(self, command.CommandException, "Mismatched hash"): command.install('foo/bar') assert not os.path.exists(os.path.join(self._store_dir, 'foo/bar.json')) def test_bad_object_hash(self): """ Test that a package with a file hash mismatch fails installation. """ tabledata = b'Bad package' h = hashlib.new(HASH_TYPE) h.update(tabledata) obj_hash = 'e867010701edc0b1c8be177e02a93aa3cb1342bb1123046e1f6b40e428c6048e' contents = GroupNode(dict( foo=GroupNode(dict( bar=TableNode([obj_hash], PackageFormat.default.value) )) )) contents_hash = hash_contents(contents) self._mock_tag('foo/bar', 'latest', contents_hash) self._mock_package('foo/bar', contents_hash, '', contents, [obj_hash]) self._mock_s3(obj_hash, tabledata) with self.assertRaises(command.CommandException): command.install('foo/bar') assert not os.path.exists(os.path.join(self._store_dir, 'foo/bar.json')) def test_resume_download(self): """ Test that existing objects don't get re-downloaded - unless their hash is wrong. 
""" file_data_list = [] file_hash_list = [] for i in range(2): file_data, file_hash = self.make_file_data('file%d' % i) file_data_list.append(file_data) file_hash_list.append(file_hash) contents = RootNode(dict( file0=FileNode([file_hash_list[0]]), file1=FileNode([file_hash_list[1]]), )) contents_hash = hash_contents(contents) # Create a package store object to use its path helpers teststore = PackageStore(self._store_dir) # file0 already exists. teststore.create_dirs() with open(teststore.object_path(objhash=file_hash_list[0]), 'wb') as fd: fd.write(file_data_list[0]) # file1 does not exist. self._mock_tag('foo/bar', 'latest', contents_hash) self._mock_package('foo/bar', contents_hash, '', contents, file_hash_list) # Don't mock file0, since it's not supposed to be downloaded. self._mock_s3(file_hash_list[1], file_data_list[1]) command.install('foo/bar') def test_download_retry(self): table_data, table_hash = self.make_table_data() contents, contents_hash = self.make_contents(table=table_hash) s3_url = 'https://example.com/%s' % table_hash error = requests.exceptions.ConnectionError("Timeout") # Fail to install after 3 timeouts. self._mock_tag('foo/bar', 'latest', contents_hash) self._mock_package('foo/bar', contents_hash, 'group/table', contents, [table_hash]) self.requests_mock.add(responses.GET, s3_url, body=error) self.requests_mock.add(responses.GET, s3_url, body=error) self.requests_mock.add(responses.GET, s3_url, body=error) self._mock_s3(table_hash, table_data) # We won't actually get to this one. with self.assertRaises(command.CommandException): command.install('foo/bar/group/table') self.requests_mock.reset() # Succeed after 2 timeouts and a successful response. self._mock_tag('foo/bar', 'latest', contents_hash) self._mock_package('foo/bar', contents_hash, 'group/table', contents, [table_hash]) self.requests_mock.add(responses.GET, s3_url, body=error) self.requests_mock.add(responses.GET, s3_url, body=error) self._mock_s3(table_hash, table_data) command.install('foo/bar/group/table') def _mock_log(self, package, pkg_hash): log_url = '%s/api/log/%s/' % (command.get_registry_url(None), package) self.requests_mock.add(responses.GET, log_url, json.dumps({'logs': [ {'created': int(time.time()), 'hash': pkg_hash, 'author': 'author' } ]})) def _mock_tag(self, package, tag, pkg_hash, cmd=responses.GET, status=200, message=None, team=None): tag_url = '%s/api/tag/%s/%s' % (command.get_registry_url(team), package, tag) self.requests_mock.add(cmd, tag_url, json.dumps( dict(message=message) if message else dict(hash=pkg_hash) ), status=status) def _mock_version(self, package, version, pkg_hash, cmd=responses.GET, status=200, message=None, team=None): version_url = '%s/api/version/%s/%s' % (command.get_registry_url(team), package, version) self.requests_mock.add(cmd, version_url, json.dumps( dict(message=message) if message else dict(hash=pkg_hash) ), status=status) def _mock_package(self, package, pkg_hash, subpath, contents, hashes, status=200, message=None, team=None): pkg_url = '%s/api/package/%s/%s?%s' % ( command.get_registry_url(team), package, pkg_hash, urllib.parse.urlencode(dict(subpath=subpath)) ) self.requests_mock.add(responses.GET, pkg_url, body=json.dumps( dict(message=message) if message else dict(contents=contents, urls={h: 'https://example.com/%s' % h for h in hashes}) , default=encode_node), match_querystring=True, status=status) def _mock_s3(self, pkg_hash, contents): s3_url = 'https://example.com/%s' % pkg_hash headers = { 'Content-Range': 'bytes 0-%d/%d' % (len(contents) 
- 1, len(contents)) } body = gzip_compress(contents) self.requests_mock.add(responses.GET, s3_url, body, headers=headers)
1
15,831
Actually, these are fine; we're using 110 as the max line length (it's also set in the .pylintrc).
quiltdata-quilt
py
@@ -17,6 +17,7 @@ package main import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "github.com/projectcalico/felix/k8sfv/leastsquares" )
1
// Copyright (c) 2017 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"github.com/projectcalico/felix/k8sfv/leastsquares"
)

var _ = Context("least squares", func() {
	It("should fit a straight line", func() {
		p := []leastsquares.Point{
			{1, 1},
			{2, 2},
			{3, 3},
			{4, 4},
		}
		gradient, constant := leastsquares.LeastSquaresMethod(p)
		Expect(gradient).To(BeNumerically("==", 1))
		Expect(constant).To(BeNumerically("==", 0))
	})
})
1
15,292
No issue here, but what tool are you running to put these imports into the preferred organization? I have a configuration that runs `goimports` whenever I save a file, but it appears that alone is not quite sufficient.
projectcalico-felix
c
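The comment above asks which tool produces the preferred import organization. One way to keep project-local packages grouped separately from third-party imports (an assumed convention here, not one stated in the thread) is goimports with its -local flag, e.g. `goimports -local github.com/projectcalico -w .`, which would keep the block roughly like this:

// Illustrative grouping only; the actual felix style guide may differ.
import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/projectcalico/felix/k8sfv/leastsquares"
)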
@@ -131,6 +131,7 @@ public class DTAFileReaderSpi extends TabularDataFileReaderSpi{ @Override public boolean canDecodeInput(BufferedInputStream stream) throws IOException { + //who closes this stream? if (stream ==null){ throw new IllegalArgumentException("stream == null!"); }
1
/* Copyright (C) 2005-2014, by the President and Fellows of Harvard College. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Dataverse Network - A web application to share, preserve and analyze research data. Developed at the Institute for Quantitative Social Science, Harvard University. Version 3.0. */ package edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.dta; import edu.harvard.iq.dataverse.ingest.tabulardata.TabularDataFileReader; import edu.harvard.iq.dataverse.ingest.tabulardata.spi.TabularDataFileReaderSpi; import java.io.*; import java.nio.*; import java.nio.channels.*; import java.util.logging.*; import javax.imageio.IIOException; import java.util.*; import org.apache.commons.codec.binary.Hex; /** * Service Provider registration class for the Stata/DTA ingest plugin. * Based on the code originally developed by Akio Sone, HMDC/ODUM * for v.2 of the DVN. * * @author Leonid Andreev * @author asone */ public class DTAFileReaderSpi extends TabularDataFileReaderSpi{ private static Map<Byte, String> stataReleaseNumber = new HashMap<Byte, String>(); static { stataReleaseNumber.put((byte) 104, "rel_3"); stataReleaseNumber.put((byte) 105, "rel_4or5"); stataReleaseNumber.put((byte) 108, "rel_6"); stataReleaseNumber.put((byte) 110, "rel_7first"); stataReleaseNumber.put((byte) 111, "rel_7scnd"); stataReleaseNumber.put((byte) 113, "rel_8_or_9"); stataReleaseNumber.put((byte) 114, "rel_10"); } private static String[] formatNames = {"dta", "DTA"}; private static String[] extensions = {"dta"}; private static String[] mimeType = {"application/x-stata"}; private static Logger dbgLog = Logger.getLogger( DTAFileReaderSpi.class.getPackage().getName()); private static int DTA_HEADER_SIZE = 4; public DTAFileReaderSpi() { super("HU-IQSS-DataVerse-project", "4.0", formatNames, extensions, mimeType, "edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.dta.DTAFileReaderSpi"); dbgLog.fine("DTAFileReaderSpi is called"); } public String getDescription(Locale locale) { return "HU-IQSS-DataVerse-project Stata File Ingest plugin"; } @Override public boolean canDecodeInput(Object source) throws IOException { if (!(source instanceof BufferedInputStream)) { return false; } if (source ==null){ throw new IllegalArgumentException("stream == null!"); } BufferedInputStream stream = (BufferedInputStream)source; dbgLog.fine("applying the dta test\n"); byte[] b = new byte[DTA_HEADER_SIZE]; if (stream.markSupported()){ stream.mark(0); } int nbytes = stream.read(b, 0, DTA_HEADER_SIZE); if (nbytes == 0){ throw new IOException(); } if (stream.markSupported()){ stream.reset(); } dbgLog.info("hex dump: 1st 4bytes =>" + new String(Hex.encodeHex(b)) + "<-"); if (b[2] != 1) { dbgLog.fine("3rd byte is not 1: given file is not stata-dta type"); return false; } else if ((b[1] != 1) && (b[1] != 2)) { dbgLog.fine("2nd byte is neither 0 nor 1: this file is not stata-dta type"); return false; } else if (!DTAFileReaderSpi.stataReleaseNumber.containsKey(b[0])) { dbgLog.fine("1st byte (" + b[0]+ ") is not within the ingestable range [rel. 
3-10]:"+ "this file is NOT stata-dta type"); return false; } else { dbgLog.fine("this file is stata-dta type: " + DTAFileReaderSpi.stataReleaseNumber.get(b[0]) + "(No in byte=" + b[0] + ")"); return true; } } @Override public boolean canDecodeInput(BufferedInputStream stream) throws IOException { if (stream ==null){ throw new IllegalArgumentException("stream == null!"); } dbgLog.fine("applying the dta test\n"); byte[] b = new byte[DTA_HEADER_SIZE]; if (stream.markSupported()){ stream.mark(0); } int nbytes = stream.read(b, 0, DTA_HEADER_SIZE); if (nbytes == 0){ throw new IOException(); } if (stream.markSupported()){ stream.reset(); } dbgLog.info("hex dump: 1st 4bytes =>" + new String(Hex.encodeHex(b)) + "<-"); if (b[2] != 1) { dbgLog.fine("3rd byte is not 1: given file is not stata-dta type"); return false; } else if ((b[1] != 1) && (b[1] != 2)) { dbgLog.fine("2nd byte is neither 0 nor 1: this file is not stata-dta type"); return false; } else if (!DTAFileReaderSpi.stataReleaseNumber.containsKey(b[0])) { dbgLog.fine("1st byte (" + b[0]+ ") is not within the ingestable range [rel. 3-10]:"+ "this file is NOT stata-dta type"); return false; } else { dbgLog.fine("this file is stata-dta type: " + DTAFileReaderSpi.stataReleaseNumber.get(b[0]) + "(No in HEX=" + b[0] + ")"); return true; } } @Override public boolean canDecodeInput(File file) throws IOException { if (file ==null){ throw new IllegalArgumentException("file == null!"); } if (!file.canRead()){ throw new IIOException("cannot read the input file"); } // set-up a FileChannel instance for a given file object FileChannel srcChannel = new FileInputStream(file).getChannel(); // create a read-only MappedByteBuffer MappedByteBuffer buff = srcChannel.map(FileChannel.MapMode.READ_ONLY, 0, DTA_HEADER_SIZE); //printHexDump(buff, "hex dump of the byte-buffer"); buff.rewind(); dbgLog.fine("applying the dta test\n"); byte[] hdr4 = new byte[4]; buff.get(hdr4, 0, 4); dbgLog.fine("hex dump: 1st 4bytes =>" + new String(Hex.encodeHex(hdr4)) + "<-"); if (hdr4[2] != 1) { dbgLog.fine("3rd byte is not 1: given file is not stata-dta type"); return false; } else if ((hdr4[1] != 1) && (hdr4[1] != 2)) { dbgLog.fine("2nd byte is neither 0 nor 1: this file is not stata-dta type"); return false; } else if (!stataReleaseNumber.containsKey(hdr4[0])) { dbgLog.fine("1st byte (" + hdr4[0] + ") is not within the ingestable range [rel. 3-10]: this file is NOT stata-dta type"); return false; } else { dbgLog.fine("this file is stata-dta type: " + stataReleaseNumber.get(hdr4[0]) + "(No in HEX=" + hdr4[0] + ")"); return true; } } @Override public TabularDataFileReader createReaderInstance(Object ext) throws IIOException { return new DTAFileReader(this); } }
1
43,123
I'm guessing this method could/should close it, but I didn't see where it was getting called.
IQSS-dataverse
java
@@ -50,11 +50,13 @@ public class ExecutableFlow extends ExecutableFlowBase { public static final String AZKABANFLOWVERSION_PARAM = "azkabanFlowVersion"; public static final String IS_LOCKED_PARAM = "isLocked"; public static final String FLOW_LOCK_ERROR_MESSAGE_PARAM = "flowLockErrorMessage"; + public static final String EXECUTION_SOURCE = "execution_source"; private final HashSet<String> proxyUsers = new HashSet<>(); private int executionId = -1; private int scheduleId = -1; private int projectId; + private String executionSource; private String projectName; private String lastModifiedUser; private int version;
1
/* * Copyright 2013 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.flow.Flow; import azkaban.project.Project; import azkaban.sla.SlaOption; import azkaban.utils.Props; import azkaban.utils.TypedMapWrapper; import com.sun.istack.NotNull; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; public class ExecutableFlow extends ExecutableFlowBase { public static final String EXECUTIONID_PARAM = "executionId"; public static final String EXECUTIONPATH_PARAM = "executionPath"; public static final String EXECUTIONOPTIONS_PARAM = "executionOptions"; public static final String PROJECTID_PARAM = "projectId"; public static final String SCHEDULEID_PARAM = "scheduleId"; public static final String SUBMITUSER_PARAM = "submitUser"; public static final String SUBMITTIME_PARAM = "submitTime"; public static final String VERSION_PARAM = "version"; public static final String PROXYUSERS_PARAM = "proxyUsers"; public static final String PROJECTNAME_PARAM = "projectName"; public static final String LASTMODIFIEDTIME_PARAM = "lastModfiedTime"; public static final String LASTMODIFIEDUSER_PARAM = "lastModifiedUser"; public static final String SLAOPTIONS_PARAM = "slaOptions"; public static final String AZKABANFLOWVERSION_PARAM = "azkabanFlowVersion"; public static final String IS_LOCKED_PARAM = "isLocked"; public static final String FLOW_LOCK_ERROR_MESSAGE_PARAM = "flowLockErrorMessage"; private final HashSet<String> proxyUsers = new HashSet<>(); private int executionId = -1; private int scheduleId = -1; private int projectId; private String projectName; private String lastModifiedUser; private int version; private long submitTime = -1; private long lastModifiedTimestamp; private String submitUser; private String executionPath; private ExecutionOptions executionOptions; private double azkabanFlowVersion; private boolean isLocked; private ExecutableFlowRampMetadata executableFlowRampMetadata; private String flowLockErrorMessage; // For Flow_Status_Changed event private String failedJobId = "unknown"; private String modifiedBy = "unknown"; // For slaOption information private String slaOptionStr = "null"; public ExecutableFlow(final Project project, final Flow flow) { this.projectId = project.getId(); this.projectName = project.getName(); this.version = project.getVersion(); this.scheduleId = -1; this.lastModifiedTimestamp = project.getLastModifiedTimestamp(); this.lastModifiedUser = project.getLastModifiedUser(); setAzkabanFlowVersion(flow.getAzkabanFlowVersion()); setLocked(flow.isLocked()); setFlowLockErrorMessage(flow.getFlowLockErrorMessage()); this.setFlow(project, flow); } public ExecutableFlow() { } public static ExecutableFlow createExecutableFlow(final Object obj, final Status status) { final ExecutableFlow exFlow = new ExecutableFlow(); final HashMap<String, Object> flowObj = 
(HashMap<String, Object>) obj; exFlow.fillExecutableFromMapObject(flowObj); // overwrite status from the flow data blob as that one should NOT be used exFlow.setStatus(status); return exFlow; } @Override public String getId() { return getFlowId(); } @Override public ExecutableFlow getExecutableFlow() { return this; } public void addAllProxyUsers(final Collection<String> proxyUsers) { this.proxyUsers.addAll(proxyUsers); } public Set<String> getProxyUsers() { return new HashSet<>(this.proxyUsers); } public ExecutionOptions getExecutionOptions() { return this.executionOptions; } public void setExecutionOptions(final ExecutionOptions options) { this.executionOptions = options; } @Override protected void setFlow(final Project project, final Flow flow) { super.setFlow(project, flow); this.executionOptions = new ExecutionOptions(); this.executionOptions.setMailCreator(flow.getMailCreator()); if (flow.getSuccessEmails() != null) { this.executionOptions.setSuccessEmails(flow.getSuccessEmails()); } if (flow.getFailureEmails() != null) { this.executionOptions.setFailureEmails(flow.getFailureEmails()); } } @Override public int getExecutionId() { return this.executionId; } public void setExecutionId(final int executionId) { this.executionId = executionId; } @Override public long getLastModifiedTimestamp() { return this.lastModifiedTimestamp; } public void setLastModifiedTimestamp(final long lastModifiedTimestamp) { this.lastModifiedTimestamp = lastModifiedTimestamp; } @Override public String getLastModifiedByUser() { return this.lastModifiedUser; } public void setLastModifiedByUser(final String lastModifiedUser) { this.lastModifiedUser = lastModifiedUser; } @Override public int getProjectId() { return this.projectId; } public void setProjectId(final int projectId) { this.projectId = projectId; } @Override public String getProjectName() { return this.projectName; } public int getScheduleId() { return this.scheduleId; } public void setScheduleId(final int scheduleId) { this.scheduleId = scheduleId; } public String getExecutionPath() { return this.executionPath; } public void setExecutionPath(final String executionPath) { this.executionPath = executionPath; } public String getSubmitUser() { return this.submitUser; } public void setSubmitUser(final String submitUser) { this.submitUser = submitUser; } @Override public int getVersion() { return this.version; } public void setVersion(final int version) { this.version = version; } public long getSubmitTime() { return this.submitTime; } public void setSubmitTime(final long submitTime) { this.submitTime = submitTime; } public double getAzkabanFlowVersion() { return this.azkabanFlowVersion; } public void setAzkabanFlowVersion(final double azkabanFlowVersion) { this.azkabanFlowVersion = azkabanFlowVersion; } public boolean isLocked() { return this.isLocked; } public void setLocked(boolean locked) { this.isLocked = locked; } public String getFlowLockErrorMessage() { return this.flowLockErrorMessage; } public void setFlowLockErrorMessage(final String flowLockErrorMessage) { this.flowLockErrorMessage = flowLockErrorMessage; } public String getSlaOptionStr() { return slaOptionStr; } @Override public Map<String, Object> toObject() { final HashMap<String, Object> flowObj = new HashMap<>(); fillMapFromExecutable(flowObj); flowObj.put(EXECUTIONID_PARAM, this.executionId); flowObj.put(EXECUTIONPATH_PARAM, this.executionPath); flowObj.put(PROJECTID_PARAM, this.projectId); flowObj.put(PROJECTNAME_PARAM, this.projectName); if (this.scheduleId >= 0) { 
flowObj.put(SCHEDULEID_PARAM, this.scheduleId); } flowObj.put(SUBMITUSER_PARAM, this.submitUser); flowObj.put(VERSION_PARAM, this.version); flowObj.put(LASTMODIFIEDTIME_PARAM, this.lastModifiedTimestamp); flowObj.put(LASTMODIFIEDUSER_PARAM, this.lastModifiedUser); flowObj.put(AZKABANFLOWVERSION_PARAM, this.azkabanFlowVersion); flowObj.put(EXECUTIONOPTIONS_PARAM, this.executionOptions.toObject()); final ArrayList<String> proxyUserList = new ArrayList<>(this.proxyUsers); flowObj.put(PROXYUSERS_PARAM, proxyUserList); flowObj.put(SUBMITTIME_PARAM, this.submitTime); final List<Map<String, Object>> slaOptions = new ArrayList<>(); List<SlaOption> slaOptionList = this.executionOptions.getSlaOptions(); if (slaOptionList != null) { for (SlaOption slaOption : slaOptionList) { slaOptions.add(slaOption.toObject()); } } flowObj.put(SLAOPTIONS_PARAM, slaOptions); flowObj.put(IS_LOCKED_PARAM, this.isLocked); flowObj.put(FLOW_LOCK_ERROR_MESSAGE_PARAM, this.flowLockErrorMessage); return flowObj; } @Override public void fillExecutableFromMapObject( final TypedMapWrapper<String, Object> flowObj) { super.fillExecutableFromMapObject(flowObj); this.executionId = flowObj.getInt(EXECUTIONID_PARAM); this.executionPath = flowObj.getString(EXECUTIONPATH_PARAM); this.projectId = flowObj.getInt(PROJECTID_PARAM); this.projectName = flowObj.getString(PROJECTNAME_PARAM); this.scheduleId = flowObj.getInt(SCHEDULEID_PARAM); this.submitUser = flowObj.getString(SUBMITUSER_PARAM); this.version = flowObj.getInt(VERSION_PARAM); this.lastModifiedTimestamp = flowObj.getLong(LASTMODIFIEDTIME_PARAM); this.lastModifiedUser = flowObj.getString(LASTMODIFIEDUSER_PARAM); this.submitTime = flowObj.getLong(SUBMITTIME_PARAM); this.azkabanFlowVersion = flowObj.getDouble(AZKABANFLOWVERSION_PARAM); if (flowObj.containsKey(EXECUTIONOPTIONS_PARAM)) { this.executionOptions = ExecutionOptions.createFromObject(flowObj .getObject(EXECUTIONOPTIONS_PARAM)); } else { // for backwards compatibility should remove in a few versions. 
this.executionOptions = ExecutionOptions.createFromObject(flowObj); } if (flowObj.containsKey(PROXYUSERS_PARAM)) { final List<String> proxyUserList = flowObj.<String>getList(PROXYUSERS_PARAM); this.addAllProxyUsers(proxyUserList); } if (flowObj.containsKey(SLAOPTIONS_PARAM)) { final List<SlaOption> slaOptions = flowObj.getList(SLAOPTIONS_PARAM).stream().map(SlaOption::fromObject) .collect(Collectors.toList()); this.executionOptions.setSlaOptions(slaOptions); // Fill slaOptionStr a comma delimited String of slaOptions StringBuilder slaBuilder = new StringBuilder(); for (SlaOption slaOption: slaOptions){ slaBuilder.append(slaOption.toString()); slaBuilder.append(';'); } this.slaOptionStr = slaBuilder.toString(); } this.setLocked(flowObj.getBool(IS_LOCKED_PARAM, false)); this.setFlowLockErrorMessage(flowObj.getString(FLOW_LOCK_ERROR_MESSAGE_PARAM, null)); } @Override public Map<String, Object> toUpdateObject(final long lastUpdateTime) { final Map<String, Object> updateData = super.toUpdateObject(lastUpdateTime); updateData.put(EXECUTIONID_PARAM, this.executionId); return updateData; } @Override public void resetForRetry() { super.resetForRetry(); this.setStatus(Status.RUNNING); } public ExecutableFlowRampMetadata getExecutableFlowRampMetadata() { return executableFlowRampMetadata; } public void setExecutableFlowRampMetadata(ExecutableFlowRampMetadata executableFlowRampMetadata) { this.executableFlowRampMetadata = executableFlowRampMetadata; } /** * Get the Relative Flow Directory against project directory */ public String getDirectory() { return String.valueOf(getProjectId()) + "." + String.valueOf(getVersion()); } /** * Get Ramp Props For Job * @param jobId job Id * @param jobType jobType aka job plugin type * @return ramp Props */ synchronized public Props getRampPropsForJob(@NotNull final String jobId, @NotNull final String jobType) { return Optional.ofNullable(executableFlowRampMetadata) .map(metadata -> metadata.selectRampPropsForJob(jobId, jobType)) .orElse(null); } public void setFailedJobId(String id) { this.failedJobId = id; } public String getFailedJobId() { return failedJobId; } @Override public String getModifiedBy() { return modifiedBy; } @Override public void setModifiedBy(String id) { this.modifiedBy = id; } }
1
19,793
Could you please use `camelCase` in the property name to follow the naming convention used in the rest of the properties?
azkaban-azkaban
java
@@ -12,7 +12,7 @@ import ( ) const ( - fmtErrPublicSubnetsFromEnv = "get public subnet IDs from environment %s: %w" + fmtErrPublicSubnetsFromEnv = "get public subnet IDs from environment %s: %w " fmtErrSecurityGroupsFromEnv = "get security groups from environment %s: %w" envSecurityGroupCFNLogicalIDTagKey = "aws:cloudformation:logical-id"
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package task import ( "fmt" "github.com/aws/copilot-cli/internal/pkg/aws/ec2" "github.com/aws/copilot-cli/internal/pkg/aws/ecs" "github.com/aws/copilot-cli/internal/pkg/deploy" ) const ( fmtErrPublicSubnetsFromEnv = "get public subnet IDs from environment %s: %w" fmtErrSecurityGroupsFromEnv = "get security groups from environment %s: %w" envSecurityGroupCFNLogicalIDTagKey = "aws:cloudformation:logical-id" envSecurityGroupCFNLogicalIDTagValue = "EnvironmentSecurityGroup" ) // Names for tag filters var ( tagFilterNameForApp = fmt.Sprintf(ec2.TagFilterName, deploy.AppTagKey) tagFilterNameForEnv = fmt.Sprintf(ec2.TagFilterName, deploy.EnvTagKey) ) // EnvRunner can run an Amazon ECS task in the VPC and the cluster of an environment. type EnvRunner struct { // Count of the tasks to be launched. Count int // Group Name of the tasks that use the same task definition. GroupName string // App and Env in which the tasks will be launched. App string Env string // Interfaces to interact with dependencies. Must not be nil. VPCGetter VPCGetter ClusterGetter ClusterGetter Starter Runner } // Run runs tasks in the environment of the application, and returns the tasks. func (r *EnvRunner) Run() ([]*Task, error) { if err := r.validateDependencies(); err != nil { return nil, err } cluster, err := r.ClusterGetter.ClusterARN(r.App, r.Env) if err != nil { return nil, fmt.Errorf("get cluster for environment %s: %w", r.Env, err) } filters := r.filtersForVPCFromAppEnv() subnets, err := r.VPCGetter.PublicSubnetIDs(filters...) if err != nil { return nil, fmt.Errorf(fmtErrPublicSubnetsFromEnv, r.Env, err) } if len(subnets) == 0 { return nil, errNoSubnetFound } // Use only environment security group https://github.com/aws/copilot-cli/issues/1882. securityGroups, err := r.VPCGetter.SecurityGroups(append(filters, ec2.Filter{ Name: fmt.Sprintf(ec2.TagFilterName, envSecurityGroupCFNLogicalIDTagKey), Values: []string{envSecurityGroupCFNLogicalIDTagValue}, })...) if err != nil { return nil, fmt.Errorf(fmtErrSecurityGroupsFromEnv, r.Env, err) } ecsTasks, err := r.Starter.RunTask(ecs.RunTaskInput{ Cluster: cluster, Count: r.Count, Subnets: subnets, SecurityGroups: securityGroups, TaskFamilyName: taskFamilyName(r.GroupName), StartedBy: startedBy, }) if err != nil { return nil, &errRunTask{ groupName: r.GroupName, parentErr: err, } } return convertECSTasks(ecsTasks), nil } func (r *EnvRunner) filtersForVPCFromAppEnv() []ec2.Filter { return []ec2.Filter{ { Name: tagFilterNameForEnv, Values: []string{r.Env}, }, { Name: tagFilterNameForApp, Values: []string{r.App}, }, } } func (r *EnvRunner) validateDependencies() error { if r.VPCGetter == nil { return errVPCGetterNil } if r.ClusterGetter == nil { return errClusterGetterNil } if r.Starter == nil { return errStarterNil } return nil }
1
16,604
Can we revert this change? Looks like an extra space was added at the end.
aws-copilot-cli
go
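A quick editor-added sketch (not Copilot source) of why the reviewer flags the trailing space: with fmt.Errorf, any character in the format string after the %w verb ends up verbatim in the wrapped error text, so the extra space leaks into every error message built from that constant.

package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("some underlying AWS error")
	// Format string with the accidental trailing space from the diff above.
	withSpace := fmt.Errorf("get public subnet IDs from environment %s: %w ", "test", base)
	// Original format string without the trailing space.
	withoutSpace := fmt.Errorf("get public subnet IDs from environment %s: %w", "test", base)
	fmt.Printf("%q\n", withSpace.Error())    // note the space before the closing quote
	fmt.Printf("%q\n", withoutSpace.Error()) // clean message, no trailing space
}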
@@ -222,6 +222,9 @@ public class RubyPackageMetadataTransformer implements ModelToViewTransformer { productConfig, ImportSectionView.newBuilder().build(), surfaceNamer)) .hasSmokeTests(hasSmokeTests) .versionPath(surfaceNamer.getVersionIndexFileImportName()) + .versionNamespace( + surfaceNamer.getNamespace( + new InterfaceView().getElementIterable(model).iterator().next())) .build(); }
1
/* Copyright 2016 Google Inc * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.transformer.ruby; import com.google.api.codegen.InterfaceView; import com.google.api.codegen.TargetLanguage; import com.google.api.codegen.config.FlatteningConfig; import com.google.api.codegen.config.GapicProductConfig; import com.google.api.codegen.config.PackageMetadataConfig; import com.google.api.codegen.transformer.DynamicLangApiMethodTransformer; import com.google.api.codegen.transformer.FileHeaderTransformer; import com.google.api.codegen.transformer.GapicInterfaceContext; import com.google.api.codegen.transformer.GapicMethodContext; import com.google.api.codegen.transformer.InitCodeTransformer; import com.google.api.codegen.transformer.ModelToViewTransformer; import com.google.api.codegen.transformer.ModelTypeTable; import com.google.api.codegen.transformer.PackageMetadataTransformer; import com.google.api.codegen.transformer.SurfaceNamer; import com.google.api.codegen.transformer.TestCaseTransformer; import com.google.api.codegen.util.ruby.RubyTypeTable; import com.google.api.codegen.util.testing.StandardValueProducer; import com.google.api.codegen.util.testing.ValueProducer; import com.google.api.codegen.viewmodel.ApiMethodView; import com.google.api.codegen.viewmodel.ImportSectionView; import com.google.api.codegen.viewmodel.InitCodeView; import com.google.api.codegen.viewmodel.OptionalArrayMethodView; import com.google.api.codegen.viewmodel.ViewModel; import com.google.api.tools.framework.model.Interface; import com.google.api.tools.framework.model.Method; import com.google.api.tools.framework.model.Model; import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import java.util.List; /** Responsible for producing package metadata related views for Ruby */ public class RubyPackageMetadataTransformer implements ModelToViewTransformer { private static final String GEMSPEC_FILE = "ruby/gemspec.snip"; private static final String README_FILE = "ruby/README.md.snip"; private static final String README_OUTPUT_FILE = "README.md"; private static final List<String> TOP_LEVEL_FILES = ImmutableList.of("ruby/Gemfile.snip", "ruby/Rakefile.snip", "LICENSE.snip"); private static final List<String> TOP_LEVEL_DOT_FILES = ImmutableList.of("ruby/gitignore.snip", "ruby/rubocop.yml.snip", "ruby/yardopts.snip"); private static final String GITHUB_DOC_HOST = "https://googlecloudplatform.github.io/google-cloud-ruby"; private static final String GITHUB_REPO_HOST = "https://github.com/GoogleCloudPlatform/google-cloud-ruby"; private static final String AUTH_DOC_PATH = "/#/docs/google-cloud/master/guides/authentication"; private static final String LIB_DOC_PATH = "/#/docs/%s/latest/%s"; private static final String MAIN_README_PATH = "/blob/master/README.md"; private static final String VERSIONING_DOC_PATH = "#versioning"; private final FileHeaderTransformer fileHeaderTransformer = new FileHeaderTransformer(new RubyImportSectionTransformer()); private final PackageMetadataConfig 
packageConfig; private final PackageMetadataTransformer metadataTransformer = new PackageMetadataTransformer(); private final ValueProducer valueProducer = new StandardValueProducer(); private final TestCaseTransformer testCaseTransformer = new TestCaseTransformer(valueProducer); private static final String RUBY_PREFIX = "ruby/"; public RubyPackageMetadataTransformer(PackageMetadataConfig packageConfig) { this.packageConfig = packageConfig; } @Override public List<String> getTemplateFileNames() { return ImmutableList.<String>builder() .add(GEMSPEC_FILE) .add(README_FILE) .addAll(TOP_LEVEL_FILES) .addAll(TOP_LEVEL_DOT_FILES) .build(); } @Override public List<ViewModel> transform(Model model, GapicProductConfig productConfig) { RubyPackageMetadataNamer namer = new RubyPackageMetadataNamer(productConfig.getPackageName()); return ImmutableList.<ViewModel>builder() .add(generateGemspecView(model, namer)) .add(generateReadmeView(model, productConfig, namer)) .addAll(generateMetadataViews(model, productConfig, namer, TOP_LEVEL_FILES)) .addAll(generateMetadataViews(model, productConfig, namer, TOP_LEVEL_DOT_FILES, ".")) .build(); } private ViewModel generateGemspecView(Model model, RubyPackageMetadataNamer namer) { return metadataTransformer .generateMetadataView( packageConfig, model, GEMSPEC_FILE, namer.getOutputFileName(), TargetLanguage.RUBY) .identifier(namer.getMetadataIdentifier()) .build(); } private ViewModel generateReadmeView( Model model, GapicProductConfig productConfig, RubyPackageMetadataNamer namer) { List<ApiMethodView> exampleMethods = generateExampleMethods(model, productConfig); return metadataTransformer .generateMetadataView( packageConfig, model, README_FILE, README_OUTPUT_FILE, TargetLanguage.RUBY) .identifier(namer.getMetadataIdentifier()) .fileHeader( fileHeaderTransformer.generateFileHeader( productConfig, ImportSectionView.newBuilder().build(), new RubySurfaceNamer(productConfig.getPackageName()))) .developmentStatusTitle( namer.getReleaseAnnotation(packageConfig.releaseLevel(TargetLanguage.RUBY))) .exampleMethods(exampleMethods) .targetLanguage("Ruby") .mainReadmeLink(GITHUB_REPO_HOST + MAIN_README_PATH) .libraryDocumentationLink( GITHUB_DOC_HOST + String.format( LIB_DOC_PATH, namer.getMetadataIdentifier(), packageConfig.protoPath())) .authDocumentationLink(GITHUB_DOC_HOST + AUTH_DOC_PATH) .versioningDocumentationLink(GITHUB_REPO_HOST + VERSIONING_DOC_PATH) .build(); } // Generates methods used as examples for the README.md file. // This currently generates a list of methods that have smoke test configuration. In the future, // the example methods may be configured separately. 
private List<ApiMethodView> generateExampleMethods( Model model, GapicProductConfig productConfig) { ImmutableList.Builder<ApiMethodView> exampleMethods = ImmutableList.builder(); for (Interface apiInterface : new InterfaceView().getElementIterable(model)) { GapicInterfaceContext context = createContext(apiInterface, productConfig); if (context.getInterfaceConfig().getSmokeTestConfig() != null) { Method method = context.getInterfaceConfig().getSmokeTestConfig().getMethod(); FlatteningConfig flatteningGroup = testCaseTransformer.getSmokeTestFlatteningGroup( context.getMethodConfig(method), context.getInterfaceConfig().getSmokeTestConfig()); GapicMethodContext flattenedMethodContext = context.asFlattenedMethodContext(method, flatteningGroup); exampleMethods.add(createExampleApiMethodView(flattenedMethodContext)); } } return exampleMethods.build(); } private OptionalArrayMethodView createExampleApiMethodView(GapicMethodContext context) { OptionalArrayMethodView initialApiMethodView = new DynamicLangApiMethodTransformer(new RubyApiMethodParamTransformer()) .generateMethod(context); OptionalArrayMethodView.Builder apiMethodView = initialApiMethodView.toBuilder(); InitCodeTransformer initCodeTransformer = new InitCodeTransformer(); InitCodeView initCodeView = initCodeTransformer.generateInitCode( context, testCaseTransformer.createSmokeTestInitContext(context)); apiMethodView.initCode(initCodeView); return apiMethodView.build(); } private List<ViewModel> generateMetadataViews( Model model, GapicProductConfig productConfig, RubyPackageMetadataNamer namer, List<String> snippets) { return generateMetadataViews(model, productConfig, namer, snippets, null); } private List<ViewModel> generateMetadataViews( Model model, GapicProductConfig productConfig, RubyPackageMetadataNamer namer, List<String> snippets, String filePrefix) { ImmutableList.Builder<ViewModel> views = ImmutableList.builder(); for (String template : snippets) { views.add(generateMetadataView(model, productConfig, template, namer, filePrefix)); } return views.build(); } private ViewModel generateMetadataView( Model model, GapicProductConfig productConfig, String template, RubyPackageMetadataNamer namer, String filePrefix) { String noLeadingRubyDir = template.startsWith(RUBY_PREFIX) ? 
template.substring(RUBY_PREFIX.length()) : template; if (!Strings.isNullOrEmpty(filePrefix)) { noLeadingRubyDir = filePrefix + noLeadingRubyDir; } int extensionIndex = noLeadingRubyDir.lastIndexOf("."); String outputPath = noLeadingRubyDir.substring(0, extensionIndex); boolean hasSmokeTests = false; for (Interface apiInterface : new InterfaceView().getElementIterable(model)) { GapicInterfaceContext context = createContext(apiInterface, productConfig); if (context.getInterfaceConfig().getSmokeTestConfig() != null) { hasSmokeTests = true; break; } } SurfaceNamer surfaceNamer = new RubySurfaceNamer(productConfig.getPackageName()); return metadataTransformer .generateMetadataView(packageConfig, model, template, outputPath, TargetLanguage.RUBY) .identifier(namer.getMetadataIdentifier()) .fileHeader( fileHeaderTransformer.generateFileHeader( productConfig, ImportSectionView.newBuilder().build(), surfaceNamer)) .hasSmokeTests(hasSmokeTests) .versionPath(surfaceNamer.getVersionIndexFileImportName()) .build(); } private GapicInterfaceContext createContext( Interface apiInterface, GapicProductConfig productConfig) { return GapicInterfaceContext.create( apiInterface, productConfig, new ModelTypeTable( new RubyTypeTable(productConfig.getPackageName()), new RubyModelTypeNameConverter(productConfig.getPackageName())), new RubySurfaceNamer(productConfig.getPackageName()), new RubyFeatureConfig()); } }
1
22,340
This just grabs any one of the proto services defined in the API, right? What happens when there are multiple (like with PubSub)?
googleapis-gapic-generator
java
@@ -40,7 +40,9 @@ def define_violation(model_name, dbengine): """ base = declarative_base() - violations_tablename = '{}_violations'.format(model_name) + # pylint: disable=too-many-format-args + violations_tablename = 'violations'.format(model_name) + # pylint: enable=too-many-format-args class Violation(base): """Row entry for a violation."""
1
# Copyright 2017 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Database access objects for Forseti Scanner. """ import json from sqlalchemy import Column from sqlalchemy import String, Integer, Text from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker from google.cloud.forseti.services import db # pylint: disable=no-member def define_violation(model_name, dbengine): """Defines table class for violations. A violation table will be created on a per-model basis. Args: model_name (str): name of the current model dbengine (engine): sqlalchemy database engine Returns: ViolationAcccess: facade for accessing violations. """ base = declarative_base() violations_tablename = '{}_violations'.format(model_name) class Violation(base): """Row entry for a violation.""" __tablename__ = violations_tablename id = Column(Integer, primary_key=True) resource_type = Column(String(256), nullable=False) rule_name = Column(String(256)) rule_index = Column(Integer, default=0) violation_type = Column(String(256), nullable=False) data = Column(Text) def __repr__(self): """String representation. Returns: str: string representation of the Violation row entry. """ string = ("<Violation(violation_type='{}', resource_type='{}' " "rule_name='{}')>") return string.format( self.violation_type, self.resource_type, self.rule_name) class ViolationAccess(object): """Facade for violations, implement APIs against violations table.""" TBL_VIOLATIONS = Violation def __init__(self, dbengine): """Constructor for the Violation Access. Args: dbengine (engine): sqlalchemy database engine """ self.engine = dbengine self.violationmaker = self._create_violation_session() def _create_violation_session(self): """Create a session to read from the models table. Returns: ScopedSessionmaker: A scoped session maker that will create a session that is automatically released. """ return db.ScopedSessionMaker( sessionmaker( bind=self.engine, expire_on_commit=False), auto_commit=True) def create(self, violations): """Save violations to the db table. Args: violations (list): A list of violations. """ with self.violationmaker() as session: for violation in violations: violation = self.TBL_VIOLATIONS( resource_type=violation.get('resource_type'), rule_name=violation.get('rule_name'), rule_index=violation.get('rule_index'), violation_type=violation.get('violation_type'), data=json.dumps(violation.get('violation_data')) ) session.add(violation) def list(self): """List all violations from the db table. Returns: list: List of Violation row entry objects. """ with self.violationmaker() as session: return session.query(self.TBL_VIOLATIONS).all() base.metadata.create_all(dbengine) return ViolationAccess
1
28,388
Remove the pylint disable and remove the .format() from the string. The table name is just 'violations'.
forseti-security-forseti-security
py
@@ -8,6 +8,16 @@ class Trail < ActiveRecord::Base friendly_id :name, use: [:slugged, :finders] + # Override setters so it preserves the ordering + def exercise_ids=(new_exercise_ids) + super + new_exercise_ids = new_exercise_ids.reject(&:blank?).map(&:to_i) + + new_exercise_ids.each_with_index do |exercise_id, index| + steps.where(exercise_id: exercise_id).update_all(position: index + 1) + end + end + def steps_remaining_for(user) ExerciseWithProgressQuery. new(user: user, exercises: exercises).
1
class Trail < ActiveRecord::Base extend FriendlyId validates :name, :description, presence: true has_many :steps, -> { order "position ASC" }, dependent: :destroy has_many :exercises, through: :steps friendly_id :name, use: [:slugged, :finders] def steps_remaining_for(user) ExerciseWithProgressQuery. new(user: user, exercises: exercises). count { |exercise| exercise.state != Status::REVIEWED } end def self.most_recent_published order(created_at: :desc).where(published: true) end end
1
12,384
Will this implementation allow you to add new exercises to the trail?
thoughtbot-upcase
rb
@@ -76,9 +76,17 @@ func trapSignalsPosix() { caddyfileToUse = newCaddyfile } + // Backup old event hooks + oldEventHooks := cloneEventHooks() + + // Purge the old event hooks + purgeEventHooks() + // Kick off the restart; our work is done _, err = inst.Restart(caddyfileToUse) if err != nil { + eventHooks = oldEventHooks + log.Printf("[ERROR] SIGUSR1: %v", err) }
1
// Copyright 2015 Light Code Labs, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build !windows,!plan9,!nacl package caddy import ( "log" "os" "os/signal" "syscall" ) // trapSignalsPosix captures POSIX-only signals. func trapSignalsPosix() { go func() { sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGQUIT, syscall.SIGUSR1, syscall.SIGUSR2) for sig := range sigchan { switch sig { case syscall.SIGQUIT: log.Println("[INFO] SIGQUIT: Quitting process immediately") for _, f := range OnProcessExit { f() // only perform important cleanup actions } os.Exit(0) case syscall.SIGTERM: log.Println("[INFO] SIGTERM: Shutting down servers then terminating") exitCode := executeShutdownCallbacks("SIGTERM") for _, f := range OnProcessExit { f() // only perform important cleanup actions } err := Stop() if err != nil { log.Printf("[ERROR] SIGTERM stop: %v", err) exitCode = 3 } os.Exit(exitCode) case syscall.SIGUSR1: log.Println("[INFO] SIGUSR1: Reloading") // Start with the existing Caddyfile caddyfileToUse, inst, err := getCurrentCaddyfile() if err != nil { log.Printf("[ERROR] SIGUSR1: %v", err) continue } if loaderUsed.loader == nil { // This also should never happen log.Println("[ERROR] SIGUSR1: no Caddyfile loader with which to reload Caddyfile") continue } // Load the updated Caddyfile newCaddyfile, err := loaderUsed.loader.Load(inst.serverType) if err != nil { log.Printf("[ERROR] SIGUSR1: loading updated Caddyfile: %v", err) continue } if newCaddyfile != nil { caddyfileToUse = newCaddyfile } // Kick off the restart; our work is done _, err = inst.Restart(caddyfileToUse) if err != nil { log.Printf("[ERROR] SIGUSR1: %v", err) } case syscall.SIGUSR2: log.Println("[INFO] SIGUSR2: Upgrading") if err := Upgrade(); err != nil { log.Printf("[ERROR] SIGUSR2: upgrading: %v", err) } case syscall.SIGHUP: // ignore; this signal is sometimes sent outside of the user's control } } }() }
1
11,924
Actually there is a race condition here, because pointer assignment isn't atomic on some platforms. But this is a rare case that we should rarely hit, so we can just let it go for now. Maybe this is one for the TODO list.
caddyserver-caddy
go
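The reviewer's race-condition remark refers to the plain reassignment of the package-level hook registry on restart failure. As an editor-added sketch (not Caddy code; the hookFn type and helper names are invented for illustration), one way to make the backup/purge/restore sequence safe is to keep the registry behind sync/atomic.Value, so each swap is a single atomic store rather than a plain assignment.

package main

import (
	"fmt"
	"sync/atomic"
)

// hookFn stands in for whatever signature the real event hooks use.
type hookFn func(eventName string, info interface{}) error

// hooks always holds a map[string]hookFn; atomic.Value makes replacing the
// whole map one atomic operation, readable concurrently without tearing.
var hooks atomic.Value

func init() {
	hooks.Store(map[string]hookFn{})
}

// snapshot returns the current registry; callers must treat it as read-only.
func snapshot() map[string]hookFn {
	return hooks.Load().(map[string]hookFn)
}

// replace installs a new registry in one atomic step.
func replace(m map[string]hookFn) {
	hooks.Store(m)
}

func main() {
	old := snapshot()            // backup the old hooks
	replace(map[string]hookFn{}) // purge before the restart
	restartFailed := true        // pretend the restart returned an error
	if restartFailed {
		replace(old) // restore atomically instead of a bare `eventHooks = oldEventHooks`
	}
	fmt.Println("hooks after restore:", len(snapshot()))
}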
@@ -181,6 +181,10 @@ void lbann::distributed_io_buffer::calculate_num_iterations_per_epoch(int num_mo max_mini_batch_size = data_reader->get_num_data(); } + bool partitioned = data_reader->is_partitioned(); + //@todo "if (partitioned)" conditionals below assume one processor per model; + // this needs to be revisited for cases with multiple cpus per model + /// Check to make sure that there is enough data for all of the parallel readers int num_parallel_readers_per_model = compute_max_num_parallel_readers(data_reader->get_num_data(), max_mini_batch_size, m_requested_max_num_parallel_readers); data_reader->set_num_parallel_readers(num_parallel_readers_per_model);
1
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/io/data_buffers/distributed_io_buffer.hpp" #include "lbann/utils/exception.hpp" lbann::distributed_io_buffer::distributed_io_buffer(lbann_comm *comm, int num_parallel_readers, std::map<execution_mode, generic_data_reader *> data_readers, int num_child_layers) : generic_io_buffer(comm, num_parallel_readers, data_readers), m_requested_max_num_parallel_readers(num_parallel_readers), m_num_child_layers(num_child_layers) { m_data_buffers[execution_mode::training] = new data_buffer(comm, num_child_layers); m_data_buffers[execution_mode::validation] = new data_buffer(comm, num_child_layers); m_data_buffers[execution_mode::testing] = new data_buffer(comm, num_child_layers); } int lbann::distributed_io_buffer::fetch_to_local_matrix(generic_data_reader *data_reader, execution_mode mode) { int num_parallel_readers = data_reader->get_num_parallel_readers(); /// Check to see if this rank has valid data -- if not read in the next batch /// Coordinate all available readers so that the perform I/O in the same step data_buffer *buf = get_data_buffer(mode); if (buf->m_root == 0) { if (m_comm->get_rank_in_model() < num_parallel_readers && !buf->m_local_reader_done) { for(auto& m : buf->M_local) { Zero(*m); } /// Each data reader needs to either have independent / split /// data, or take an offset / stride if(buf->M_local.size() == 2) { buf->m_num_samples_in_batch = (*fetch_data_fn)(*buf->M_local[0], *buf->M_local[1], data_reader); }else { buf->m_num_samples_in_batch = (*fetch_data_fn)(*buf->M_local[0], data_reader); } bool data_valid = (buf->m_num_samples_in_batch > 0); if(data_valid) { buf->m_num_data_per_epoch+=buf->m_num_samples_in_batch; } buf->m_local_data_valid = data_valid; } } return buf->m_num_samples_in_batch; } void lbann::distributed_io_buffer::distribute_from_local_matrix(generic_data_reader *data_reader, execution_mode mode, AbsDistMat& sample, AbsDistMat& response) { int num_parallel_readers = data_reader->get_num_parallel_readers(); data_buffer *buf = get_data_buffer(mode); buf->Ms[0]->SetRoot(buf->m_root); buf->Ms[1]->SetRoot(buf->m_root); m_comm->model_barrier(); if (m_comm->get_rank_in_model() == buf->m_root) { if(!buf->m_local_data_valid) { std::stringstream err; err << __FILE__ << " " << __LINE__ << " :: lbann_distributed_io_buffer: No valid data for this step -- local data was invalid"; 
lbann_exception(err.str()); } for (int i = 0; i < 2; i++) { El::Int width = sample.Width(); if(i == 1) { width = response.Width(); } CopyFromRoot((*buf->M_local[i])(El::ALL, El::IR(0, width)), *buf->Ms[i]); } buf->m_local_data_valid = false; buf->m_num_samples_in_batch = 0; } else { for (int i = 0; i < 2; i++) { CopyFromNonRoot(*buf->Ms[i]); } } m_comm->model_barrier(); buf->m_root = (buf->m_root + 1) % num_parallel_readers; Copy(*buf->Ms[0], sample); Copy(*buf->Ms[1], response); return; } bool lbann::distributed_io_buffer::is_data_set_processed(generic_data_reader *data_reader, execution_mode mode) { // not just the ones in the last round. This will ensure that all readers, that had data // will have distributed it. int num_parallel_readers = data_reader->get_num_parallel_readers(); int num_iterations_per_epoch = data_reader->get_num_iterations_per_epoch(); int current_step_in_epoch = data_reader->get_current_step_in_epoch(); // Get the current step before the update function increments it data_buffer *buf = get_data_buffer(mode); bool is_active_reader = (m_comm->get_rank_in_model() < num_parallel_readers) && ((m_comm->get_rank_in_model()+1)%num_parallel_readers == buf->m_root); if(is_active_reader) { if(buf->m_local_data_valid) { /// Make sure that all local data has been processed std::stringstream err; err << __FILE__ << " "<< __LINE__ << " :: lbann_input_layer_distributed_io_buffer: all valid data was not processed."; throw lbann_exception(err.str()); } } buf->m_local_reader_done = !(*update_data_reader_fn)(is_active_reader, data_reader); /// Once all of the readers have finished their part of the mini-batch indicate that the epoch is finished if(current_step_in_epoch == (num_iterations_per_epoch - 1)) { buf->m_local_reader_done = false; buf->m_root = 0; /// When the epoch is finished, make sure that the root node for distributing data is reset because /// if the number of parallel readers does not evenly divide the data set size, the epoch will finish /// without all of the parallel readers participating in the last round. buf->m_num_data_per_epoch = 0; return true; } else { return false; } } /** Make sure that there are enough ranks and data for all of the * parallel readers requested. */ int lbann::distributed_io_buffer::compute_max_num_parallel_readers(long data_set_size, int mini_batch_size, int requested_num_parallel_readers) const { int num_parallel_readers = requested_num_parallel_readers; /// Are there enough ranks in the model to support the requested /// number of parallel readers if(m_comm->get_model_grid().Size() < num_parallel_readers) { if(m_comm->am_model_master()) { std::cout << "Warning the grid size " << m_comm->get_model_grid().Size() << "is smaller than the number of requested parallel readers " << num_parallel_readers << "." << std::endl; } num_parallel_readers = m_comm->get_model_grid().Size(); } /// Check to make sure that there is enough data for all of the parallel readers if(data_set_size != 0) { int max_num_parallel_readers = num_parallel_readers; while(ceil((float)data_set_size / (float)(mini_batch_size * m_comm->get_num_models())) < max_num_parallel_readers) { max_num_parallel_readers--; } if(m_comm->am_world_master() && max_num_parallel_readers != num_parallel_readers) { std::cout << "Warning the training data set size " << data_set_size << " is too small for the number of requested parallel readers " << num_parallel_readers << ", using " << max_num_parallel_readers << "." 
<< std::endl; } return max_num_parallel_readers; } else { return 0; } } void lbann::distributed_io_buffer::calculate_num_iterations_per_epoch(int num_models, int model_rank, int max_mini_batch_size, generic_data_reader *data_reader) { if(data_reader == nullptr) { return; } // If the data reader does not have any data bail out (e.g. unused validation reader) if(data_reader->get_num_data() == 0) { return; } if(max_mini_batch_size > data_reader->get_num_data()) { max_mini_batch_size = data_reader->get_num_data(); } /// Check to make sure that there is enough data for all of the parallel readers int num_parallel_readers_per_model = compute_max_num_parallel_readers(data_reader->get_num_data(), max_mini_batch_size, m_requested_max_num_parallel_readers); data_reader->set_num_parallel_readers(num_parallel_readers_per_model); if(num_parallel_readers_per_model == 0) { throw lbann_exception( std::string{} + __FILE__ + " " + std::to_string(__LINE__) + " :: distributed_io_buffer: number of parallel readers is zero"); } /// Set the basic parameters for stride and offset of the data reader int batch_stride = num_models * num_parallel_readers_per_model * max_mini_batch_size; int base_offset = m_comm->get_rank_in_model() * num_models * max_mini_batch_size; int model_offset = model_rank * max_mini_batch_size; /// Set mini-batch size and stride data_reader->set_mini_batch_size(max_mini_batch_size); data_reader->set_stride_to_next_mini_batch(batch_stride); data_reader->set_sample_stride(1); data_reader->set_iteration_stride(num_parallel_readers_per_model); data_reader->set_reset_mini_batch_index(m_comm->get_rank_in_model()); /// Set data reader base offset and model offset data_reader->set_base_offset(base_offset); data_reader->set_model_offset(model_offset); data_reader->set_initial_position(); int min_stride_across_models = max_mini_batch_size * num_models; /// Given that each model has to have at least one reader, what is the minimum stride data_reader->set_global_mini_batch_size(min_stride_across_models); /// The global mini-batch is a full mini-batch per model data_reader->set_last_mini_batch_size(max_mini_batch_size); /// By default the last mini-batch is a full one data_reader->set_global_last_mini_batch_size(min_stride_across_models); /// By default the last mini-batch is a full one per model int num_whole_mini_batches_per_model = floor(data_reader->get_num_data() / min_stride_across_models); int num_whole_mini_batches_per_reader = floor(num_whole_mini_batches_per_model / num_parallel_readers_per_model); int parallel_readers_with_extra_mini_batch = num_whole_mini_batches_per_model % num_parallel_readers_per_model; int global_partial_mini_batch_size = data_reader->get_num_data() - (num_whole_mini_batches_per_model * min_stride_across_models); int per_model_partial_mini_batch_size = global_partial_mini_batch_size / num_models; int world_master_remainder_data = 0; // Compute how many full "parallel" mini-batches are available //int last_mini_batch_threshold = num_whole_mini_batches_per_model * min_stride_across_models; // BVE FIXME revisit this piece of code if(m_comm->get_rank_in_model() < parallel_readers_with_extra_mini_batch) { num_whole_mini_batches_per_reader += 1; } int world_master_remainder_adjustment = data_reader->get_num_data() - (num_whole_mini_batches_per_model * min_stride_across_models) - (per_model_partial_mini_batch_size * num_models); if(model_rank == 0 && m_comm->get_rank_in_model() == parallel_readers_with_extra_mini_batch) { world_master_remainder_data = 
world_master_remainder_adjustment; world_master_remainder_adjustment = 0; } per_model_partial_mini_batch_size += world_master_remainder_data; if(world_master_remainder_adjustment != 0) { data_reader->set_world_master_mini_batch_adjustment(world_master_remainder_adjustment); } /// If there is a partial mini-batch all readers need to know about it if(per_model_partial_mini_batch_size > 0) { data_reader->set_last_mini_batch_size(per_model_partial_mini_batch_size); data_reader->set_global_last_mini_batch_size(global_partial_mini_batch_size); } // BVE FIXME this is wonky if(global_partial_mini_batch_size != 0) { data_reader->set_num_iterations_per_epoch(num_whole_mini_batches_per_model+1); }else { data_reader->set_num_iterations_per_epoch(num_whole_mini_batches_per_model); } if(data_reader->get_last_mini_batch_size() > max_mini_batch_size) { throw new lbann_exception("Error in calculating the partial mini-batch size, exceeds the max mini-batch size"); } /// Note that model_rank + m_comm->get_rank_in_model() is not equivalent to m_comm->get_world_rank() from a parallel I/O perspective /// Given the data readers model rank, how many models have a higher rank int last_mini_batch_offset = std::max(0, /// Number of complete multi-model mini-batches that will be fetched /// Ranks after current reader ((num_parallel_readers_per_model - m_comm->get_rank_in_model() - 1) /// Ranks on the next round + parallel_readers_with_extra_mini_batch) * min_stride_across_models /// Ranks remaining within the current mini-batch + (num_models - model_rank) * max_mini_batch_size); /// The last mini-batch may be partial and thus a reader may have a smaller stride to get there if(m_comm->get_rank_in_model() == parallel_readers_with_extra_mini_batch && per_model_partial_mini_batch_size > 0) { /// Note that if the parallel reader only has the last mini-batch, its base offset will equal the last mini-batch threshold /// However, it shouldn't need to use the last mini-batch threshold data_reader->set_stride_to_last_mini_batch(last_mini_batch_offset + model_rank * per_model_partial_mini_batch_size + world_master_remainder_adjustment); /// BVE 2/4/18 /// Consider the corner case where there is a very small number of mini-batches /// compared to the number of parallel readers. 
In this case, the base offset /// may be incorrectly computed if(m_comm->get_rank_in_model() == num_whole_mini_batches_per_model) { model_offset = model_rank * per_model_partial_mini_batch_size + world_master_remainder_adjustment; data_reader->set_model_offset(model_offset); data_reader->set_initial_position(); } }else { /// By default last mini-batch the last stride of each reader is part of a regular (full) round data_reader->set_stride_to_last_mini_batch(data_reader->get_stride_to_next_mini_batch()); } // if(m_comm->get_rank_in_model() <= num_parallel_readers_per_model) { // std::cout << "[" << m_comm->get_rank_in_world() << "] " << model_rank << " model rank, "<< m_comm->get_rank_in_model() << " rank in model, num_whole_mini_batches_per_model " << num_whole_mini_batches_per_model << " num_whole_mini_batches_per_reader " << num_whole_mini_batches_per_reader << " parallel_readers_with_extra_mini_batch " << parallel_readers_with_extra_mini_batch << " partial_mini_batch_size=" << per_model_partial_mini_batch_size << " last mini batch size=" << data_reader->get_last_mini_batch_size() << " world_master_remainder_data=" << world_master_remainder_data << " last mini-batch threshold " << last_mini_batch_threshold << " with a last stride of " << data_reader->get_stride_to_last_mini_batch() << " and stride of " << data_reader->get_stride_to_next_mini_batch() << " and there are " << num_parallel_readers_per_model << " parallel readers per model" << " last mini batch offset = " << last_mini_batch_offset << " parallel reader with extra minibatch = " << parallel_readers_with_extra_mini_batch << " model bracket = " << (parallel_readers_with_extra_mini_batch * max_mini_batch_size + per_model_partial_mini_batch_size + world_master_remainder_data) <<" base ofset "<< data_reader->get_base_offset() << " model offset " << data_reader->get_model_offset() << " world master remainder adjustment " << world_master_remainder_adjustment <<std::endl; // } return; } void lbann::distributed_io_buffer::calculate_num_iterations_per_epoch_spanning_models(int max_mini_batch_size, generic_data_reader *data_reader) { calculate_num_iterations_per_epoch(m_comm->get_num_models(), m_comm->get_model_rank(), max_mini_batch_size, data_reader); } void lbann::distributed_io_buffer::calculate_num_iterations_per_epoch_single_model(int max_mini_batch_size, generic_data_reader *data_reader) { calculate_num_iterations_per_epoch(1, 0, max_mini_batch_size, data_reader); }
1
13,029
We should check somewhere here and in the partitioned_io_buffer that the max_mini_batch_size is properly computed for each model and that it is not aggregate (sum) for all models. The global/max_mini_batch_size should be as specified by the user in the prototext/cmd line.
LLNL-lbann
cpp
@@ -132,7 +132,7 @@ const Capability = { * Defines how the driver should handle unexpected alerts. The value should * be one of "accept", "dismiss", or "ignore". */ - UNEXPECTED_ALERT_BEHAVIOR: 'unexpectedAlertBehavior', + UNEXPECTED_ALERT_BEHAVIOUR: 'unexpectedAlertBehaviour', /** Defines the browser version. */ VERSION: 'version'
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. 'use strict'; /** * @fileoverview Defines types related to describing the capabilities of a * WebDriver session. */ const Symbols = require('./symbols'); /** * Recognized browser names. * @enum {string} */ const Browser = { ANDROID: 'android', CHROME: 'chrome', EDGE: 'MicrosoftEdge', FIREFOX: 'firefox', IE: 'internet explorer', INTERNET_EXPLORER: 'internet explorer', IPAD: 'iPad', IPHONE: 'iPhone', OPERA: 'opera', PHANTOM_JS: 'phantomjs', SAFARI: 'safari', HTMLUNIT: 'htmlunit' }; /** * Common Capability keys. * @enum {string} */ const Capability = { /** * Indicates whether a driver should accept all SSL certs by default. This * capability only applies when requesting a new session. To query whether * a driver can handle insecure SSL certs, see {@link #SECURE_SSL}. */ ACCEPT_SSL_CERTS: 'acceptSslCerts', /** * The browser name. Common browser names are defined in the {@link Browser} * enum. */ BROWSER_NAME: 'browserName', /** * Defines how elements should be scrolled into the viewport for interaction. * This capability will be set to zero (0) if elements are aligned with the * top of the viewport, or one (1) if aligned with the bottom. The default * behavior is to align with the top of the viewport. */ ELEMENT_SCROLL_BEHAVIOR: 'elementScrollBehavior', /** * Whether the driver is capable of handling modal alerts (e.g. alert, * confirm, prompt). To define how a driver <i>should</i> handle alerts, * use {@link #UNEXPECTED_ALERT_BEHAVIOR}. */ HANDLES_ALERTS: 'handlesAlerts', /** * Key for the logging driver logging preferences. */ LOGGING_PREFS: 'loggingPrefs', /** * Whether this session generates native events when simulating user input. */ NATIVE_EVENTS: 'nativeEvents', /** * Describes the platform the browser is running on. Will be one of * ANDROID, IOS, LINUX, MAC, UNIX, or WINDOWS. When <i>requesting</i> a * session, ANY may be used to indicate no platform preference (this is * semantically equivalent to omitting the platform capability). */ PLATFORM: 'platform', /** * Describes the proxy configuration to use for a new WebDriver session. */ PROXY: 'proxy', /** Whether the driver supports changing the browser's orientation. */ ROTATABLE: 'rotatable', /** * Whether a driver is only capable of handling secure SSL certs. To request * that a driver accept insecure SSL certs by default, use * {@link #ACCEPT_SSL_CERTS}. */ SECURE_SSL: 'secureSsl', /** Whether the driver supports manipulating the app cache. */ SUPPORTS_APPLICATION_CACHE: 'applicationCacheEnabled', /** Whether the driver supports locating elements with CSS selectors. */ SUPPORTS_CSS_SELECTORS: 'cssSelectorsEnabled', /** Whether the browser supports JavaScript. 
*/ SUPPORTS_JAVASCRIPT: 'javascriptEnabled', /** Whether the driver supports controlling the browser's location info. */ SUPPORTS_LOCATION_CONTEXT: 'locationContextEnabled', /** Whether the driver supports taking screenshots. */ TAKES_SCREENSHOT: 'takesScreenshot', /** * Defines how the driver should handle unexpected alerts. The value should * be one of "accept", "dismiss", or "ignore". */ UNEXPECTED_ALERT_BEHAVIOR: 'unexpectedAlertBehavior', /** Defines the browser version. */ VERSION: 'version' }; /** * Describes how a proxy should be configured for a WebDriver session. * @record */ function ProxyConfig() {} /** * The proxy type. Must be one of {"manual", "pac", "system"}. * @type {string} */ ProxyConfig.prototype.proxyType; /** * URL for the PAC file to use. Only used if {@link #proxyType} is "pac". * @type {(string|undefined)} */ ProxyConfig.prototype.proxyAutoconfigUrl; /** * The proxy host for FTP requests. Only used if {@link #proxyType} is "manual". * @type {(string|undefined)} */ ProxyConfig.prototype.ftpProxy; /** * The proxy host for HTTP requests. Only used if {@link #proxyType} is * "manual". * @type {(string|undefined)} */ ProxyConfig.prototype.httpProxy; /** * The proxy host for HTTPS requests. Only used if {@link #proxyType} is * "manual". * @type {(string|undefined)} */ ProxyConfig.prototype.sslProxy; /** * A comma delimited list of hosts which should bypass all proxies. Only used if * {@link #proxyType} is "manual". * @type {(string|undefined)} */ ProxyConfig.prototype.noProxy; /** * Converts a generic hash object to a map. * @param {!Object<string, ?>} hash The hash object. * @return {!Map<string, ?>} The converted map. */ function toMap(hash) { let m = new Map; for (let key in hash) { if (hash.hasOwnProperty(key)) { m.set(key, hash[key]); } } return m; } /** * Describes a set of capabilities for a WebDriver session. */ class Capabilities extends Map { /** * @param {(Capabilities|Map<string, ?>|Object)=} opt_other Another set of * capabilities to initialize this instance from. */ constructor(opt_other) { if (opt_other && !(opt_other instanceof Map)) { opt_other = toMap(opt_other); } super(opt_other); } /** * @return {!Capabilities} A basic set of capabilities for Android. */ static android() { return new Capabilities() .set(Capability.BROWSER_NAME, Browser.ANDROID) .set(Capability.PLATFORM, 'ANDROID'); } /** * @return {!Capabilities} A basic set of capabilities for Chrome. */ static chrome() { return new Capabilities().set(Capability.BROWSER_NAME, Browser.CHROME); } /** * @return {!Capabilities} A basic set of capabilities for Microsoft Edge. */ static edge() { return new Capabilities() .set(Capability.BROWSER_NAME, Browser.EDGE) .set(Capability.PLATFORM, 'WINDOWS'); } /** * @return {!Capabilities} A basic set of capabilities for Firefox. */ static firefox() { return new Capabilities().set(Capability.BROWSER_NAME, Browser.FIREFOX); } /** * @return {!Capabilities} A basic set of capabilities for Internet Explorer. */ static ie() { return new Capabilities(). set(Capability.BROWSER_NAME, Browser.INTERNET_EXPLORER). set(Capability.PLATFORM, 'WINDOWS'); } /** * @return {!Capabilities} A basic set of capabilities for iPad. */ static ipad() { return new Capabilities(). set(Capability.BROWSER_NAME, Browser.IPAD). set(Capability.PLATFORM, 'MAC'); } /** * @return {!Capabilities} A basic set of capabilities for iPhone. */ static iphone() { return new Capabilities(). set(Capability.BROWSER_NAME, Browser.IPHONE). 
set(Capability.PLATFORM, 'MAC'); } /** * @return {!Capabilities} A basic set of capabilities for Opera. */ static opera() { return new Capabilities(). set(Capability.BROWSER_NAME, Browser.OPERA); } /** * @return {!Capabilities} A basic set of capabilities for PhantomJS. */ static phantomjs() { return new Capabilities(). set(Capability.BROWSER_NAME, Browser.PHANTOM_JS); } /** * @return {!Capabilities} A basic set of capabilities for Safari. */ static safari() { return new Capabilities(). set(Capability.BROWSER_NAME, Browser.SAFARI). set(Capability.PLATFORM, 'MAC'); } /** * @return {!Capabilities} A basic set of capabilities for HTMLUnit. */ static htmlunit() { return new Capabilities(). set(Capability.BROWSER_NAME, Browser.HTMLUNIT); } /** * @return {!Capabilities} A basic set of capabilities for HTMLUnit * with enabled Javascript. */ static htmlunitwithjs() { return new Capabilities(). set(Capability.BROWSER_NAME, Browser.HTMLUNIT). set(Capability.SUPPORTS_JAVASCRIPT, true); } /** * @return {!Object<string, ?>} The JSON representation of this instance. * Note, the returned object may contain nested promised values. * @suppress {checkTypes} Suppress [] access on a struct (state inherited from * Map). */ [Symbols.serialize]() { return serialize(this); } /** * Merges another set of capabilities into this instance. * @param {!(Capabilities|Map<String, ?>|Object<string, ?>)} other The other * set of capabilities to merge. * @return {!Capabilities} A self reference. */ merge(other) { if (!other) { throw new TypeError('no capabilities provided for merge'); } if (!(other instanceof Map)) { other = toMap(other); } for (let key of other.keys()) { this.set(key, other.get(key)); } return this; } /** * @param {string} key The capability key. * @param {*} value The capability value. * @return {!Capabilities} A self reference. * @throws {TypeError} If the `key` is not a string. * @override */ set(key, value) { if (typeof key !== 'string') { throw new TypeError('Capability keys must be strings: ' + typeof key); } super.set(key, value); return this; } /** * Sets the logging preferences. Preferences may be specified as a * {@link ./logging.Preferences} instance, or as a map of log-type to * log-level. * @param {!(./logging.Preferences|Object<string>)} prefs The logging * preferences. * @return {!Capabilities} A self reference. */ setLoggingPrefs(prefs) { return this.set(Capability.LOGGING_PREFS, prefs); } /** * Sets the proxy configuration for this instance. * @param {ProxyConfig} proxy The desired proxy configuration. * @return {!Capabilities} A self reference. */ setProxy(proxy) { return this.set(Capability.PROXY, proxy); } /** * Sets whether native events should be used. * @param {boolean} enabled Whether to enable native events. * @return {!Capabilities} A self reference. */ setEnableNativeEvents(enabled) { return this.set(Capability.NATIVE_EVENTS, enabled); } /** * Sets how elements should be scrolled into view for interaction. * @param {number} behavior The desired scroll behavior: either 0 to align * with the top of the viewport or 1 to align with the bottom. * @return {!Capabilities} A self reference. */ setScrollBehavior(behavior) { return this.set(Capability.ELEMENT_SCROLL_BEHAVIOR, behavior); } /** * Sets the default action to take with an unexpected alert before returning * an error. * @param {string} behavior The desired behavior should be "accept", * "dismiss", or "ignore". Defaults to "dismiss". * @return {!Capabilities} A self reference. 
*/ setAlertBehavior(behavior) { return this.set(Capability.UNEXPECTED_ALERT_BEHAVIOR, behavior); } } /** * Serializes a capabilities object. This is defined as a standalone function * so it may be type checked (where Capabilities[Symbols.serialize] has type * checking disabled since it is defined with [] access on a struct). * * @param {!Capabilities} caps The capabilities to serialize. * @return {!Object<string, ?>} The JSON representation of this instance. * Note, the returned object may contain nested promised values. */ function serialize(caps) { let ret = {}; for (let key of caps.keys()) { let cap = caps.get(key); if (cap !== undefined && cap !== null) { ret[key] = cap; } } return ret; } // PUBLIC API module.exports = { Browser: Browser, Capabilities: Capabilities, Capability: Capability, ProxyConfig: ProxyConfig };
1
14,472
you can change the string value, but not the code constant
SeleniumHQ-selenium
rb
@@ -27,10 +27,10 @@ func TestBech32(t *testing.T) { {"split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w", false}, // invalid checksum {"s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p", false}, // invalid character (space) in hrp {"spl" + string(127) + "t1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", false}, // invalid character (DEL) in hrp - {"split1cheo2y9e2w", false}, // invalid character (o) in data part - {"split1a2y9w", false}, // too short data part - {"1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", false}, // empty hrp - {"li1dgmt3", false}, // Too short checksum + {"split1cheo2y9e2w", false}, // invalid character (o) in data part + {"split1a2y9w", false}, // too short data part + {"1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", false}, // empty hrp + {"li1dgmt3", false}, // Too short checksum {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", false}, // overall max length exceeded }
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package bech32 import ( "strings" "testing" ) func TestBech32(t *testing.T) { tests := []struct { str string valid bool }{ // Try some test vectors from https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#Bech32 {"A12UEL5L", true}, {"a12uel5l", true}, {"an83characterlonghumanreadablepartthatcontainsthenumber1andtheexcludedcharactersbio1tt5tgs", true}, {"abcdef1qpzry9x8gf2tvdw0s3jn54khce6mua7lmqqqxw", true}, {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", true}, {"split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", true}, {"Split1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", false}, // mix of lower upper {"split1checkupstagehandshakeupstreamerranterredcaperred2y9e2w", false}, // invalid checksum {"s lit1checkupstagehandshakeupstreamerranterredcaperredp8hs2p", false}, // invalid character (space) in hrp {"spl" + string(127) + "t1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", false}, // invalid character (DEL) in hrp {"split1cheo2y9e2w", false}, // invalid character (o) in data part {"split1a2y9w", false}, // too short data part {"1checkupstagehandshakeupstreamerranterredcaperred2y9e3w", false}, // empty hrp {"li1dgmt3", false}, // Too short checksum {"11qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqsqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqc8247j", false}, // overall max length exceeded } for _, test := range tests { str := test.str hrp, decoded, err := Decode(str) if !test.valid { // Invalid string decoding should result in error. if err == nil { t.Errorf("expected decoding to fail for invalid string %v", test.str) } continue } // Valid string decoding should result in no error. if err != nil { t.Errorf("expected string to be valid bech32: %v", err) } // Check that it encodes to the same string encoded, err := Encode(hrp, decoded) if err != nil { t.Errorf("encoding failed: %v", err) } if encoded != strings.ToLower(str) { t.Errorf("expected data to encode to %v, but got %v", str, encoded) } // Flip a bit in the string an make sure it is caught. pos := strings.LastIndexAny(str, "1") flipped := str[:pos+1] + string((str[pos+1] ^ 1)) + str[pos+2:] _, _, err = Decode(flipped) if err == nil { t.Error("expected decoding to fail") } } }
1
12,157
File is not `goimports`-ed
iotexproject-iotex-core
go
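For the `goimports` comment above, here is an editor-added sketch (not project code) of what the tool actually changes in a case like this: gofmt, which goimports applies on top of import fixing, column-aligns trailing comments on consecutive lines, which appears to be the whitespace churn in the test-vector table of the diff. The standard-library go/format package applies the same rules, so the effect can be seen programmatically.

package main

import (
	"fmt"
	"go/format"
	"log"
)

func main() {
	// Deliberately misaligned trailing comments, similar in spirit to the
	// test-vector slice in the diff above.
	src := []byte(`package p

var cases = []string{
	"split1cheo2y9e2w", // invalid character (o) in data part
	"li1dgmt3", // too short checksum
}
`)
	formatted, err := format.Source(src)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(formatted)) // gofmt column-aligns the trailing comments
}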
@@ -36,6 +36,6 @@ public class AzkabanExecServerModule extends AbstractModule { bind(AzkabanExecutorServer.class).in(Scopes.SINGLETON); bind(TriggerManager.class).in(Scopes.SINGLETON); bind(FlowRunnerManager.class).in(Scopes.SINGLETON); - + // TODO: add ExecMetrics when guicy } }
1
/* * Copyright 2017 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package azkaban.execapp; import azkaban.executor.ExecutorLoader; import azkaban.executor.JdbcExecutorLoader; import com.google.inject.AbstractModule; import com.google.inject.Scopes; /** * This Guice module is currently a one place container for all bindings in the current module. This * is intended to help during the migration process to Guice. Once this class starts growing we can * move towards more modular structuring of Guice components. */ public class AzkabanExecServerModule extends AbstractModule { @Override protected void configure() { bind(ExecutorLoader.class).to(JdbcExecutorLoader.class).in(Scopes.SINGLETON); bind(AzkabanExecutorServer.class).in(Scopes.SINGLETON); bind(TriggerManager.class).in(Scopes.SINGLETON); bind(FlowRunnerManager.class).in(Scopes.SINGLETON); } }
1
13,718
Is this Todo necessary?
azkaban-azkaban
java
@@ -108,10 +108,7 @@ class ErrorHandler(object): message = value["value"] if not isinstance(message, basestring): value = message - try: - message = message['message'] - except TypeError: - message = None + message = message.get('message', None) else: message = value.get('message', None) except ValueError:
1
# Licensed to the Software Freedom Conservancy (SFC) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The SFC licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from selenium.common.exceptions import (ElementNotInteractableException, ElementNotSelectableException, ElementNotVisibleException, ErrorInResponseException, InvalidElementStateException, InvalidSelectorException, ImeNotAvailableException, ImeActivationFailedException, MoveTargetOutOfBoundsException, NoSuchElementException, NoSuchFrameException, NoSuchWindowException, NoAlertPresentException, StaleElementReferenceException, TimeoutException, UnexpectedAlertPresentException, WebDriverException) try: basestring except NameError: # Python 3.x basestring = str class ErrorCode(object): """ Error codes defined in the WebDriver wire protocol. """ # Keep in sync with org.openqa.selenium.remote.ErrorCodes and errorcodes.h SUCCESS = 0 NO_SUCH_ELEMENT = [7, 'no such element'] NO_SUCH_FRAME = [8, 'no such frame'] UNKNOWN_COMMAND = [9, 'unknown command'] STALE_ELEMENT_REFERENCE = [10, 'stale element reference'] ELEMENT_NOT_VISIBLE = [11, 'element not visible'] INVALID_ELEMENT_STATE = [12, 'invalid element state'] UNKNOWN_ERROR = [13, 'unknown error'] ELEMENT_NOT_INTERACTABLE = ["element not interactable"] ELEMENT_IS_NOT_SELECTABLE = [15, 'element not selectable'] JAVASCRIPT_ERROR = [17, 'javascript error'] XPATH_LOOKUP_ERROR = [19, 'invalid selector'] TIMEOUT = [21, 'timeout'] NO_SUCH_WINDOW = [23, 'no such window'] INVALID_COOKIE_DOMAIN = [24, 'invalid cookie domain'] UNABLE_TO_SET_COOKIE = [25, 'unable to set cookie'] UNEXPECTED_ALERT_OPEN = [26, 'unexpected alert open'] NO_ALERT_OPEN = [27, 'no such alert'] SCRIPT_TIMEOUT = [28, 'script timeout'] INVALID_ELEMENT_COORDINATES = [29, 'invalid element coordinates'] IME_NOT_AVAILABLE = [30, 'ime not available'] IME_ENGINE_ACTIVATION_FAILED = [31, 'ime engine activation failed'] INVALID_SELECTOR = [32, 'invalid selector'] MOVE_TARGET_OUT_OF_BOUNDS = [34, 'move target out of bounds'] INVALID_XPATH_SELECTOR = [51, 'invalid selector'] INVALID_XPATH_SELECTOR_RETURN_TYPER = [52, 'invalid selector'] METHOD_NOT_ALLOWED = [405, 'unsupported operation'] class ErrorHandler(object): """ Handles errors returned by the WebDriver server. """ def check_response(self, response): """ Checks that a JSON response from the WebDriver does not have an error. :Args: - response - The JSON response from the WebDriver server as a dictionary object. :Raises: If the response contains an error message. 
""" status = response.get('status', None) if status is None or status == ErrorCode.SUCCESS: return value = None message = response.get("message", "") screen = response.get("screen", "") stacktrace = None if isinstance(status, int): value_json = response.get('value', None) if value_json and isinstance(value_json, basestring): import json try: value = json.loads(value_json) if len(value.keys()) == 1: value = value['value'] status = value.get('error', None) if status is None: status = value["status"] message = value["value"] if not isinstance(message, basestring): value = message try: message = message['message'] except TypeError: message = None else: message = value.get('message', None) except ValueError: pass exception_class = ErrorInResponseException if status in ErrorCode.NO_SUCH_ELEMENT: exception_class = NoSuchElementException elif status in ErrorCode.NO_SUCH_FRAME: exception_class = NoSuchFrameException elif status in ErrorCode.NO_SUCH_WINDOW: exception_class = NoSuchWindowException elif status in ErrorCode.STALE_ELEMENT_REFERENCE: exception_class = StaleElementReferenceException elif status in ErrorCode.ELEMENT_NOT_VISIBLE: exception_class = ElementNotVisibleException elif status in ErrorCode.INVALID_ELEMENT_STATE: exception_class = InvalidElementStateException elif status in ErrorCode.INVALID_SELECTOR \ or status in ErrorCode.INVALID_XPATH_SELECTOR \ or status in ErrorCode.INVALID_XPATH_SELECTOR_RETURN_TYPER: exception_class = InvalidSelectorException elif status in ErrorCode.ELEMENT_IS_NOT_SELECTABLE: exception_class = ElementNotSelectableException elif status in ErrorCode.ELEMENT_NOT_INTERACTABLE: exception_class = ElementNotInteractableException elif status in ErrorCode.INVALID_COOKIE_DOMAIN: exception_class = WebDriverException elif status in ErrorCode.UNABLE_TO_SET_COOKIE: exception_class = WebDriverException elif status in ErrorCode.TIMEOUT: exception_class = TimeoutException elif status in ErrorCode.SCRIPT_TIMEOUT: exception_class = TimeoutException elif status in ErrorCode.UNKNOWN_ERROR: exception_class = WebDriverException elif status in ErrorCode.UNEXPECTED_ALERT_OPEN: exception_class = UnexpectedAlertPresentException elif status in ErrorCode.NO_ALERT_OPEN: exception_class = NoAlertPresentException elif status in ErrorCode.IME_NOT_AVAILABLE: exception_class = ImeNotAvailableException elif status in ErrorCode.IME_ENGINE_ACTIVATION_FAILED: exception_class = ImeActivationFailedException elif status in ErrorCode.MOVE_TARGET_OUT_OF_BOUNDS: exception_class = MoveTargetOutOfBoundsException else: exception_class = WebDriverException if value == '' or value is None: value = response['value'] if isinstance(value, basestring): if exception_class == ErrorInResponseException: raise exception_class(response, value) raise exception_class(value) if message == "" and 'message' in value: message = value['message'] screen = None if 'screen' in value: screen = value['screen'] stacktrace = None if 'stackTrace' in value and value['stackTrace']: stacktrace = [] try: for frame in value['stackTrace']: line = self._value_or_default(frame, 'lineNumber', '') file = self._value_or_default(frame, 'fileName', '<anonymous>') if line: file = "%s:%s" % (file, line) meth = self._value_or_default(frame, 'methodName', '<anonymous>') if 'className' in frame: meth = "%s.%s" % (frame['className'], meth) msg = " at %s (%s)" msg = msg % (meth, file) stacktrace.append(msg) except TypeError: pass if exception_class == ErrorInResponseException: raise exception_class(response, message) elif exception_class == 
UnexpectedAlertPresentException and 'alert' in value: raise exception_class(message, screen, stacktrace, value['alert'].get('text')) raise exception_class(message, screen, stacktrace) def _value_or_default(self, obj, key, default): return obj[key] if key in obj else default
1
14,481
I would recommend leaving out `None`, because `None` is already the default.
SeleniumHQ-selenium
py
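The review above points out a small Python idiom: `dict.get(key)` already falls back to `None`, so spelling out the default adds nothing. A minimal illustrative sketch (not part of the dataset row):

```python
# dict.get() returns None by default when the key is missing,
# so the explicit default argument in the patch is redundant.
value = {"status": 13}

with_default = value.get('message', None)   # what the patch writes
without_default = value.get('message')      # equivalent, slightly shorter

assert with_default is None and without_default is None
```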
@@ -97,6 +97,10 @@ const ( taskIDFlag = "task-id" containerFlag = "container" + + valuesFlag = "values" + overwriteFlag = "overwrite" + inputFilePathFlag = "cli-input-yaml" ) // Short flag names.
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package cli import ( "fmt" "strings" "github.com/aws/copilot-cli/internal/pkg/manifest" "github.com/aws/copilot-cli/internal/pkg/template" ) // Long flag names. const ( // Common flags. nameFlag = "name" appFlag = "app" envFlag = "env" workloadFlag = "workload" svcTypeFlag = "svc-type" jobTypeFlag = "job-type" typeFlag = "type" profileFlag = "profile" yesFlag = "yes" jsonFlag = "json" allFlag = "all" // Command specific flags. dockerFileFlag = "dockerfile" imageTagFlag = "tag" resourceTagsFlag = "resource-tags" stackOutputDirFlag = "output-dir" limitFlag = "limit" followFlag = "follow" sinceFlag = "since" startTimeFlag = "start-time" endTimeFlag = "end-time" tasksFlag = "tasks" prodEnvFlag = "prod" deployFlag = "deploy" resourcesFlag = "resources" githubURLFlag = "github-url" repoURLFlag = "url" githubAccessTokenFlag = "github-access-token" gitBranchFlag = "git-branch" envsFlag = "environments" domainNameFlag = "domain" localFlag = "local" deleteSecretFlag = "delete-secret" svcPortFlag = "port" storageTypeFlag = "storage-type" storagePartitionKeyFlag = "partition-key" storageSortKeyFlag = "sort-key" storageNoSortFlag = "no-sort" storageLSIConfigFlag = "lsi" storageNoLSIFlag = "no-lsi" storageRDSEngineFlag = "engine" storageRDSInitialDBFlag = "initial-db" storageRDSParameterGroupFlag = "parameter-group" taskGroupNameFlag = "task-group-name" countFlag = "count" cpuFlag = "cpu" memoryFlag = "memory" imageFlag = "image" taskRoleFlag = "task-role" executionRoleFlag = "execution-role" clusterFlag = "cluster" subnetsFlag = "subnets" securityGroupsFlag = "security-groups" envVarsFlag = "env-vars" secretsFlag = "secrets" commandFlag = "command" entrypointFlag = "entrypoint" taskDefaultFlag = "default" vpcIDFlag = "import-vpc-id" publicSubnetsFlag = "import-public-subnets" privateSubnetsFlag = "import-private-subnets" vpcCIDRFlag = "override-vpc-cidr" publicSubnetCIDRsFlag = "override-public-cidrs" privateSubnetCIDRsFlag = "override-private-cidrs" defaultConfigFlag = "default-config" accessKeyIDFlag = "aws-access-key-id" secretAccessKeyFlag = "aws-secret-access-key" sessionTokenFlag = "aws-session-token" regionFlag = "region" retriesFlag = "retries" timeoutFlag = "timeout" scheduleFlag = "schedule" taskIDFlag = "task-id" containerFlag = "container" ) // Short flag names. // A short flag only exists if the flag or flag set is mandatory by the command. const ( nameFlagShort = "n" appFlagShort = "a" envFlagShort = "e" typeFlagShort = "t" workloadFlagShort = "w" dockerFileFlagShort = "d" commandFlagShort = "c" imageFlagShort = "i" repoURLFlagShort = "u" githubAccessTokenFlagShort = "t" gitBranchFlagShort = "b" envsFlagShort = "e" scheduleFlagShort = "s" ) // Descriptions for flags. var ( svcTypeFlagDescription = fmt.Sprintf(`Type of service to create. Must be one of: %s`, strings.Join(template.QuoteSliceFunc(manifest.ServiceTypes), ", ")) imageFlagDescription = fmt.Sprintf(`The location of an existing Docker image. Mutually exclusive with -%s, --%s`, dockerFileFlagShort, dockerFileFlag) dockerFileFlagDescription = fmt.Sprintf(`Path to the Dockerfile. Mutually exclusive with -%s, --%s`, imageFlagShort, imageFlag) storageTypeFlagDescription = fmt.Sprintf(`Type of storage to add. Must be one of: %s`, strings.Join(template.QuoteSliceFunc(storageTypes), ", ")) jobTypeFlagDescription = fmt.Sprintf(`Type of job to create. 
Must be one of: %s`, strings.Join(template.QuoteSliceFunc(manifest.JobTypes), ", ")) wkldTypeFlagDescription = fmt.Sprintf(`Type of job or svc to create. Must be one of: %s`, strings.Join(template.QuoteSliceFunc(manifest.WorkloadTypes), ", ")) clusterFlagDescription = fmt.Sprintf(`Optional. The short name or full ARN of the cluster to run the task in. Cannot be specified with '%s', '%s' or '%s'.`, appFlag, envFlag, taskDefaultFlag) subnetsFlagDescription = fmt.Sprintf(`Optional. The subnet IDs for the task to use. Can be specified multiple times. Cannot be specified with '%s', '%s' or '%s'.`, appFlag, envFlag, taskDefaultFlag) securityGroupsFlagDescription = fmt.Sprintf(`Optional. The security group IDs for the task to use. Can be specified multiple times. Cannot be specified with '%s' or '%s'.`, appFlag, envFlag) taskRunDefaultFlagDescription = fmt.Sprintf(`Optional. Run tasks in default cluster and default subnets. Cannot be specified with '%s', '%s' or '%s'.`, appFlag, envFlag, subnetsFlag) taskExecDefaultFlagDescription = fmt.Sprintf(`Optional. Execute commands in running tasks in default cluster and default subnets. Cannot be specified with '%s' or '%s'.`, appFlag, envFlag) taskDeleteDefaultFlagDescription = fmt.Sprintf(`Optional. Delete a task which was launched in the default cluster and subnets. Cannot be specified with '%s' or '%s'`, appFlag, envFlag) taskEnvFlagDescription = fmt.Sprintf(`Optional. Name of the environment. Cannot be specified with '%s', '%s' or '%s'`, taskDefaultFlag, subnetsFlag, securityGroupsFlag) taskAppFlagDescription = fmt.Sprintf(`Optional. Name of the application. Cannot be specified with '%s', '%s' or '%s'`, taskDefaultFlag, subnetsFlag, securityGroupsFlag) ) const ( appFlagDescription = "Name of the application." envFlagDescription = "Name of the environment." svcFlagDescription = "Name of the service." jobFlagDescription = "Name of the job." workloadFlagDescription = "Name of the service or job." nameFlagDescription = "Name of the service, job, or task group." pipelineFlagDescription = "Name of the pipeline." profileFlagDescription = "Name of the profile." yesFlagDescription = "Skips confirmation prompt." execYesFlagDescription = "Optional. Whether to update the Session Manager Plugin." jsonFlagDescription = "Optional. Outputs in JSON format." imageTagFlagDescription = `Optional. The container image tag.` resourceTagsFlagDescription = `Optional. Labels with a key and value separated by commas. Allows you to categorize resources.` stackOutputDirFlagDescription = "Optional. Writes the stack template and template configuration to a directory." prodEnvFlagDescription = "If the environment contains production services." limitFlagDescription = `Optional. The maximum number of log events returned. Default is 10 unless any time filtering flags are set.` followFlagDescription = "Optional. Specifies if the logs should be streamed." sinceFlagDescription = `Optional. Only return logs newer than a relative duration like 5s, 2m, or 3h. Defaults to all logs. Only one of start-time / since may be used.` startTimeFlagDescription = `Optional. Only return logs after a specific date (RFC3339). Defaults to all logs. Only one of start-time / since may be used.` endTimeFlagDescription = `Optional. Only return logs before a specific date (RFC3339). Defaults to all logs. Only one of end-time / follow may be used.` tasksLogsFlagDescription = "Optional. Only return logs from specific task IDs." 
deployTestFlagDescription = `Deploy your service or job to a "test" environment.` githubURLFlagDescription = "(Deprecated.) Use --url instead. Repository URL to trigger your pipeline." repoURLFlagDescription = "The repository URL to trigger your pipeline." githubAccessTokenFlagDescription = "GitHub personal access token for your repository." gitBranchFlagDescription = "Branch used to trigger your pipeline." pipelineEnvsFlagDescription = "Environments to add to the pipeline." domainNameFlagDescription = "Optional. Your existing custom domain name." envResourcesFlagDescription = "Optional. Show the resources in your environment." svcResourcesFlagDescription = "Optional. Show the resources in your service." pipelineResourcesFlagDescription = "Optional. Show the resources in your pipeline." localSvcFlagDescription = "Only show services in the workspace." localJobFlagDescription = "Only show jobs in the workspace." deleteSecretFlagDescription = "Deletes AWS Secrets Manager secret associated with a pipeline source repository." svcPortFlagDescription = "Optional. The port on which your service listens." storageFlagDescription = "Name of the storage resource to create." storageWorkloadFlagDescription = "Name of the service or job to associate with storage." storagePartitionKeyFlagDescription = `Partition key for the DDB table. Must be of the format '<keyName>:<dataType>'.` storageSortKeyFlagDescription = `Optional. Sort key for the DDB table. Must be of the format '<keyName>:<dataType>'.` storageNoSortFlagDescription = "Optional. Skip configuring sort keys." storageNoLSIFlagDescription = `Optional. Don't ask about configuring alternate sort keys.` storageLSIConfigFlagDescription = `Optional. Attribute to use as an alternate sort key. May be specified up to 5 times. Must be of the format '<keyName>:<dataType>'.` storageRDSEngineFlagDescription = `The database engine used in the cluster. Must be either "MySQL" or "PostgreSQL".` storageRDSInitialDBFlagDescription = "The initial database to create in the cluster." storageRDSParameterGroupFlagDescription = "Optional. The name of the parameter group to associate with the cluster." countFlagDescription = "Optional. The number of tasks to set up." cpuFlagDescription = "Optional. The number of CPU units to reserve for each task." memoryFlagDescription = "Optional. The amount of memory to reserve in MiB for each task." taskRoleFlagDescription = "Optional. The ARN of the role for the task to use." executionRoleFlagDescription = "Optional. The ARN of the role that grants the container agent permission to make AWS API calls." envVarsFlagDescription = "Optional. Environment variables specified by key=value separated by commas." secretsFlagDescription = "Optional. Secrets to inject into the container. Specified by key=value separated by commas." runCommandFlagDescription = `Optional. The command that is passed to "docker run" to override the default command.` entrypointFlagDescription = `Optional. The entrypoint that is passed to "docker run" to override the default entrypoint.` taskGroupFlagDescription = `Optional. The group name of the task. Tasks with the same group name share the same set of resources. (default directory name)` taskImageTagFlagDescription = `Optional. The container image tag in addition to "latest".` vpcIDFlagDescription = "Optional. Use an existing VPC ID." publicSubnetsFlagDescription = "Optional. Use existing public subnet IDs." privateSubnetsFlagDescription = "Optional. Use existing private subnet IDs." 
vpcCIDRFlagDescription = "Optional. Global CIDR to use for VPC (default 10.0.0.0/16)." publicSubnetCIDRsFlagDescription = "Optional. CIDR to use for public subnets (default 10.0.0.0/24,10.0.1.0/24)." privateSubnetCIDRsFlagDescription = "Optional. CIDR to use for private subnets (default 10.0.2.0/24,10.0.3.0/24)." defaultConfigFlagDescription = "Optional. Skip prompting and use default environment configuration." accessKeyIDFlagDescription = "Optional. An AWS access key." secretAccessKeyFlagDescription = "Optional. An AWS secret access key." sessionTokenFlagDescription = "Optional. An AWS session token for temporary credentials." envRegionTokenFlagDescription = "Optional. An AWS region where the environment will be created." retriesFlagDescription = "Optional. The number of times to try restarting the job on a failure." timeoutFlagDescription = `Optional. The total execution time for the task, including retries. Accepts valid Go duration strings. For example: "2h", "1h30m", "900s".` scheduleFlagDescription = `The schedule on which to run this job. Accepts cron expressions of the format (M H DoM M DoW) and schedule definition strings. For example: "0 * * * *", "@daily", "@weekly", "@every 1h30m". AWS Schedule Expressions of the form "rate(10 minutes)" or "cron(0 12 L * ? 2021)" are also accepted.` upgradeAllEnvsDescription = "Optional. Upgrade all environments." taskIDFlagDescription = "Optional. ID of the task you want to exec in." execCommandFlagDescription = `Optional. The command that is passed to a running container.` containerFlagDescription = "Optional. The specific container you want to exec in. By default the first essential container will be used." )
1
17,401
I may have missed previous discussion on this; apologies if that's the case. What do you think about just `input-yaml`, without the `cli-`?
aws-copilot-cli
go
@@ -57,13 +57,14 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Sockets.Internal public async Task StartAsync(IConnectionHandler connectionHandler) { + Exception error = null; try { connectionHandler.OnConnection(this); // Spawn send and receive logic Task receiveTask = DoReceive(); - Task sendTask = DoSend(); + Task<Exception> sendTask = DoSend(); // If the sending task completes then close the receive // We don't need to do this in the other direction because the kestrel
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Buffers; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.IO.Pipelines; using System.Net; using System.Net.Sockets; using System.Threading.Tasks; using Microsoft.AspNetCore.Protocols; using System.Threading; using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal; using Microsoft.Extensions.Logging; namespace Microsoft.AspNetCore.Server.Kestrel.Transport.Sockets.Internal { internal sealed class SocketConnection : TransportConnection { private const int MinAllocBufferSize = 2048; private readonly Socket _socket; private readonly ISocketsTrace _trace; private readonly SocketReceiver _receiver; private readonly SocketSender _sender; private volatile bool _aborted; internal SocketConnection(Socket socket, MemoryPool memoryPool, ISocketsTrace trace) { Debug.Assert(socket != null); Debug.Assert(memoryPool != null); Debug.Assert(trace != null); _socket = socket; MemoryPool = memoryPool; _trace = trace; var localEndPoint = (IPEndPoint)_socket.LocalEndPoint; var remoteEndPoint = (IPEndPoint)_socket.RemoteEndPoint; LocalAddress = localEndPoint.Address; LocalPort = localEndPoint.Port; RemoteAddress = remoteEndPoint.Address; RemotePort = remoteEndPoint.Port; _receiver = new SocketReceiver(_socket); _sender = new SocketSender(_socket); } public override MemoryPool MemoryPool { get; } public override Scheduler InputWriterScheduler => Scheduler.Inline; public override Scheduler OutputReaderScheduler => Scheduler.TaskRun; public async Task StartAsync(IConnectionHandler connectionHandler) { try { connectionHandler.OnConnection(this); // Spawn send and receive logic Task receiveTask = DoReceive(); Task sendTask = DoSend(); // If the sending task completes then close the receive // We don't need to do this in the other direction because the kestrel // will trigger the output closing once the input is complete. 
if (await Task.WhenAny(receiveTask, sendTask) == sendTask) { // Tell the reader it's being aborted _socket.Dispose(); } // Now wait for both to complete await receiveTask; await sendTask; // Dispose the socket(should noop if already called) _socket.Dispose(); } catch (Exception ex) { _trace.LogError(0, ex, $"Unexpected exception in {nameof(SocketConnection)}.{nameof(StartAsync)}."); } } private async Task DoReceive() { Exception error = null; try { while (true) { // Ensure we have some reasonable amount of buffer space var buffer = Input.Alloc(MinAllocBufferSize); try { var bytesReceived = await _receiver.ReceiveAsync(buffer.Buffer); if (bytesReceived == 0) { // FIN _trace.ConnectionReadFin(ConnectionId); break; } buffer.Advance(bytesReceived); } finally { buffer.Commit(); } var flushTask = buffer.FlushAsync(); if (!flushTask.IsCompleted) { _trace.ConnectionPause(ConnectionId); await flushTask; _trace.ConnectionResume(ConnectionId); } var result = flushTask.GetAwaiter().GetResult(); if (result.IsCompleted) { // Pipe consumer is shut down, do we stop writing break; } } } catch (SocketException ex) when (ex.SocketErrorCode == SocketError.ConnectionReset) { error = new ConnectionResetException(ex.Message, ex); _trace.ConnectionReset(ConnectionId); } catch (SocketException ex) when (ex.SocketErrorCode == SocketError.OperationAborted || ex.SocketErrorCode == SocketError.ConnectionAborted || ex.SocketErrorCode == SocketError.Interrupted || ex.SocketErrorCode == SocketError.InvalidArgument) { if (!_aborted) { // Calling Dispose after ReceiveAsync can cause an "InvalidArgument" error on *nix. error = new ConnectionAbortedException(); _trace.ConnectionError(ConnectionId, error); } } catch (ObjectDisposedException) { if (!_aborted) { error = new ConnectionAbortedException(); _trace.ConnectionError(ConnectionId, error); } } catch (IOException ex) { error = ex; _trace.ConnectionError(ConnectionId, error); } catch (Exception ex) { error = new IOException(ex.Message, ex); _trace.ConnectionError(ConnectionId, error); } finally { if (_aborted) { error = error ?? new ConnectionAbortedException(); } Input.Complete(error); } } private async Task DoSend() { Exception error = null; try { while (true) { // Wait for data to write from the pipe producer var result = await Output.ReadAsync(); var buffer = result.Buffer; if (result.IsCancelled) { break; } try { if (!buffer.IsEmpty) { await _sender.SendAsync(buffer); } else if (result.IsCompleted) { break; } } finally { Output.Advance(buffer.End); } } } catch (SocketException ex) when (ex.SocketErrorCode == SocketError.OperationAborted) { error = null; } catch (ObjectDisposedException) { error = null; } catch (IOException ex) { error = ex; } catch (Exception ex) { error = new IOException(ex.Message, ex); } finally { Output.Complete(error); // Make sure to close the connection only after the _aborted flag is set. // Without this, the RequestsCanBeAbortedMidRead test will sometimes fail when // a BadHttpRequestException is thrown instead of a TaskCanceledException. _aborted = true; _trace.ConnectionWriteFin(ConnectionId); _socket.Shutdown(SocketShutdown.Both); } } } }
1
14,648
Nit: rename to sendError.
aspnet-KestrelHttpServer
.cs
@@ -7,6 +7,10 @@ Makes functions in .tools.command accessible directly from quilt. # None: CLI params have not yet been parsed to determine mode. _DEV_MODE = None +# Suppress numpy warnings for Python 2.7 +import warnings +warnings.filterwarnings("ignore", message="numpy.dtype size changed") + # Normally a try: except: block on or in main() would be better and simpler, # but we load a bunch of external modules that take a lot of time, during which
1
""" Makes functions in .tools.command accessible directly from quilt. """ # True: Force dev mode # False: Force normal mode # None: CLI params have not yet been parsed to determine mode. _DEV_MODE = None # Normally a try: except: block on or in main() would be better and simpler, # but we load a bunch of external modules that take a lot of time, during which # ctrl-c will cause an exception that misses that block. ..so, we catch the # signal instead of using try:except, and we catch it here, early during load. # # Note: This doesn't *guarantee* that a traceback won't occur, and there's no # real way to do so, because if it happens early enough (during parsing, for # example, or inside the entry point file) we have no way to stop it. def _install_interrupt_handler(): """Suppress KeyboardInterrupt traceback display in specific situations If not running in dev mode, and if executed from the command line, then we raise SystemExit instead of KeyboardInterrupt. This provides a clean exit. :returns: None if no action is taken, original interrupt handler otherwise """ # These would clutter the quilt.x namespace, so they're imported here instead. import os import sys import signal import pkg_resources from .tools import const # Check to see what entry points / scripts are configred to run quilt from the CLI # By doing this, we have these benefits: # * Avoid closing someone's Jupyter/iPython/bPython session when they hit ctrl-c # * Avoid calling exit() when being used as an external lib # * Provide exceptions when running in Jupyter/iPython/bPython # * Provide exceptions when running in unexpected circumstances quilt = pkg_resources.get_distribution('quilt') executable = os.path.basename(sys.argv[0]) entry_points = quilt.get_entry_map().get('console_scripts', []) # When python is run with '-c', this was executed via 'python -c "<some python code>"' if executable == '-c': # This is awkward and somewhat hackish, but we have to ensure that this is *us* # executing via 'python -c' if len(sys.argv) > 1 and sys.argv[1] == 'quilt testing': # it's us. Let's pretend '-c' is an entry point. entry_points['-c'] = 'blah' sys.argv.pop(1) if executable not in entry_points: return # We're running as a console script. # If not in dev mode, use SystemExit instead of raising KeyboardInterrupt def handle_interrupt(signum, stack): # Check for dev mode if _DEV_MODE is None: # Args and environment have not been parsed, and no _DEV_MODE state has been set. dev_mode = True if len(sys.argv) > 1 and sys.argv[1] == '--dev' else False dev_mode = True if os.environ.get('QUILT_DEV_MODE', '').strip().lower() == 'true' else dev_mode else: # Use forced dev-mode if _DEV_MODE is set dev_mode = _DEV_MODE # In order to display the full traceback, we lose control of the exit code here. # Dev mode ctrl-c exit just produces the generic exit error code 1 if dev_mode: raise KeyboardInterrupt() # Normal exit # avoid annoying prompt displacement when hitting ctrl-c print() exit(const.EXIT_KB_INTERRUPT) return signal.signal(signal.SIGINT, handle_interrupt) # This should be called as early in the execution process as is possible. # ..original handler saved in case someone wants it, but it's probably just signal.default_int_handler. 
_orig_interrupt_handler = _install_interrupt_handler() from .tools.command import ( access_add, access_list, access_remove, audit, build, check, config, create_user, delete_user, enable_user, disable_user, export, generate, inspect, install, list_packages, list_users, list_users_detailed, load, log, login, login_with_token, logout, ls, delete, push, rm, search, tag_add, tag_list, tag_remove, version_add, version_list, )
1
16,943
`pylint` wants to know why this `import` isn't at the top of the file, and it's kind of right.
quiltdata-quilt
py
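The review above refers to pylint complaining about the mid-file `import warnings` added by the patch. A short sketch of the two usual remedies, assuming the check that fires is `wrong-import-position` (C0413); the warning text is copied from the patch:

```python
# Option 1: move the import up with the module's other imports.
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")

# Option 2: keep the import where it is and mark the placement as intentional.
import warnings  # pylint: disable=wrong-import-position
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
```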
@@ -0,0 +1,19 @@ +package de.danoeh.antennapod.core.feed; + +public class FeedUrlNotFoundException extends RuntimeException { + private final String artistName; + private final String trackName; + + public FeedUrlNotFoundException(String url, String trackName) { + this.artistName = url; + this.trackName = trackName; + } + + public String getArtistName() { + return artistName; + } + + public String getTrackName() { + return trackName; + } +}
1
1
21,193
To me, using a `RuntimeException` for this feels a bit weird. I would just extend `Exception`, or maybe even `IOException`. Could you also please override the `getMessage` method to return something like `"Result does not specify a feed url"`?
AntennaPod-AntennaPod
java
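The review above asks for a checked exception with a meaningful message. A sketch of what that could look like, reusing the names from the patch (the message string is the reviewer's suggestion, and the constructor parameter is renamed from `url` to `artistName` to match the field it populates):

```java
package de.danoeh.antennapod.core.feed;

public class FeedUrlNotFoundException extends Exception {
    private final String artistName;
    private final String trackName;

    public FeedUrlNotFoundException(String artistName, String trackName) {
        this.artistName = artistName;
        this.trackName = trackName;
    }

    @Override
    public String getMessage() {
        return "Result does not specify a feed url";
    }

    public String getArtistName() {
        return artistName;
    }

    public String getTrackName() {
        return trackName;
    }
}
```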
@@ -88,7 +88,8 @@ func convertStorage(ctx context.Context, msg *cepubsub.Message, sendMode ModeTyp } } event.SetDataContentType(*cloudevents.StringOfApplicationJSON()) - event.SetData(msg.Data) + event.Data = msg.Data + event.DataEncoded = true // Attributes are extensions. if msg.Attributes != nil && len(msg.Attributes) > 0 { for k, v := range msg.Attributes {
1
/* Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package converters import ( "context" "fmt" "go.uber.org/zap" "knative.dev/pkg/logging" cloudevents "github.com/cloudevents/sdk-go" cepubsub "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/pubsub" pubsubcontext "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/pubsub/context" "github.com/google/knative-gcp/pkg/apis/events/v1alpha1" ) var ( // Mapping of GCS eventTypes to CloudEvent types. storageEventTypes = map[string]string{ "OBJECT_FINALIZE": v1alpha1.StorageFinalize, "OBJECT_ARCHIVE": v1alpha1.StorageArchive, "OBJECT_DELETE": v1alpha1.StorageDelete, "OBJECT_METADATA_UPDATE": v1alpha1.StorageMetadataUpdate, } ) const ( storageDefaultEventType = "com.google.cloud.storage" // Schema extracted from https://raw.githubusercontent.com/googleapis/google-api-go-client/master/storage/v1/storage-api.json. // TODO find the public google endpoint we should use to point to the schema and avoid hosting it ourselves. // The link above is tied to the go-client, and it seems not to be a valid json schema. storageSchemaUrl = "https://raw.githubusercontent.com/google/knative-gcp/master/schemas/storage/schema.json" ) func convertStorage(ctx context.Context, msg *cepubsub.Message, sendMode ModeType) (*cloudevents.Event, error) { if msg == nil { return nil, fmt.Errorf("nil pubsub message") } tx := pubsubcontext.TransportContextFrom(ctx) // Make a new event and convert the message payload. event := cloudevents.NewEvent(cloudevents.VersionV03) event.SetID(tx.ID) event.SetTime(tx.PublishTime) event.SetDataSchema(storageSchemaUrl) if msg.Attributes != nil { if val, ok := msg.Attributes["bucketId"]; ok { delete(msg.Attributes, "bucketId") event.SetSource(v1alpha1.StorageEventSource(val)) } else { return nil, fmt.Errorf("received event did not have bucketId") } if val, ok := msg.Attributes["objectId"]; ok { delete(msg.Attributes, "objectId") event.SetSubject(val) } else { // Not setting subject, as it's optional logging.FromContext(ctx).Desugar().Debug("received event did not have objectId") } if val, ok := msg.Attributes["eventType"]; ok { delete(msg.Attributes, "eventType") if eventType, ok := storageEventTypes[val]; ok { event.SetType(eventType) } else { logging.FromContext(ctx).Desugar().Debug("Unknown eventType, using default", zap.String("eventType", eventType), zap.String("default", storageDefaultEventType)) event.SetType(storageDefaultEventType) } } else { return nil, fmt.Errorf("received event did not have eventType") } if _, ok := msg.Attributes["eventTime"]; ok { delete(msg.Attributes, "eventTime") } } event.SetDataContentType(*cloudevents.StringOfApplicationJSON()) event.SetData(msg.Data) // Attributes are extensions. if msg.Attributes != nil && len(msg.Attributes) > 0 { for k, v := range msg.Attributes { event.SetExtension(k, v) } } return &event, nil }
1
9,587
What was the issue here? Why did you have to set the fields directly?
google-knative-gcp
go
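The review above asks why the patch assigns `event.Data` and `event.DataEncoded` instead of calling `event.SetData(...)`. A hedged sketch of the distinction the patch appears to rely on; the field and method names come from the file above, but the description of `SetData`'s behaviour is an assumption, not something stated in the row:

```go
package converters

import (
	cloudevents "github.com/cloudevents/sdk-go"
)

// setRawJSONPayload is a hypothetical helper illustrating the pattern from the
// patch: mark the bytes as the already-encoded payload instead of letting the
// SDK decide how to (re-)encode them at send time, which SetData would do.
func setRawJSONPayload(event *cloudevents.Event, payload []byte) {
	event.SetDataContentType(*cloudevents.StringOfApplicationJSON())
	event.Data = payload
	event.DataEncoded = true
}
```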
@@ -226,10 +226,8 @@ func (c *Command) proposals() { for _, proposal := range proposals { country := proposal.ServiceDefinition.LocationOriginate.Country var countryString string - if country != nil { - countryString = *country - } else { - countryString = "Unknown" + if len(country) == 0 { + country = "Unknown" } msg := fmt.Sprintf("- provider id: %v, proposal id: %v, country: %v", proposal.ProviderID, proposal.ID, countryString) info(msg)
1
package cli import ( "fmt" "github.com/chzyer/readline" "github.com/mysterium/node/cmd" tequilapi_client "github.com/mysterium/node/tequilapi/client" "io" "log" "strings" ) // NewCommand constructs CLI based with possibility to control quiting func NewCommand( historyFile string, tequilapi *tequilapi_client.Client, ) *Command { return &Command{ historyFile: historyFile, tequilapi: tequilapi, } } // Command describes CLI based Mysterium UI type Command struct { historyFile string tequilapi *tequilapi_client.Client fetchedProposals []tequilapi_client.ProposalDTO completer *readline.PrefixCompleter reader *readline.Instance } const redColor = "\033[31m%s\033[0m" const identityDefaultPassphrase = "" const statusConnected = "Connected" // Run runs CLI interface synchronously, in the same thread while blocking it func (c *Command) Run() (err error) { c.fetchedProposals = c.fetchProposals() c.completer = newAutocompleter(c.tequilapi, c.fetchedProposals) c.reader, err = readline.NewEx(&readline.Config{ Prompt: fmt.Sprintf(redColor, "» "), HistoryFile: c.historyFile, AutoComplete: c.completer, InterruptPrompt: "^C", EOFPrompt: "exit", }) if err != nil { return err } // TODO Should overtake output of CommandRun log.SetOutput(c.reader.Stderr()) for { line, err := c.reader.Readline() if err == readline.ErrInterrupt { if len(line) == 0 { c.quit() } else { continue } } else if err == io.EOF { c.quit() } c.handleActions(line) } return nil } // Kill stops cli func (c *Command) Kill() error { c.reader.Clean() return c.reader.Close() } func (c *Command) handleActions(line string) { line = strings.TrimSpace(line) staticCmds := []struct { command string handler func() }{ {"exit", c.quit}, {"quit", c.quit}, {"help", c.help}, {"status", c.status}, {"proposals", c.proposals}, {"ip", c.ip}, {"disconnect", c.disconnect}, {"stop", c.stopClient}, } argCmds := []struct { command string handler func(argsString string) }{ {command: "connect", handler: c.connect}, {command: "unlock", handler: c.unlock}, {command: "identities", handler: c.identities}, } for _, cmd := range staticCmds { if line == cmd.command { cmd.handler() return } } for _, cmd := range argCmds { if strings.HasPrefix(line, cmd.command) { argsString := strings.TrimSpace(line[len(cmd.command):]) cmd.handler(argsString) return } } if len(line) > 0 { c.help() } } func (c *Command) connect(argsString string) { if len(argsString) == 0 { info("Press tab to select identity or create a new one. Connect <consumer-identity> <provider-identity>") return } identities := strings.Fields(argsString) if len(identities) != 2 { info("Please type in the provider identity. 
Connect <consumer-identity> <provider-identity>") return } consumerID, providerID := identities[0], identities[1] if consumerID == "new" { id, err := c.tequilapi.NewIdentity(identityDefaultPassphrase) if err != nil { warn(err) return } consumerID = id.Address success("New identity created:", consumerID) } status("CONNECTING", "from:", consumerID, "to:", providerID) _, err := c.tequilapi.Connect(consumerID, providerID) if err != nil { warn(err) return } success("Connected.") } func (c *Command) unlock(argsString string) { unlockSignature := "Unlock <identity> [passphrase]" if len(argsString) == 0 { info("Press tab to select identity.", unlockSignature) return } args := strings.Fields(argsString) var identity, passphrase string if len(args) == 1 { identity, passphrase = args[0], "" } else if len(args) == 2 { identity, passphrase = args[0], args[1] } else { info("Please type in identity and optional passphrase.", unlockSignature) return } info("Unlocking", identity) err := c.tequilapi.Unlock(identity, passphrase) if err != nil { warn(err) return } success(fmt.Sprintf("Identity %s unlocked.", identity)) } func (c *Command) disconnect() { err := c.tequilapi.Disconnect() if err != nil { warn(err) return } success("Disconnected.") } func (c *Command) status() { status, err := c.tequilapi.Status() if err != nil { warn(err) } else { info("Status:", status.Status) info("SID:", status.SessionID) } if status.Status == statusConnected { statistics, err := c.tequilapi.ConnectionStatistics() if err != nil { warn(err) } else { info(fmt.Sprintf("Connection duration: %ds", statistics.Duration)) info("Bytes sent:", statistics.BytesSent) info("Bytes received:", statistics.BytesReceived) } } } func (c *Command) proposals() { proposals := c.fetchProposals() c.fetchedProposals = proposals info(fmt.Sprintf("Found %v proposals", len(proposals))) for _, proposal := range proposals { country := proposal.ServiceDefinition.LocationOriginate.Country var countryString string if country != nil { countryString = *country } else { countryString = "Unknown" } msg := fmt.Sprintf("- provider id: %v, proposal id: %v, country: %v", proposal.ProviderID, proposal.ID, countryString) info(msg) } } func (c *Command) fetchProposals() []tequilapi_client.ProposalDTO { proposals, err := c.tequilapi.Proposals() if err != nil { warn(err) return []tequilapi_client.ProposalDTO{} } return proposals } func (c *Command) ip() { ip, err := c.tequilapi.GetIP() if err != nil { warn(err) return } info("IP:", ip) } func (c *Command) help() { info("Mysterium CLI tequilapi commands:") fmt.Println(c.completer.Tree(" ")) } // quit stops cli and client commands and exits application func (c *Command) quit() { stop := cmd.NewApplicationStopper(c.Kill) stop() } func (c *Command) identities(argsString string) { const usage = "identities command:\n list\n new [passphrase]" if len(argsString) == 0 { info(usage) return } args := strings.Fields(argsString) if len(args) < 1 { info(usage) return } action := args[0] if action == "list" { if len(args) > 1 { info(usage) return } ids, err := c.tequilapi.GetIdentities() if err != nil { fmt.Println("Error occured:", err) return } for _, id := range ids { status("+", id.Address) } return } if action == "new" { var passphrase string if len(args) == 1 { passphrase = identityDefaultPassphrase } else if len(args) == 2 { passphrase = args[1] } else { info(usage) return } id, err := c.tequilapi.NewIdentity(passphrase) if err != nil { warn(err) return } success("New identity created:", id.Address) } } func (c *Command) 
stopClient() { err := c.tequilapi.Stop() if err != nil { warn("Cannot stop client:", err) } success("Client stopped") } func getIdentityOptionList(tequilapi *tequilapi_client.Client) func(string) []string { return func(line string) []string { identities := []string{"new"} ids, err := tequilapi.GetIdentities() if err != nil { warn(err) return identities } for _, id := range ids { identities = append(identities, id.Address) } return identities } } func getProposalOptionList(proposals []tequilapi_client.ProposalDTO) func(string) []string { return func(line string) []string { var providerIDS []string for _, proposal := range proposals { providerIDS = append(providerIDS, proposal.ProviderID) } return providerIDS } } func newAutocompleter(tequilapi *tequilapi_client.Client, proposals []tequilapi_client.ProposalDTO) *readline.PrefixCompleter { return readline.NewPrefixCompleter( readline.PcItem( "connect", readline.PcItemDynamic( getIdentityOptionList(tequilapi), readline.PcItemDynamic( getProposalOptionList(proposals), ), ), ), readline.PcItem( "identities", readline.PcItem("new"), readline.PcItem("list"), ), readline.PcItem("status"), readline.PcItem("proposals"), readline.PcItem("ip"), readline.PcItem("disconnect"), readline.PcItem("help"), readline.PcItem("quit"), readline.PcItem("stop"), readline.PcItem( "unlock", readline.PcItemDynamic( getIdentityOptionList(tequilapi), ), ), ) }
1
10,740
I'm confused. Isn't `countryString` left empty if `len(country) != 0`?
mysteriumnetwork-node
go
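The review above flags a real problem with the patch: `country` gets the `"Unknown"` fallback, but the code still prints `countryString`, which is now never assigned. A sketch of a version without the dangling variable; the identifiers mirror the file above, and the helper itself is hypothetical:

```go
package cli

import "fmt"

// describeProposal shows the intended behaviour: apply the "Unknown" fallback
// to country and then format that same variable, instead of the unassigned
// countryString from the patch.
func describeProposal(providerID string, proposalID int, country string) string {
	if len(country) == 0 {
		country = "Unknown"
	}
	return fmt.Sprintf("- provider id: %v, proposal id: %v, country: %v",
		providerID, proposalID, country)
}
```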
@@ -53,7 +53,7 @@ DesktopSwitch::DesktopSwitch(const ILXQtPanelPluginStartupInfo &startupInfo) : mLabelType(static_cast<DesktopSwitchButton::LabelType>(-1)) { m_buttons = new QButtonGroup(this); - connect (m_pSignalMapper, SIGNAL(mapped(int)), this, SLOT(setDesktop(int))); + connect (m_pSignalMapper, QOverload<int>::of(&QSignalMapper::mapped), this, &DesktopSwitch::setDesktop); mLayout = new LXQt::GridLayout(&mWidget); mWidget.setLayout(mLayout);
1
/* BEGIN_COMMON_COPYRIGHT_HEADER * (c)LGPL2+ * * LXQt - a lightweight, Qt based, desktop toolset * https://lxqt.org * * Copyright: 2011 Razor team * Authors: * Petr Vanek <[email protected]> * * This program or library is free software; you can redistribute it * and/or modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * You should have received a copy of the GNU Lesser General * Public License along with this library; if not, write to the * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301 USA * * END_COMMON_COPYRIGHT_HEADER */ #include <QButtonGroup> #include <QWheelEvent> #include <QtDebug> #include <QSignalMapper> #include <QTimer> #include <lxqt-globalkeys.h> #include <LXQt/GridLayout> #include <KWindowSystem/KWindowSystem> #include <QX11Info> #include <cmath> #include "desktopswitch.h" #include "desktopswitchbutton.h" #include "desktopswitchconfiguration.h" static const QString DEFAULT_SHORTCUT_TEMPLATE(QStringLiteral("Control+F%1")); DesktopSwitch::DesktopSwitch(const ILXQtPanelPluginStartupInfo &startupInfo) : QObject(), ILXQtPanelPlugin(startupInfo), m_pSignalMapper(new QSignalMapper(this)), m_desktopCount(KWindowSystem::numberOfDesktops()), mRows(-1), mShowOnlyActive(false), mDesktops(new NETRootInfo(QX11Info::connection(), NET::NumberOfDesktops | NET::CurrentDesktop | NET::DesktopNames, NET::WM2DesktopLayout)), mLabelType(static_cast<DesktopSwitchButton::LabelType>(-1)) { m_buttons = new QButtonGroup(this); connect (m_pSignalMapper, SIGNAL(mapped(int)), this, SLOT(setDesktop(int))); mLayout = new LXQt::GridLayout(&mWidget); mWidget.setLayout(mLayout); settingsChanged(); onCurrentDesktopChanged(KWindowSystem::currentDesktop()); QTimer::singleShot(0, this, SLOT(registerShortcuts())); connect(m_buttons, SIGNAL(buttonClicked(int)), this, SLOT(setDesktop(int))); connect(KWindowSystem::self(), SIGNAL(numberOfDesktopsChanged(int)), SLOT(onNumberOfDesktopsChanged(int))); connect(KWindowSystem::self(), SIGNAL(currentDesktopChanged(int)), SLOT(onCurrentDesktopChanged(int))); connect(KWindowSystem::self(), SIGNAL(desktopNamesChanged()), SLOT(onDesktopNamesChanged())); connect(KWindowSystem::self(), static_cast<void (KWindowSystem::*)(WId, NET::Properties, NET::Properties2)>(&KWindowSystem::windowChanged), this, &DesktopSwitch::onWindowChanged); } void DesktopSwitch::registerShortcuts() { // Register shortcuts to change desktop GlobalKeyShortcut::Action * gshortcut; QString path; QString description; for (int i = 0; i < 12; ++i) { path = QStringLiteral("/panel/%1/desktop_%2").arg(settings()->group()).arg(i + 1); description = tr("Switch to desktop %1").arg(i + 1); gshortcut = GlobalKeyShortcut::Client::instance()->addAction(QString(), path, description, this); if (nullptr != gshortcut) { m_keys << gshortcut; connect(gshortcut, &GlobalKeyShortcut::Action::registrationFinished, this, &DesktopSwitch::shortcutRegistered); connect(gshortcut, SIGNAL(activated()), m_pSignalMapper, SLOT(map())); m_pSignalMapper->setMapping(gshortcut, i); } } } void DesktopSwitch::shortcutRegistered() { GlobalKeyShortcut::Action * const shortcut = 
qobject_cast<GlobalKeyShortcut::Action*>(sender()); disconnect(shortcut, &GlobalKeyShortcut::Action::registrationFinished, this, &DesktopSwitch::shortcutRegistered); const int i = m_keys.indexOf(shortcut); Q_ASSERT(-1 != i); if (shortcut->shortcut().isEmpty()) { shortcut->changeShortcut(DEFAULT_SHORTCUT_TEMPLATE.arg(i + 1)); } } void DesktopSwitch::onWindowChanged(WId id, NET::Properties properties, NET::Properties2 /*properties2*/) { if (properties.testFlag(NET::WMState) && isWindowHighlightable(id)) { KWindowInfo info = KWindowInfo(id, NET::WMDesktop | NET::WMState); int desktop = info.desktop(); if (!info.valid() || info.onAllDesktops()) return; else { DesktopSwitchButton *button = static_cast<DesktopSwitchButton *>(m_buttons->button(desktop - 1)); if(button) button->setUrgencyHint(id, info.hasState(NET::DemandsAttention)); } } } void DesktopSwitch::refresh() { const QList<QAbstractButton*> btns = m_buttons->buttons(); int i = 0; const int current_desktop = KWindowSystem::currentDesktop(); const int current_cnt = btns.count(); const int border = qMin(btns.count(), m_desktopCount); //update existing buttons for ( ; i < border; ++i) { DesktopSwitchButton * button = qobject_cast<DesktopSwitchButton*>(btns[i]); button->update(i, mLabelType, KWindowSystem::desktopName(i + 1).isEmpty() ? tr("Desktop %1").arg(i + 1) : KWindowSystem::desktopName(i + 1)); button->setVisible(!mShowOnlyActive || i + 1 == current_desktop); } //create new buttons (if neccessary) QAbstractButton *b; for ( ; i < m_desktopCount; ++i) { b = new DesktopSwitchButton(&mWidget, i, mLabelType, KWindowSystem::desktopName(i+1).isEmpty() ? tr("Desktop %1").arg(i+1) : KWindowSystem::desktopName(i+1)); mWidget.layout()->addWidget(b); m_buttons->addButton(b, i); b->setVisible(!mShowOnlyActive || i + 1 == current_desktop); } //delete unneeded buttons (if neccessary) for ( ; i < current_cnt; ++i) { b = m_buttons->buttons().constLast(); m_buttons->removeButton(b); mWidget.layout()->removeWidget(b); delete b; } } bool DesktopSwitch::isWindowHighlightable(WId window) { // this method was borrowed from the taskbar plugin QFlags<NET::WindowTypeMask> ignoreList; ignoreList |= NET::DesktopMask; ignoreList |= NET::DockMask; ignoreList |= NET::SplashMask; ignoreList |= NET::ToolbarMask; ignoreList |= NET::MenuMask; ignoreList |= NET::PopupMenuMask; ignoreList |= NET::NotificationMask; KWindowInfo info(window, NET::WMWindowType | NET::WMState, NET::WM2TransientFor); if (!info.valid()) return false; if (NET::typeMatchesMask(info.windowType(NET::AllTypesMask), ignoreList)) return false; if (info.state() & NET::SkipTaskbar) return false; // WM_TRANSIENT_FOR hint not set - normal window WId transFor = info.transientFor(); if (transFor == 0 || transFor == window || transFor == (WId) QX11Info::appRootWindow()) return true; info = KWindowInfo(transFor, NET::WMWindowType); QFlags<NET::WindowTypeMask> normalFlag; normalFlag |= NET::NormalMask; normalFlag |= NET::DialogMask; normalFlag |= NET::UtilityMask; return !NET::typeMatchesMask(info.windowType(NET::AllTypesMask), normalFlag); } DesktopSwitch::~DesktopSwitch() = default; void DesktopSwitch::setDesktop(int desktop) { KWindowSystem::setCurrentDesktop(desktop + 1); } void DesktopSwitch::onNumberOfDesktopsChanged(int count) { qDebug() << "Desktop count changed from" << m_desktopCount << "to" << count; m_desktopCount = count; refresh(); } void DesktopSwitch::onCurrentDesktopChanged(int current) { if (mShowOnlyActive) { int i = 1; const auto buttons = m_buttons->buttons(); for (const auto button : 
buttons) { if (current == i) { button->setChecked(true); button->setVisible(true); } else { button->setVisible(false); } ++i; } } else { QAbstractButton *button = m_buttons->button(current - 1); if (button) button->setChecked(true); } } void DesktopSwitch::onDesktopNamesChanged() { refresh(); } void DesktopSwitch::settingsChanged() { const int rows = settings()->value(QStringLiteral("rows"), 1).toInt(); const bool show_only_active = settings()->value(QStringLiteral("showOnlyActive"), false).toBool(); const int label_type = settings()->value(QStringLiteral("labelType"), DesktopSwitchButton::LABEL_TYPE_NUMBER).toInt(); const bool need_realign = mRows != rows || show_only_active != mShowOnlyActive; const bool need_refresh = mLabelType != static_cast<DesktopSwitchButton::LabelType>(label_type) || show_only_active != mShowOnlyActive; mRows = rows; mShowOnlyActive = show_only_active; mLabelType = static_cast<DesktopSwitchButton::LabelType>(label_type); if (need_realign) { // WARNING: Changing the desktop layout may call "LXQtPanel::realign", which calls // "DesktopSwitch::realign()". Therefore, the desktop layout should not be changed // inside the latter method. int columns = static_cast<int>(ceil(static_cast<float>(m_desktopCount) / mRows)); if (panel()->isHorizontal()) { mDesktops->setDesktopLayout(NET::OrientationHorizontal, columns, mRows, mWidget.isRightToLeft() ? NET::DesktopLayoutCornerTopRight : NET::DesktopLayoutCornerTopLeft); } else { mDesktops->setDesktopLayout(NET::OrientationHorizontal, mRows, columns, mWidget.isRightToLeft() ? NET::DesktopLayoutCornerTopRight : NET::DesktopLayoutCornerTopLeft); } realign(); // in case it isn't called when the desktop layout changes } if (need_refresh) refresh(); } void DesktopSwitch::realign() { mLayout->setEnabled(false); if (panel()->isHorizontal()) { mLayout->setRowCount(mShowOnlyActive ? 1 : mRows); mLayout->setColumnCount(0); } else { mLayout->setColumnCount(mShowOnlyActive ? 1 : mRows); mLayout->setRowCount(0); } mLayout->setEnabled(true); } QDialog *DesktopSwitch::configureDialog() { return new DesktopSwitchConfiguration(settings()); } DesktopSwitchWidget::DesktopSwitchWidget(): QFrame(), m_mouseWheelThresholdCounter(0) { } void DesktopSwitchWidget::wheelEvent(QWheelEvent *e) { // Without some sort of threshold which has to be passed, scrolling is too sensitive m_mouseWheelThresholdCounter -= e->delta(); // If the user hasn't scrolled far enough in one direction (positive or negative): do nothing if(abs(m_mouseWheelThresholdCounter) < 100) return; int max = KWindowSystem::numberOfDesktops(); int delta = e->delta() < 0 ? 1 : -1; int current = KWindowSystem::currentDesktop() + delta; if (current > max){ current = 1; } else if (current < 1) current = max; m_mouseWheelThresholdCounter = 0; KWindowSystem::setCurrentDesktop(current); }
1
6,816
Would you please update this by using `#if (QT_VERSION >= QT_VERSION_CHECK(5,15,0))` and `QSignalMapper::mappedInt` for Qt >= 5.15?
lxqt-lxqt-panel
cpp
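The review above asks for a Qt-version guard so the deprecated `QSignalMapper::mapped(int)` overload is only used on older Qt. A sketch of the requested `connect` call as it would sit in the constructor, using the identifiers from the file above (Qt 5.15 introduces `QSignalMapper::mappedInt` as the replacement):

```cpp
#if (QT_VERSION >= QT_VERSION_CHECK(5, 15, 0))
    connect(m_pSignalMapper, &QSignalMapper::mappedInt,
            this, &DesktopSwitch::setDesktop);
#else
    connect(m_pSignalMapper, QOverload<int>::of(&QSignalMapper::mapped),
            this, &DesktopSwitch::setDesktop);
#endif
```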
@@ -262,6 +262,8 @@ func PopulateInstallRoot(installChroot *safechroot.Chroot, packagesToInstall []s filesystemPkg = "filesystem" ) + defer stopGPGAgent(installChroot) + ReportAction("Initializing RPM Database") installRoot := filepath.Join(rootMountPoint, installChroot.RootDir())
1
// Copyright (c) Microsoft Corporation. // Licensed under the MIT License. package installutils import ( "crypto/rand" "fmt" "os" "path" "path/filepath" "sort" "strconv" "strings" "syscall" "time" "microsoft.com/pkggen/imagegen/configuration" "microsoft.com/pkggen/imagegen/diskutils" "microsoft.com/pkggen/internal/file" "microsoft.com/pkggen/internal/jsonutils" "microsoft.com/pkggen/internal/logger" "microsoft.com/pkggen/internal/pkgjson" "microsoft.com/pkggen/internal/retry" "microsoft.com/pkggen/internal/safechroot" "microsoft.com/pkggen/internal/shell" ) const ( rootMountPoint = "/" rootUser = "root" // /boot directory should be only accesible by root. The directories need the execute bit as well. bootDirectoryFileMode = 0600 bootDirectoryDirMode = 0700 ) // PackageList represents the list of packages to install into an image type PackageList struct { Packages []string `json:"packages"` } // CreateMountPointPartitionMap creates a map between the mountpoint supplied in the config file and the device path // of the partition // - partDevPathMap is a map of partition IDs to partition device paths // - partIDToFsTypeMap is a map of partition IDs to filesystem type // - config is the SystemConfig from a config file // Output // - mountPointDevPathMap is a map of mountpoint to partition device path // - mountPointToFsTypeMap is a map of mountpoint to filesystem type // - mountPointToMountArgsMap is a map of mountpoint to mount arguments to be passed on a call to mount func CreateMountPointPartitionMap(partDevPathMap, partIDToFsTypeMap map[string]string, config configuration.SystemConfig) (mountPointDevPathMap, mountPointToFsTypeMap, mountPointToMountArgsMap map[string]string) { mountPointDevPathMap = make(map[string]string) mountPointToFsTypeMap = make(map[string]string) mountPointToMountArgsMap = make(map[string]string) // Go through each PartitionSetting for _, partitionSetting := range config.PartitionSettings { logger.Log.Tracef("%v[%v]", partitionSetting.ID, partitionSetting.MountPoint) partDevPath, ok := partDevPathMap[partitionSetting.ID] if ok { mountPointDevPathMap[partitionSetting.MountPoint] = partDevPath mountPointToFsTypeMap[partitionSetting.MountPoint] = partIDToFsTypeMap[partitionSetting.ID] mountPointToMountArgsMap[partitionSetting.MountPoint] = partitionSetting.MountOptions } logger.Log.Tracef("%v", mountPointDevPathMap) } return } // CreateInstallRoot walks through the map of mountpoints and mounts the partitions into installroot // - installRoot is the destination path to mount these partitions // - mountPointMap is the map of mountpoint to partition device path func CreateInstallRoot(installRoot string, mountPointMap, mountPointToMountArgsMap map[string]string) (installMap map[string]string, err error) { installMap = make(map[string]string) // Always mount root first err = mountSingleMountPoint(installRoot, rootMountPoint, mountPointMap[rootMountPoint], mountPointToMountArgsMap[rootMountPoint]) if err != nil { return } installMap[rootMountPoint] = mountPointMap[rootMountPoint] // Mount rest of the mountpoints for mountPoint, device := range mountPointMap { if mountPoint != "" && mountPoint != rootMountPoint { err = mountSingleMountPoint(installRoot, mountPoint, device, mountPointToMountArgsMap[mountPoint]) if err != nil { return } installMap[mountPoint] = device } } return } // DestroyInstallRoot unmounts each of the installroot mountpoints in order, ensuring that the root mountpoint is last // - installRoot is the path to the root where the mountpoints exist // - 
installMap is the map of mountpoints to partition device paths func DestroyInstallRoot(installRoot string, installMap map[string]string) (err error) { logger.Log.Trace("Destroying InstallRoot") // Convert the installMap into a slice of mount points so it can be sorted var allMountsToUnmount []string for mountPoint := range installMap { // Skip empty mount points if mountPoint == "" { continue } allMountsToUnmount = append(allMountsToUnmount, mountPoint) } // Sort the mount points // This way nested mounts will be handled correctly: // e.g.: /dev/pts is unmounted and then /dev is. sort.Sort(sort.Reverse(sort.StringSlice(allMountsToUnmount))) for _, mountPoint := range allMountsToUnmount { err = unmountSingleMountPoint(installRoot, mountPoint) if err != nil { return } } return } func mountSingleMountPoint(installRoot, mountPoint, device, extraOptions string) (err error) { mountPath := filepath.Join(installRoot, mountPoint) err = os.MkdirAll(mountPath, os.ModePerm) if err != nil { logger.Log.Warnf("Failed to create mountpoint: %v", err) return } err = mount(mountPath, device, extraOptions) return } func unmountSingleMountPoint(installRoot, mountPoint string) (err error) { mountPath := filepath.Join(installRoot, mountPoint) err = umount(mountPath) return } func mount(path, device, extraOptions string) (err error) { const squashErrors = false if extraOptions == "" { err = shell.ExecuteLive(squashErrors, "mount", device, path) } else { err = shell.ExecuteLive(squashErrors, "mount", "-o", extraOptions, device, path) } if err != nil { return } return } func umount(path string) (err error) { const ( retryAttempts = 3 retryDuration = time.Second unmountFlags = 0 ) err = retry.Run(func() error { return syscall.Unmount(path, unmountFlags) }, retryAttempts, retryDuration) return } // PackageNamesFromSingleSystemConfig goes through the packageslist field in the systemconfig and extracts the list of packages // from each of the packagelists // - systemConfig is the systemconfig field from the config file // Since kernel is not part of the packagelist, it is added separately from KernelOptions. func PackageNamesFromSingleSystemConfig(systemConfig configuration.SystemConfig) (finalPkgList []string, err error) { var packages PackageList for _, packageList := range systemConfig.PackageLists { // Read json logger.Log.Tracef("Processing packages from packagelist %v", packageList) packages, err = getPackagesFromJSON(packageList) if err != nil { return } logger.Log.Tracef("packages %v", packages) finalPkgList = append(finalPkgList, packages.Packages...) } logger.Log.Tracef("finalPkgList = %v", finalPkgList) return } // SelectKernelPackage selects the kernel to use for the current installation // based on the KernelOptions field of the system configuration. 
func SelectKernelPackage(systemConfig configuration.SystemConfig, isLiveInstall bool) (kernelPkg string, err error) { const ( defaultOption = "default" hypervOption = "hyperv" ) optionToUse := defaultOption // Only consider Hyper-V for an ISO if isLiveInstall { // Only check if running on Hyper V if there's a kernel option for it _, found := systemConfig.KernelOptions[hypervOption] if found { isHyperV, err := isRunningInHyperV() if err != nil { logger.Log.Warnf("Unable to detect if the current system is Hyper-V, using the default kernel") } else if isHyperV { optionToUse = hypervOption } } } kernelPkg = systemConfig.KernelOptions[optionToUse] if kernelPkg == "" { err = fmt.Errorf("no kernel for option (%s) set", optionToUse) return } return } // PackageNamesFromConfig takes the union of top level package names for every system configuration in a top level // config file. // - config is the config file to proccess func PackageNamesFromConfig(config configuration.Config) (packageList []*pkgjson.PackageVer, err error) { // For each system config, clone all packages that go into it for _, systemCfg := range config.SystemConfigs { var packagesToInstall []string // Get list of packages to install into image packagesToInstall, err = PackageNamesFromSingleSystemConfig(systemCfg) if err != nil { return } packages := make([]*pkgjson.PackageVer, 0, len(packagesToInstall)) for _, pkg := range packagesToInstall { packages = append(packages, &pkgjson.PackageVer{ Name: pkg, }) } packageList = append(packageList, packages...) } return } // PopulateInstallRoot fills the installroot with packages and configures the image for boot // - installChroot is a pointer to the install Chroot object // - packagesToInstall is a slice of packages to install // - config is the systemconfig field from the config file // - installMap is a map of mountpoints to physical device paths // - mountPointToFsTypeMap is a map of mountpoints to filesystem type // - mountPointToMountArgsMap is a map of mountpoints to mount options // - isRootFS specifies if the installroot is either backed by a directory (rootfs) or a raw disk // - encryptedRoot stores information about the encrypted root device if root encryption is enabled func PopulateInstallRoot(installChroot *safechroot.Chroot, packagesToInstall []string, config configuration.SystemConfig, installMap, mountPointToFsTypeMap, mountPointToMountArgsMap map[string]string, isRootFS bool, encryptedRoot diskutils.EncryptedRootDevice) (err error) { const ( filesystemPkg = "filesystem" ) ReportAction("Initializing RPM Database") installRoot := filepath.Join(rootMountPoint, installChroot.RootDir()) // Initialize RPM Database so we can install RPMs into the installroot err = initializeRpmDatabase(installRoot) if err != nil { return } // Calculate how many packages need to be installed so an accurate percent complete can be reported totalPackages, err := calculateTotalPackages(packagesToInstall, installRoot) if err != nil { return } // Keep a running total of how many packages have be installed through all the `tdnfInstall` invocations packagesInstalled := 0 // Install filesystem package first packagesInstalled, err = tdnfInstall(filesystemPkg, installRoot, packagesInstalled, totalPackages) if err != nil { return } hostname := config.Hostname if !isRootFS { // Add /etc/hostname err = updateHostname(installChroot.RootDir(), hostname) if err != nil { return } } // Install packages one-by-one to avoid exhausting memory // on low resource systems for _, pkg := range packagesToInstall { 
packagesInstalled, err = tdnfInstall(pkg, installRoot, packagesInstalled, totalPackages) if err != nil { return } } // Copy additional files err = copyAdditionalFiles(installChroot, config) if err != nil { return } if !isRootFS { // Configure system files err = configureSystemFiles(installChroot, hostname, installMap, mountPointToFsTypeMap, mountPointToMountArgsMap, encryptedRoot) if err != nil { return } // Add groups err = addGroups(installChroot, config.Groups) if err != nil { return } } // Add users err = addUsers(installChroot, config.Users) if err != nil { return } // Add machine-id err = addMachineID(installChroot) if err != nil { return } // Configure for encryption if config.Encryption.Enable { err = updateInitramfsForEncrypt(installChroot) if err != nil { return } } // Run post-install scripts from within the installroot chroot err = runPostInstallScripts(installChroot, config) return } func initializeRpmDatabase(installRoot string) (err error) { stdout, stderr, err := shell.Execute("rpm", "--root", installRoot, "--initdb") if err != nil { logger.Log.Warnf("Failed to create rpm database: %v", err) logger.Log.Warn(stdout) logger.Log.Warn(stderr) return } err = initializeTdnfConfiguration(installRoot) return } // initializeTdnfConfiguration installs the 'mariner-release' package // into the clean RPM root. The package is used by tdnf to properly set // the default values for its variables and internal configuration. func initializeTdnfConfiguration(installRoot string) (err error) { const ( squashErrors = false releasePackage = "mariner-release" ) logger.Log.Debugf("Downloading '%s' package to a clean RPM root under '%s'.", releasePackage, installRoot) err = shell.ExecuteLive(squashErrors, "tdnf", "download", "--alldeps", "--destdir", installRoot, releasePackage) if err != nil { logger.Log.Errorf("Failed to prepare the RPM database on downloading the 'mariner-release' package: %v", err) return } rpmSearch := filepath.Join(installRoot, "*.rpm") rpmFiles, err := filepath.Glob(rpmSearch) if err != nil { logger.Log.Errorf("Failed to prepare the RPM database while searching for RPM files: %v", err) return } defer func() { logger.Log.Tracef("Cleaning up leftover RPM files after installing 'mariner-release' package under '%s'.", installRoot) for _, file := range rpmFiles { err = os.Remove(file) if err != nil { logger.Log.Errorf("Failed to prepare the RPM database on removing leftover file (%s): %v", file, err) return } } }() logger.Log.Debugf("Installing 'mariner-release' package to a clean RPM root under '%s'.", installRoot) rpmArgs := []string{"-i", "--root", installRoot} rpmArgs = append(rpmArgs, rpmFiles...) err = shell.ExecuteLive(squashErrors, "rpm", rpmArgs...) 
if err != nil { logger.Log.Errorf("Failed to prepare the RPM database on installing the 'mariner-release' package: %v", err) return } return } func configureSystemFiles(installChroot *safechroot.Chroot, hostname string, installMap, mountPointToFsTypeMap, mountPointToMountArgsMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice) (err error) { // Update hosts file err = updateHosts(installChroot.RootDir(), hostname) if err != nil { return } // Update fstab err = updateFstab(installChroot.RootDir(), installMap, mountPointToFsTypeMap, mountPointToMountArgsMap) if err != nil { return } // Update crypttab err = updateCrypttab(installChroot.RootDir(), installMap, encryptedRoot) if err != nil { return } return } func calculateTotalPackages(packages []string, installRoot string) (totalPackages int, err error) { allPackageNames := make(map[string]bool) const tdnfAssumeNoStdErr = "Error(1032) : Operation aborted.\n" // For every package calculate what dependencies would also be installed from it. for _, pkg := range packages { var ( stdout string stderr string ) // Issue an install request but stop right before actually performing the install (assumeno) stdout, stderr, err = shell.Execute("tdnf", "install", "--assumeno", "--nogpgcheck", pkg, "--installroot", installRoot) if err != nil { // tdnf aborts the process when it detects an install with --assumeno. if stderr == tdnfAssumeNoStdErr { err = nil } else { logger.Log.Error(stderr) return } } splitStdout := strings.Split(stdout, "\n") // Search for the list of packages to be installed, // it will be prefixed with a line "Installing:" and will // end with an empty line. inPackageList := false for _, line := range splitStdout { const ( packageListPrefix = "Installing:" packageNameDelimiter = " " ) const ( packageNameIndex = iota extraInformationIndex = iota totalPackageNameParts = iota ) if !inPackageList { inPackageList = strings.HasPrefix(line, packageListPrefix) continue } else if strings.TrimSpace(line) == "" { break } // Each package to be installed will list its name, followed by a space and then various extra information pkgSplit := strings.SplitN(line, packageNameDelimiter, totalPackageNameParts) if len(pkgSplit) != totalPackageNameParts { err = fmt.Errorf("unexpected TDNF package name output: %s", line) return } allPackageNames[pkgSplit[packageNameIndex]] = true } } totalPackages = len(allPackageNames) logger.Log.Debugf("All packages to be installed (%d): %v", totalPackages, allPackageNames) return } func addMachineID(installChroot *safechroot.Chroot) (err error) { const ( squashErrors = false setupProgram = "/bin/systemd-machine-id-setup" ) // Check if systemd-machine-id-setup is present before invoking it, // some images will not use systemd (such as a container) exists, _ := file.PathExists(filepath.Join(installChroot.RootDir(), setupProgram)) if !exists { logger.Log.Debugf("'%s' not found inside chroot '%s', skipping adding machine ID", setupProgram, installChroot.RootDir()) return } ReportAction("Configuring machine id") err = installChroot.UnsafeRun(func() error { return shell.ExecuteLive(squashErrors, setupProgram) }) return } func updateInitramfsForEncrypt(installChroot *safechroot.Chroot) (err error) { err = installChroot.UnsafeRun(func() (err error) { const ( libModDir = "/lib/modules" dracutModules = "dm crypt crypt-gpg crypt-loop lvm" initrdPrefix = "/boot/initrd.img-" cryptTabPath = "/etc/crypttab" ) initrdPattern := fmt.Sprintf("%v%v", initrdPrefix, "*") initrdImageSlice, err := filepath.Glob(initrdPattern) if err 
!= nil { logger.Log.Warnf("Unable to get initrd image: %v", err) return } // Assume only one initrd image present if len(initrdImageSlice) != 1 { logger.Log.Warn("Unable to find one initrd image") logger.Log.Warnf("Initrd images found: %v", initrdImageSlice) err = fmt.Errorf("unable to find one intird image: %v", initrdImageSlice) return } initrdImage := initrdImageSlice[0] // Get the kernel version kernel := strings.TrimPrefix(initrdImage, initrdPrefix) // Construct list of files to install in initramfs installFiles := fmt.Sprintf("%v %v", cryptTabPath, diskutils.DefaultKeyFilePath) // Regenerate initramfs via Dracut dracutArgs := []string{ "-f", "--no-hostonly", "--fstab", "--kmoddir", filepath.Join(libModDir, kernel), "--add", dracutModules, "-I", installFiles, initrdImage, kernel, } _, stderr, err := shell.Execute("dracut", dracutArgs...) if err != nil { logger.Log.Warnf("Unable to execute dracut: %v", stderr) return } return }) return } func updateFstab(installRoot string, installMap, mountPointToFsTypeMap, mountPointToMountArgsMap map[string]string) (err error) { ReportAction("Configuring fstab") for mountPoint, devicePath := range installMap { if mountPoint != "" { err = addEntryToFstab(installRoot, mountPoint, devicePath, mountPointToFsTypeMap[mountPoint], mountPointToMountArgsMap[mountPoint]) if err != nil { return } } } return } func addEntryToFstab(installRoot, mountPoint, devicePath, fsType, mountArgs string) (err error) { const ( uuidPrefix = "UUID=" fstabPath = "/etc/fstab" rootfsMountPoint = "/" defaultOptions = "defaults" defaultDump = "0" disablePass = "0" rootPass = "1" defaultPass = "2" ) var options string if mountArgs == "" { options = defaultOptions } else { options = mountArgs } fullFstabPath := filepath.Join(installRoot, fstabPath) // Get the block device var device string if diskutils.IsEncryptedDevice(devicePath) { device = devicePath } else { uuid, err := GetUUID(devicePath) if err != nil { logger.Log.Warnf("Failed to get UUID for block device %v", devicePath) return err } device = fmt.Sprintf("%v%v", uuidPrefix, uuid) } // Note: Rootfs should always have a pass number of 1. 
All other mountpoints are either 0 or 2 pass := defaultPass if mountPoint == rootfsMountPoint { pass = rootPass } // Construct fstab entry and append to fstab file newEntry := fmt.Sprintf("%v %v %v %v %v %v\n", device, mountPoint, fsType, options, defaultDump, pass) err = file.Append(newEntry, fullFstabPath) if err != nil { logger.Log.Warnf("Failed to append to fstab file") return } return } func updateCrypttab(installRoot string, installMap map[string]string, encryptedRoot diskutils.EncryptedRootDevice) (err error) { ReportAction("Configuring Crypttab") for _, devicePath := range installMap { if diskutils.IsEncryptedDevice(devicePath) { err = addEntryToCrypttab(installRoot, devicePath, encryptedRoot) if err != nil { return } } } return } // Add an encryption mapping to crypttab func addEntryToCrypttab(installRoot string, devicePath string, encryptedRoot diskutils.EncryptedRootDevice) (err error) { const ( cryptTabPath = "/etc/crypttab" Options = "luks,discard" uuidPrefix = "UUID=" ) fullCryptTabPath := filepath.Join(installRoot, cryptTabPath) uuid := encryptedRoot.LuksUUID blockDevice := diskutils.GetLuksMappingName(uuid) encryptedUUID := fmt.Sprintf("%v%v", uuidPrefix, uuid) encryptionPassword := diskutils.DefaultKeyFilePath // Construct crypttab entry and append crypttab file newEntry := fmt.Sprintf("%v %v %v %v\n", blockDevice, encryptedUUID, encryptionPassword, Options) err = file.Append(newEntry, fullCryptTabPath) if err != nil { logger.Log.Warnf("Failed to append crypttab") return } return } // InstallGrubCfg installs the main grub config to the boot partition // - installRoot is the base install directory // - rootDevice holds the root partition // - bootUUID is the UUID for the boot partition // - encryptedRoot holds the encrypted root information if encrypted root is enabled // Note: this boot partition could be different than the boot partition specified in the bootloader. // This boot partition specifically indicates where to find the kernel, config files, and initrd func InstallGrubCfg(installRoot, rootDevice, bootUUID string, encryptedRoot diskutils.EncryptedRootDevice) (err error) { const ( assetGrubcfgFile = "/installer/grub2/grub.cfg" grubCfgFile = "boot/grub2/grub.cfg" ) // Copy the bootloader's grub.cfg and set the file permission installGrubCfgFile := filepath.Join(installRoot, grubCfgFile) err = file.CopyAndChangeMode(assetGrubcfgFile, installGrubCfgFile, bootDirectoryDirMode, bootDirectoryFileMode) if err != nil { return } // Add in bootUUID err = setGrubCfgBootUUID(bootUUID, installGrubCfgFile) if err != nil { logger.Log.Warnf("Failed to set bootUUID in grub.cfg: %v", err) return } // Add in rootDevice err = setGrubCfgRootDevice(rootDevice, installGrubCfgFile, encryptedRoot.LuksUUID) if err != nil { logger.Log.Warnf("Failed to set rootDevice in grub.cfg: %v", err) return } // Add in rootLuksUUID err = setGrubCfgLuksUUID(installGrubCfgFile, encryptedRoot.LuksUUID) if err != nil { logger.Log.Warnf("Failed to set luksUUID in grub.cfg: %v", err) return } // Add in logical volumes to active err = setGrubCfgLVM(installGrubCfgFile, encryptedRoot.LuksUUID) if err != nil { logger.Log.Warnf("Failed to set lvm.lv in grub.cfg: %v", err) return } return } func updateHostname(installRoot, hostname string) (err error) { ReportAction("Configuring hostname") // Update the environment variables to use the new hostname. 
env := append(shell.CurrentEnvironment(), fmt.Sprintf("HOSTNAME=%s", hostname)) shell.SetEnvironment(env) hostnameFilePath := filepath.Join(installRoot, "etc/hostname") err = file.Write(hostname, hostnameFilePath) if err != nil { logger.Log.Warnf("Failed to write hostname") return } return } func updateHosts(installRoot, hostname string) (err error) { const ( lineNumber = "6" hostsFile = "etc/hosts" ) ReportAction("Configuring hosts file") newHost := fmt.Sprintf("127.0.0.1 %v", hostname) hostsFilePath := filepath.Join(installRoot, hostsFile) err = sedInsert(lineNumber, newHost, hostsFilePath) if err != nil { logger.Log.Warnf("Failed to write hosts file") return } return } func addGroups(installChroot *safechroot.Chroot, groups []configuration.Group) (err error) { const squashErrors = false for _, group := range groups { logger.Log.Infof("Adding group (%s)", group.Name) ReportActionf("Adding group: %s", group.Name) var args = []string{group.Name} if group.GID != "" { args = append(args, "-g", group.GID) } err = installChroot.UnsafeRun(func() error { return shell.ExecuteLive(squashErrors, "groupadd", args...) }) } return } func addUsers(installChroot *safechroot.Chroot, users []configuration.User) (err error) { for _, user := range users { logger.Log.Infof("Adding user (%s)", user.Name) ReportActionf("Adding user: %s", user.Name) var homeDir string homeDir, err = createUserWithPassword(installChroot, user) if err != nil { return } err = configureUserGroupMembership(installChroot, user) if err != nil { return } err = provisionUserSSHCerts(installChroot, user, homeDir) if err != nil { return } err = configureUserStartupCommand(installChroot, user) if err != nil { return } } return } func createUserWithPassword(installChroot *safechroot.Chroot, user configuration.User) (homeDir string, err error) { const ( squashErrors = false rootHomeDir = "/root" userHomeDirPrefix = "/home" passwordExpiresBase = 10 postfixLength = 12 alphaNumeric = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" ) var ( hashedPassword string stdout string stderr string salt string ) // Get the hashed password for the user if user.PasswordHashed { hashedPassword = user.Password } else { salt, err = randomString(postfixLength, alphaNumeric) if err != nil { return } // Generate hashed password based on salt value provided. // -6 option indicates to use the SHA256/SHA512 algorithm stdout, stderr, err = shell.Execute("openssl", "passwd", "-6", "-salt", salt, user.Password) if err != nil { logger.Log.Warnf("Failed to generate hashed password") logger.Log.Warn(stderr) return } hashedPassword = strings.TrimSpace(stdout) } logger.Log.Tracef("hashed password: %v", hashedPassword) if strings.TrimSpace(hashedPassword) == "" { err = fmt.Errorf("empty password for user (%s) is not allowed", user.Name) return } // Create the user with the given hashed password if user.Name == rootUser { homeDir = rootHomeDir if user.UID != "" { logger.Log.Warnf("Ignoring UID for (%s) user, using default", rootUser) } // Update shadow file err = updateUserPassword(installChroot.RootDir(), user.Name, hashedPassword) } else { homeDir = filepath.Join(userHomeDirPrefix, user.Name) var args = []string{user.Name, "-m", "-p", hashedPassword} if user.UID != "" { args = append(args, "-u", user.UID) } err = installChroot.UnsafeRun(func() error { return shell.ExecuteLive(squashErrors, "useradd", args...) 
}) } if err != nil { return } // Update password expiration if user.PasswordExpiresDays != 0 { err = installChroot.UnsafeRun(func() error { return shell.ExecuteLive(squashErrors, "chage", "-M", strconv.FormatUint(user.PasswordExpiresDays, passwordExpiresBase), user.Name) }) } return } func configureUserGroupMembership(installChroot *safechroot.Chroot, user configuration.User) (err error) { const squashErrors = false // Update primary group if user.PrimaryGroup != "" { err = installChroot.UnsafeRun(func() error { return shell.ExecuteLive(squashErrors, "usermod", "-g", user.PrimaryGroup, user.Name) }) if err != nil { return } } // Update secondary groups if len(user.SecondaryGroups) != 0 { allGroups := strings.Join(user.SecondaryGroups, ",") err = installChroot.UnsafeRun(func() error { return shell.ExecuteLive(squashErrors, "usermod", "-a", "-G", allGroups, user.Name) }) if err != nil { return } } return } func configureUserStartupCommand(installChroot *safechroot.Chroot, user configuration.User) (err error) { const ( passwdFilePath = "etc/passwd" sedDelimiter = "|" ) if user.StartupCommand == "" { return } logger.Log.Debugf("Updating user '%s' startup command to '%s'.", user.Name, user.StartupCommand) findPattern := fmt.Sprintf(`^\(%s.*\):[^:]*$`, user.Name) replacePattern := fmt.Sprintf(`\1:%s`, user.StartupCommand) filePath := filepath.Join(installChroot.RootDir(), passwdFilePath) err = sed(findPattern, replacePattern, sedDelimiter, filePath) if err != nil { logger.Log.Errorf("Failed to update user's startup command.") return } return } func provisionUserSSHCerts(installChroot *safechroot.Chroot, user configuration.User, homeDir string) (err error) { const squashErrors = false userSSHKeyDir := filepath.Join(homeDir, ".ssh") for _, pubKey := range user.SSHPubKeyPaths { logger.Log.Infof("Adding ssh key (%s) to user (%s)", filepath.Base(pubKey), user.Name) relativeDst := filepath.Join(userSSHKeyDir, filepath.Base(pubKey)) fileToCopy := safechroot.FileToCopy{ Src: pubKey, Dest: relativeDst, } err = installChroot.AddFiles(fileToCopy) if err != nil { return } } if len(user.SSHPubKeyPaths) != 0 { const sshDirectoryPermission = "0700" // Change ownership of the folder to belong to the user and their primary group err = installChroot.UnsafeRun(func() (err error) { // Find the primary group of the user stdout, stderr, err := shell.Execute("id", "-g", user.Name) if err != nil { logger.Log.Warnf(stderr) return } primaryGroup := strings.TrimSpace(stdout) logger.Log.Debugf("Primary group for user (%s) is (%s)", user.Name, primaryGroup) ownership := fmt.Sprintf("%s:%s", user.Name, primaryGroup) err = shell.ExecuteLive(squashErrors, "chown", "-R", ownership, userSSHKeyDir) if err != nil { return } err = shell.ExecuteLive(squashErrors, "chmod", "-R", sshDirectoryPermission, userSSHKeyDir) return }) if err != nil { return } } return } func updateUserPassword(installRoot, username, password string) (err error) { const ( shadowFilePath = "etc/shadow" sedDelimiter = "|" ) findPattern := fmt.Sprintf("%v:x:", username) replacePattern := fmt.Sprintf("%v:%v:", username, password) filePath := filepath.Join(installRoot, shadowFilePath) err = sed(findPattern, replacePattern, sedDelimiter, filePath) if err != nil { logger.Log.Warnf("Failed to write hashed password to shadow file") return } return } func tdnfInstall(packageName, installRoot string, currentPackagesInstalled, totalPackages int) (packagesInstalled int, err error) { packagesInstalled = currentPackagesInstalled onStdout := func(args ...interface{}) { const 
tdnfInstallPrefix = "Installing/Updating: " // Only process lines that match tdnfInstallPrefix if len(args) == 0 { return } line := args[0].(string) if !strings.HasPrefix(line, tdnfInstallPrefix) { return } status := fmt.Sprintf("Installing: %s", strings.TrimPrefix(line, tdnfInstallPrefix)) ReportAction(status) packagesInstalled++ // Calculate and report what percentage of packages have been installed percentOfPackagesInstalled := float32(packagesInstalled) / float32(totalPackages) progress := int(percentOfPackagesInstalled * 100) ReportPercentComplete(progress) } err = shell.ExecuteLiveWithCallback(onStdout, logger.Log.Warn, "tdnf", "install", packageName, "--installroot", installRoot, "--nogpgcheck", "--assumeyes") if err != nil { logger.Log.Warnf("Failed to tdnf install: %v. Package name: %v", err, packageName) } return } func sed(find, replace, delimiter, file string) (err error) { const squashErrors = false replacement := fmt.Sprintf("s%s%s%s%s%s", delimiter, find, delimiter, replace, delimiter) return shell.ExecuteLive(squashErrors, "sed", "-i", replacement, file) } func sedInsert(line, replace, file string) (err error) { const squashErrors = false insertAtLine := fmt.Sprintf("%si%s", line, replace) return shell.ExecuteLive(squashErrors, "sed", "-i", insertAtLine, file) } func getPackagesFromJSON(file string) (pkgList PackageList, err error) { err = jsonutils.ReadJSONFile(file, &pkgList) if err != nil { logger.Log.Warnf("Could not read JSON file: %v", err) return } return } // InstallBootloader installs the proper bootloader for this type of image // - installChroot is a pointer to the install Chroot object // - bootType indicates the type of boot loader to add. // - bootUUID is the UUID of the boot partition // Note: this boot partition could be different than the boot partition specified in the main grub config. // This boot partition specifically indicates where to find the main grub cfg func InstallBootloader(installChroot *safechroot.Chroot, encryptEnabled bool, bootType, bootUUID, bootDevPath string) (err error) { const ( efiMountPoint = "/boot/efi" efiBootType = "efi" legacyBootType = "legacy" noneBootType = "none" ) ReportAction("Configuring bootloader") switch bootType { case legacyBootType: err = installLegacyBootloader(installChroot, bootDevPath) if err != nil { return } case efiBootType: efiPath := filepath.Join(installChroot.RootDir(), efiMountPoint) err = installEfiBootloader(encryptEnabled, efiPath, bootUUID) if err != nil { return } case noneBootType: // Nothing to do here default: err = fmt.Errorf("unknown boot type: %v", bootType) return } return } // Note: We assume that the /boot directory is present. Whether it is backed by an explicit "boot" partition or present // as part of a general "root" partition is assumed to have been done already. func installLegacyBootloader(installChroot *safechroot.Chroot, bootDevPath string) (err error) { const ( squashErrors = false ) // Since we do not have grub2-pc installed in the setup environment, we need to generate the legacy grub bootloader // inside of the install environment. 
This assumes the install environment has the grub2-pc package installed err = installChroot.UnsafeRun(func() (err error) { err = shell.ExecuteLive(squashErrors, "grub2-install", "--target=i386-pc", "--boot-directory=/boot", bootDevPath) err = shell.ExecuteLive(squashErrors, "chmod", "-R", "go-rwx", "/boot/grub2/") return }) return } // EnableCryptoDisk enables Grub to boot from an encrypted disk // - installChroot is the installation chroot func EnableCryptoDisk(installChroot *safechroot.Chroot) (err error) { const ( grubPath = "/etc/default/grub" grubCryptoDisk = "GRUB_ENABLE_CRYPTODISK=y\n" grubPreloadModules = `GRUB_PRELOAD_MODULES="lvm"` ) err = installChroot.UnsafeRun(func() error { err := file.Append(grubCryptoDisk, grubPath) if err != nil { logger.Log.Warnf("Failed to add grub cryptodisk: %v", err) return err } err = file.Append(grubPreloadModules, grubPath) if err != nil { logger.Log.Warnf("Failed to add grub preload modules: %v", err) return err } return err }) return } // GetUUID queries the UUID of the given partition // - device is the device path of the desired partition func GetUUID(device string) (stdout string, err error) { stdout, _, err = shell.Execute("blkid", device, "-s", "UUID", "-o", "value") if err != nil { return } logger.Log.Trace(stdout) stdout = strings.TrimSpace(stdout) return } // GetPartUUID queries the PARTUUID of the given partition // - device is the device path of the desired partition func GetPartUUID(device string) (stdout string, err error) { stdout, _, err = shell.Execute("blkid", device, "-s", "PARTUUID", "-o", "value") if err != nil { return } logger.Log.Trace(stdout) stdout = strings.TrimSpace(stdout) return } // installEfi copies the efi binaries and grub configuration to the appropriate // installRoot/boot/efi folder // It is expected that shim (bootx64.efi) and grub2 (grub2.efi) are installed // into the EFI directory via the package list installation mechanism. 
func installEfiBootloader(encryptEnabled bool, installRoot, bootUUID string) (err error) { const ( defaultCfgFilename = "grub.cfg" encryptCfgFilename = "grubEncrypt.cfg" efiAssetDir = "/installer/efi/x86_64" grubAssetDir = "/installer/efi/grub" efiFinalDir = "EFI/BOOT" grubFinalDir = "boot/grub2" ) // Copy the bootloader's grub.cfg grubAssetPath := filepath.Join(grubAssetDir, defaultCfgFilename) if encryptEnabled { grubAssetPath = filepath.Join(grubAssetDir, encryptCfgFilename) } grubFinalPath := filepath.Join(installRoot, grubFinalDir, defaultCfgFilename) err = file.CopyAndChangeMode(grubAssetPath, grubFinalPath, bootDirectoryDirMode, bootDirectoryFileMode) if err != nil { logger.Log.Warnf("Failed to copy grub.cfg: %v", err) return } // Add in bootUUID err = setGrubCfgBootUUID(bootUUID, grubFinalPath) if err != nil { logger.Log.Warnf("Failed to set bootUUID in grub.cfg: %v", err) return } // Add in encrypted volume if encryptEnabled { err = setGrubCfgEncryptedVolume(grubFinalPath) if err != nil { logger.Log.Warnf("Failed to set encrypted volume in grub.cfg: %v", err) return } } return } func copyAdditionalFiles(installChroot *safechroot.Chroot, config configuration.SystemConfig) (err error) { ReportAction("Copying additional files") for srcFile, dstFile := range config.AdditionalFiles { fileToCopy := safechroot.FileToCopy{ Src: srcFile, Dest: dstFile, } err = installChroot.AddFiles(fileToCopy) if err != nil { return } } return } func runPostInstallScripts(installChroot *safechroot.Chroot, config configuration.SystemConfig) (err error) { const squashErrors = false for _, script := range config.PostInstallScripts { // Copy the script from this chroot into the install chroot before running it scriptPath := script.Path fileToCopy := safechroot.FileToCopy{ Src: scriptPath, Dest: scriptPath, } installChroot.AddFiles(fileToCopy) if err != nil { return } ReportActionf("Running post-install script: %s", path.Base(script.Path)) logger.Log.Infof("Running post-install script: %s", script.Path) err = installChroot.UnsafeRun(func() error { err := shell.ExecuteLive(squashErrors, shell.ShellProgram, "-c", scriptPath, script.Args) if err != nil { return err } err = os.Remove(scriptPath) if err != nil { logger.Log.Errorf("Failed to cleanup post-install script (%s). 
Error: %s", scriptPath, err) } return err }) if err != nil { return } } return } func setGrubCfgLVM(grubPath, luksUUID string) (err error) { const ( lvmPrefix = "rd.lvm.lv=" lvmPattern = "{{.LVM}}" sedDelimiter = "@" ) var lvm string if luksUUID != "" { lvm = fmt.Sprintf("%v%v", lvmPrefix, diskutils.GetEncryptedRootVolPath()) } err = sed(lvmPattern, lvm, sedDelimiter, grubPath) if err != nil { logger.Log.Warnf("Failed to set grub.cfg's LVM setting: %v", err) } return } func setGrubCfgLuksUUID(grubPath, uuid string) (err error) { const ( luksUUIDPrefix = "luks.uuid=" luksUUIDPattern = "{{.LuksUUID}}" sedDelimiter = "/" ) var luksUUID string if uuid != "" { luksUUID = fmt.Sprintf("%v%v", luksUUIDPrefix, uuid) } err = sed(luksUUIDPattern, luksUUID, sedDelimiter, grubPath) if err != nil { logger.Log.Warnf("Failed to set grub.cfg's luksUUID: %v", err) return } return } func setGrubCfgBootUUID(bootUUID, grubPath string) (err error) { const ( bootUUIDPattern = "{{.BootUUID}}" sedDelimiter = "/" ) err = sed(bootUUIDPattern, bootUUID, sedDelimiter, grubPath) if err != nil { logger.Log.Warnf("Failed to set grub.cfg's bootUUID: %v", err) return } return } func setGrubCfgEncryptedVolume(grubPath string) (err error) { const ( encryptedVolPattern = "{{.EncryptedVolume}}" sedDelimiter = "@" lvmPrefix = "lvm/" ) encryptedVol := fmt.Sprintf("%v%v%v%v", "(", lvmPrefix, diskutils.GetEncryptedRootVol(), ")") err = sed(encryptedVolPattern, encryptedVol, sedDelimiter, grubPath) if err != nil { logger.Log.Warnf("Failed to grub.cfg's encryptedVolume: %v", err) return } return } func setGrubCfgRootDevice(rootDevice, grubPath, luksUUID string) (err error) { const ( rootDevicePattern = "{{.RootPartition}}" sedDelimiter = "@" ) if luksUUID != "" { rootDevice = diskutils.GetEncryptedRootVolMapping() } err = sed(rootDevicePattern, rootDevice, sedDelimiter, grubPath) if err != nil { logger.Log.Warnf("Failed to set grub.cfg's rootDevice: %v", err) return } return } // ExtractPartitionArtifacts scans through the SystemConfig and generates all the partition-based artifacts specified. // - workDirPath is the directory to place the artifacts // - partIDToDevPathMap is a map of partition IDs to partition device paths func ExtractPartitionArtifacts(workDirPath string, diskIndex int, disk configuration.Disk, partIDToDevPathMap map[string]string) (err error) { const ( ext4ArtifactType = "ext4" ) // Scan each partition for Artifacts for i, partition := range disk.Partitions { for _, artifact := range partition.Artifacts { if artifact.Type == ext4ArtifactType { devPath := partIDToDevPathMap[partition.ID] // Ext4 artifact type output is a .raw of the partition finalName := fmt.Sprintf("disk%d.partition%d.raw", diskIndex, i) err = createRawArtifact(workDirPath, devPath, finalName) if err != nil { return err } } } } return } func createRawArtifact(workDirPath, devPath, name string) (err error) { const ( defaultBlockSize = 1024 * 1024 // 1MB squashErrors = true ) fullPath := filepath.Join(workDirPath, name) ddArgs := []string{ fmt.Sprintf("if=%s", devPath), // Input file. fmt.Sprintf("of=%s", fullPath), // Output file. fmt.Sprintf("bs=%d", defaultBlockSize), // Size of one copied block. } return shell.ExecuteLive(squashErrors, "dd", ddArgs...) } // randomString generates a random string of the length specified // using the provided legalCharacters. crypto.rand is more secure // than math.rand and does not need to be seeded. 
func randomString(length int, legalCharacters string) (output string, err error) { b := make([]byte, length) _, err = rand.Read(b) if err != nil { return } count := byte(len(legalCharacters)) for i := range b { idx := b[i] % count b[i] = legalCharacters[idx] } output = string(b) return } // isRunningInHyperV checks if the program is running in a Hyper-V Virtual Machine. func isRunningInHyperV() (isHyperV bool, err error) { const ( dmesgHypervTag = "Hyper-V" ) stdout, stderr, err := shell.Execute("dmesg") if err != nil { logger.Log.Warnf("stderr: %v", stderr) return } logger.Log.Debugf("dmesg system: %s", stdout) // dmesg will print information about Hyper-V if it detects that Hyper-V is the hypervisor. // There will be multiple mentions of Hyper-V in the output (entry for BIOS as well as hypervisor) // and diagnostic information about hypervisor version. // Outside of Hyper-V, this name will not be reported. if strings.Contains(stdout, dmesgHypervTag) { logger.Log.Infof("Detected Hyper-V Host") isHyperV = true } return } //KernelPackages returns a list of kernel packages obtained from KernelOptions in the config's SystemConfigs func KernelPackages(config configuration.Config) []*pkgjson.PackageVer { var packageList []*pkgjson.PackageVer // Add all the provided kernels to the package list for _, cfg := range config.SystemConfigs { for name, kernelPath := range cfg.KernelOptions { // Ignore comments if name[0] == '_' { continue } kernelName := filepath.Base(kernelPath) logger.Log.Tracef("Processing kernel %s derived from %s (required for option %s)", kernelName, kernelPath, name) packageList = append(packageList, &pkgjson.PackageVer{Name: kernelName}) } } return packageList }
1
12131
This type of change raises the question of how we can cleanly shut down or prevent all running agents, so we do not run the risk of an agent holding kernel mountpoints, which can cause an unsafe unmount/chroot exit. Will open a GitHub issue to track this. This specific change itself is fine. It is a pointed change specifically for stopping the offending agent. An illustrative teardown sketch follows this entry.
microsoft-CBL-Mariner
go
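The review comment above flags that an agent left running inside the install root can hold files open under a kernel mountpoint and make the unmount/chroot teardown unsafe. Below is a minimal sketch of one way to handle that before tearing the chroot down: kill any holders with fuser, then retry the unmount. It is illustrative only; the helper names and the /mnt/installroot path are hypothetical, and the actual CBL-Mariner installer uses its internal shell and retry packages rather than this standalone standard-library version.

// teardown_sketch.go - illustrative only, not part of the CBL-Mariner sources.
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// stopMountHolders asks fuser to kill any process with open files under path.
// fuser exits non-zero when nothing is found, so that case is only logged.
func stopMountHolders(path string) {
	out, err := exec.Command("fuser", "-km", path).CombinedOutput()
	if err != nil {
		fmt.Printf("no holders found for %s (or fuser failed): %v\n", path, err)
	}
	if len(out) > 0 {
		fmt.Printf("fuser output for %s: %s\n", path, out)
	}
}

// unmountWithRetry mirrors the retry-around-syscall.Unmount pattern used by
// the installer's umount helper, but with only standard-library code.
func unmountWithRetry(path string, attempts int, delay time.Duration) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = syscall.Unmount(path, 0); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("failed to unmount %s after %d attempts: %w", path, attempts, err)
}

func main() {
	const installRootMount = "/mnt/installroot" // hypothetical mount point
	stopMountHolders(installRootMount)
	if err := unmountWithRetry(installRootMount, 3, time.Second); err != nil {
		fmt.Println(err)
	}
}

In DestroyInstallRoot above, a holder-stopping step like this would run before the sorted unmount loop, so nested mounts such as /dev/pts are released only after nothing inside the chroot can reopen them.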
@@ -3,6 +3,8 @@
 
 package io.protostuff.runtime.model;
 
+import com.google.protobuf.CodedOutputStream;
+
 public final class ModelProtobuf {
   private ModelProtobuf() {
   }
1
// Generated by the protocol buffer compiler. DO NOT EDIT! // source: ModelProtobuf.proto package io.protostuff.runtime.model; public final class ModelProtobuf { private ModelProtobuf() { } public static void registerAllExtensions( com.google.protobuf.ExtensionRegistryLite registry) { } public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { registerAllExtensions( (com.google.protobuf.ExtensionRegistryLite) registry); } public interface UserOrBuilder extends // @@protoc_insertion_point(interface_extends:io.protostuff.runtime.model.User) com.google.protobuf.MessageOrBuilder { /** * <code>string name = 1;</code> */ java.lang.String getName(); /** * <code>string name = 1;</code> */ com.google.protobuf.ByteString getNameBytes(); } /** * Protobuf type {@code io.protostuff.runtime.model.User} */ public static final class User extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:io.protostuff.runtime.model.User) UserOrBuilder { // Use User.newBuilder() to construct. private User(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private User() { name_ = ""; } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } @SuppressWarnings("unused") private User( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); int mutable_bitField0_ = 0; try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!input.skipField(tag)) { done = true; } break; } case 10: { java.lang.String s = input.readStringRequireUtf8(); name_ = s; break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_User_descriptor; } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_User_fieldAccessorTable .ensureFieldAccessorsInitialized( io.protostuff.runtime.model.ModelProtobuf.User.class, io.protostuff.runtime.model.ModelProtobuf.User.Builder.class); } public static final int NAME_FIELD_NUMBER = 1; private volatile java.lang.Object name_; /** * <code>string name = 1;</code> */ public java.lang.String getName() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); name_ = s; return s; } } /** * <code>string name = 1;</code> */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = 
memoizedIsInitialized; if (isInitialized == 1) { return true; } if (isInitialized == 0) { return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!getNameBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); } } public int getSerializedSize() { int size = memoizedSize; if (size != -1) { return size; } size = 0; if (!getNameBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); } memoizedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof io.protostuff.runtime.model.ModelProtobuf.User)) { return super.equals(obj); } io.protostuff.runtime.model.ModelProtobuf.User other = (io.protostuff.runtime.model.ModelProtobuf.User) obj; boolean result = true; result = result && getName() .equals(other.getName()); return result; } @SuppressWarnings("unchecked") @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + NAME_FIELD_NUMBER; hash = (53 * hash) + getName().hashCode(); hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.User parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public 
static io.protostuff.runtime.model.ModelProtobuf.User parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static io.protostuff.runtime.model.ModelProtobuf.User parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(io.protostuff.runtime.model.ModelProtobuf.User prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code io.protostuff.runtime.model.User} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:io.protostuff.runtime.model.User) io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_User_descriptor; } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_User_fieldAccessorTable .ensureFieldAccessorsInitialized( io.protostuff.runtime.model.ModelProtobuf.User.class, io.protostuff.runtime.model.ModelProtobuf.User.Builder.class); } // Construct using io.protostuff.runtime.model.ModelProtobuf.User.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { } } public Builder clear() { super.clear(); name_ = ""; return this; } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_User_descriptor; } public io.protostuff.runtime.model.ModelProtobuf.User getDefaultInstanceForType() { return io.protostuff.runtime.model.ModelProtobuf.User.getDefaultInstance(); } public io.protostuff.runtime.model.ModelProtobuf.User build() { io.protostuff.runtime.model.ModelProtobuf.User result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } public io.protostuff.runtime.model.ModelProtobuf.User buildPartial() { io.protostuff.runtime.model.ModelProtobuf.User result = new io.protostuff.runtime.model.ModelProtobuf.User(this); result.name_ = name_; 
onBuilt(); return result; } public Builder clone() { return (Builder) super.clone(); } public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.setField(field, value); } public Builder clearField( com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof io.protostuff.runtime.model.ModelProtobuf.User) { return mergeFrom((io.protostuff.runtime.model.ModelProtobuf.User) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(io.protostuff.runtime.model.ModelProtobuf.User other) { if (other == io.protostuff.runtime.model.ModelProtobuf.User.getDefaultInstance()) { return this; } if (!other.getName().isEmpty()) { name_ = other.name_; onChanged(); } onChanged(); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { io.protostuff.runtime.model.ModelProtobuf.User parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (io.protostuff.runtime.model.ModelProtobuf.User) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private java.lang.Object name_ = ""; /** * <code>string name = 1;</code> */ public java.lang.String getName() { java.lang.Object ref = name_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); name_ = s; return s; } else { return (java.lang.String) ref; } } /** * <code>string name = 1;</code> */ public com.google.protobuf.ByteString getNameBytes() { java.lang.Object ref = name_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); name_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <code>string name = 1;</code> */ public Builder setName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } name_ = value; onChanged(); return this; } /** * <code>string name = 1;</code> */ public Builder clearName() { name_ = getDefaultInstance().getName(); onChanged(); return this; } /** * <code>string name = 1;</code> */ public Builder setNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); name_ = value; onChanged(); return this; } public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return this; } public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return this; } // 
@@protoc_insertion_point(builder_scope:io.protostuff.runtime.model.User) } // @@protoc_insertion_point(class_scope:io.protostuff.runtime.model.User) private static final io.protostuff.runtime.model.ModelProtobuf.User DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new io.protostuff.runtime.model.ModelProtobuf.User(); } public static io.protostuff.runtime.model.ModelProtobuf.User getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<User> PARSER = new com.google.protobuf.AbstractParser<User>() { public User parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new User(input, extensionRegistry); } }; public static com.google.protobuf.Parser<User> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<User> getParserForType() { return PARSER; } public io.protostuff.runtime.model.ModelProtobuf.User getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } public interface RequestHeaderOrBuilder extends // @@protoc_insertion_point(interface_extends:io.protostuff.runtime.model.RequestHeader) com.google.protobuf.MessageOrBuilder { /** * <code>string destMicroservice = 1;</code> */ java.lang.String getDestMicroservice(); /** * <code>string destMicroservice = 1;</code> */ com.google.protobuf.ByteString getDestMicroserviceBytes(); /** * <code>int32 msgType = 2;</code> */ int getMsgType(); /** * <code>int32 flags = 3;</code> */ int getFlags(); /** * <code>string schemaId = 5;</code> */ java.lang.String getSchemaId(); /** * <code>string schemaId = 5;</code> */ com.google.protobuf.ByteString getSchemaIdBytes(); /** * <code>string operationName = 6;</code> */ java.lang.String getOperationName(); /** * <code>string operationName = 6;</code> */ com.google.protobuf.ByteString getOperationNameBytes(); /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ int getCseContextCount(); /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ boolean containsCseContext( java.lang.String key); /** * Use {@link #getCseContextMap()} instead. */ @java.lang.Deprecated java.util.Map<java.lang.String, java.lang.String> getCseContext(); /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ java.util.Map<java.lang.String, java.lang.String> getCseContextMap(); /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ java.lang.String getCseContextOrDefault( java.lang.String key, java.lang.String defaultValue); /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ java.lang.String getCseContextOrThrow( java.lang.String key); /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ int getUserMapCount(); /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ boolean containsUserMap( java.lang.String key); /** * Use {@link #getUserMapMap()} instead. 
*/ @java.lang.Deprecated java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> getUserMap(); /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> getUserMapMap(); /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ io.protostuff.runtime.model.ModelProtobuf.User getUserMapOrDefault( java.lang.String key, io.protostuff.runtime.model.ModelProtobuf.User defaultValue); /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ io.protostuff.runtime.model.ModelProtobuf.User getUserMapOrThrow( java.lang.String key); /** * <code>repeated string list = 9;</code> */ java.util.List<java.lang.String> getListList(); /** * <code>repeated string list = 9;</code> */ int getListCount(); /** * <code>repeated string list = 9;</code> */ java.lang.String getList(int index); /** * <code>repeated string list = 9;</code> */ com.google.protobuf.ByteString getListBytes(int index); /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ java.util.List<io.protostuff.runtime.model.ModelProtobuf.User> getUserListList(); /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ io.protostuff.runtime.model.ModelProtobuf.User getUserList(int index); /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ int getUserListCount(); /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ java.util.List<? extends io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder> getUserListOrBuilderList(); /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder getUserListOrBuilder( int index); } /** * Protobuf type {@code io.protostuff.runtime.model.RequestHeader} */ public static final class RequestHeader extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:io.protostuff.runtime.model.RequestHeader) RequestHeaderOrBuilder { // Use RequestHeader.newBuilder() to construct. 
private RequestHeader(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) { super(builder); } private RequestHeader() { destMicroservice_ = ""; msgType_ = 0; flags_ = 0; schemaId_ = ""; operationName_ = ""; list_ = com.google.protobuf.LazyStringArrayList.EMPTY; userList_ = java.util.Collections.emptyList(); } @java.lang.Override public final com.google.protobuf.UnknownFieldSet getUnknownFields() { return com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } private RequestHeader( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { this(); int mutable_bitField0_ = 0; try { boolean done = false; while (!done) { int tag = input.readTag(); switch (tag) { case 0: done = true; break; default: { if (!input.skipField(tag)) { done = true; } break; } case 10: { java.lang.String s = input.readStringRequireUtf8(); destMicroservice_ = s; break; } case 16: { msgType_ = input.readInt32(); break; } case 24: { flags_ = input.readInt32(); break; } case 42: { java.lang.String s = input.readStringRequireUtf8(); schemaId_ = s; break; } case 50: { java.lang.String s = input.readStringRequireUtf8(); operationName_ = s; break; } case 58: { if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { cseContext_ = com.google.protobuf.MapField.newMapField( CseContextDefaultEntryHolder.defaultEntry); mutable_bitField0_ |= 0x00000020; } com.google.protobuf.MapEntry<java.lang.String, java.lang.String> cseContext__ = input.readMessage( CseContextDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); cseContext_.getMutableMap().put( cseContext__.getKey(), cseContext__.getValue()); break; } case 66: { if (!((mutable_bitField0_ & 0x00000040) == 0x00000040)) { userMap_ = com.google.protobuf.MapField.newMapField( UserMapDefaultEntryHolder.defaultEntry); mutable_bitField0_ |= 0x00000040; } com.google.protobuf.MapEntry<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> userMap__ = input.readMessage( UserMapDefaultEntryHolder.defaultEntry.getParserForType(), extensionRegistry); userMap_.getMutableMap().put( userMap__.getKey(), userMap__.getValue()); break; } case 74: { java.lang.String s = input.readStringRequireUtf8(); if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) { list_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000080; } list_.add(s); break; } case 82: { if (!((mutable_bitField0_ & 0x00000100) == 0x00000100)) { userList_ = new java.util.ArrayList<io.protostuff.runtime.model.ModelProtobuf.User>(); mutable_bitField0_ |= 0x00000100; } userList_.add( input.readMessage(io.protostuff.runtime.model.ModelProtobuf.User.parser(), extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( e).setUnfinishedMessage(this); } finally { if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) { list_ = list_.getUnmodifiableView(); } if (((mutable_bitField0_ & 0x00000100) == 0x00000100)) { userList_ = java.util.Collections.unmodifiableList(userList_); } makeExtensionsImmutable(); } } public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_RequestHeader_descriptor; } @SuppressWarnings({"rawtypes"}) protected com.google.protobuf.MapField internalGetMapField( int 
number) { switch (number) { case 7: return internalGetCseContext(); case 8: return internalGetUserMap(); default: throw new RuntimeException( "Invalid map field number: " + number); } } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_RequestHeader_fieldAccessorTable .ensureFieldAccessorsInitialized( io.protostuff.runtime.model.ModelProtobuf.RequestHeader.class, io.protostuff.runtime.model.ModelProtobuf.RequestHeader.Builder.class); } @SuppressWarnings("unused") private int bitField0_; public static final int DESTMICROSERVICE_FIELD_NUMBER = 1; private volatile java.lang.Object destMicroservice_; /** * <code>string destMicroservice = 1;</code> */ public java.lang.String getDestMicroservice() { java.lang.Object ref = destMicroservice_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); destMicroservice_ = s; return s; } } /** * <code>string destMicroservice = 1;</code> */ public com.google.protobuf.ByteString getDestMicroserviceBytes() { java.lang.Object ref = destMicroservice_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); destMicroservice_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int MSGTYPE_FIELD_NUMBER = 2; private int msgType_; /** * <code>int32 msgType = 2;</code> */ public int getMsgType() { return msgType_; } public static final int FLAGS_FIELD_NUMBER = 3; private int flags_; /** * <code>int32 flags = 3;</code> */ public int getFlags() { return flags_; } public static final int SCHEMAID_FIELD_NUMBER = 5; private volatile java.lang.Object schemaId_; /** * <code>string schemaId = 5;</code> */ public java.lang.String getSchemaId() { java.lang.Object ref = schemaId_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); schemaId_ = s; return s; } } /** * <code>string schemaId = 5;</code> */ public com.google.protobuf.ByteString getSchemaIdBytes() { java.lang.Object ref = schemaId_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); schemaId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int OPERATIONNAME_FIELD_NUMBER = 6; private volatile java.lang.Object operationName_; /** * <code>string operationName = 6;</code> */ public java.lang.String getOperationName() { java.lang.Object ref = operationName_; if (ref instanceof java.lang.String) { return (java.lang.String) ref; } else { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); operationName_ = s; return s; } } /** * <code>string operationName = 6;</code> */ public com.google.protobuf.ByteString getOperationNameBytes() { java.lang.Object ref = operationName_; if (ref instanceof java.lang.String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); operationName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } public static final int CSECONTEXT_FIELD_NUMBER = 7; private static final class 
CseContextDefaultEntryHolder { static final com.google.protobuf.MapEntry<java.lang.String, java.lang.String> defaultEntry = com.google.protobuf.MapEntry.<java.lang.String, java.lang.String>newDefaultInstance( io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_RequestHeader_CseContextEntry_descriptor, com.google.protobuf.WireFormat.FieldType.STRING, "", com.google.protobuf.WireFormat.FieldType.STRING, ""); } private com.google.protobuf.MapField<java.lang.String, java.lang.String> cseContext_; private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetCseContext() { if (cseContext_ == null) { return com.google.protobuf.MapField.emptyMapField( CseContextDefaultEntryHolder.defaultEntry); } return cseContext_; } public int getCseContextCount() { return internalGetCseContext().getMap().size(); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public boolean containsCseContext( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } return internalGetCseContext().getMap().containsKey(key); } /** * Use {@link #getCseContextMap()} instead. */ @java.lang.Deprecated public java.util.Map<java.lang.String, java.lang.String> getCseContext() { return getCseContextMap(); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public java.util.Map<java.lang.String, java.lang.String> getCseContextMap() { return internalGetCseContext().getMap(); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public java.lang.String getCseContextOrDefault( java.lang.String key, java.lang.String defaultValue) { if (key == null) { throw new java.lang.NullPointerException(); } java.util.Map<java.lang.String, java.lang.String> map = internalGetCseContext().getMap(); return map.getOrDefault(key, defaultValue); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public java.lang.String getCseContextOrThrow( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } java.util.Map<java.lang.String, java.lang.String> map = internalGetCseContext().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } public static final int USERMAP_FIELD_NUMBER = 8; private static final class UserMapDefaultEntryHolder { static final com.google.protobuf.MapEntry<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> defaultEntry = com.google.protobuf.MapEntry.<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User>newDefaultInstance( io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_RequestHeader_UserMapEntry_descriptor, com.google.protobuf.WireFormat.FieldType.STRING, "", com.google.protobuf.WireFormat.FieldType.MESSAGE, io.protostuff.runtime.model.ModelProtobuf.User.getDefaultInstance()); } private com.google.protobuf.MapField<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> userMap_; private com.google.protobuf.MapField<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> internalGetUserMap() { if (userMap_ == null) { return com.google.protobuf.MapField.emptyMapField( UserMapDefaultEntryHolder.defaultEntry); } return userMap_; } public int getUserMapCount() { return internalGetUserMap().getMap().size(); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public boolean containsUserMap( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } return 
internalGetUserMap().getMap().containsKey(key); } /** * Use {@link #getUserMapMap()} instead. */ @java.lang.Deprecated public java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> getUserMap() { return getUserMapMap(); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> getUserMapMap() { return internalGetUserMap().getMap(); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User getUserMapOrDefault( java.lang.String key, io.protostuff.runtime.model.ModelProtobuf.User defaultValue) { if (key == null) { throw new java.lang.NullPointerException(); } java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> map = internalGetUserMap().getMap(); return map.getOrDefault(key, defaultValue); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User getUserMapOrThrow( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> map = internalGetUserMap().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } public static final int LIST_FIELD_NUMBER = 9; private com.google.protobuf.LazyStringList list_; /** * <code>repeated string list = 9;</code> */ public com.google.protobuf.ProtocolStringList getListList() { return list_; } /** * <code>repeated string list = 9;</code> */ public int getListCount() { return list_.size(); } /** * <code>repeated string list = 9;</code> */ public java.lang.String getList(int index) { return list_.get(index); } /** * <code>repeated string list = 9;</code> */ public com.google.protobuf.ByteString getListBytes(int index) { return list_.getByteString(index); } public static final int USERLIST_FIELD_NUMBER = 10; private java.util.List<io.protostuff.runtime.model.ModelProtobuf.User> userList_; /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public java.util.List<io.protostuff.runtime.model.ModelProtobuf.User> getUserListList() { return userList_; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public java.util.List<? 
extends io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder> getUserListOrBuilderList() { return userList_; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public int getUserListCount() { return userList_.size(); } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User getUserList(int index) { return userList_.get(index); } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder getUserListOrBuilder( int index) { return userList_.get(index); } private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized == 1) { return true; } if (isInitialized == 0) { return false; } memoizedIsInitialized = 1; return true; } public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { if (!getDestMicroserviceBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 1, destMicroservice_); } if (msgType_ != 0) { output.writeInt32(2, msgType_); } if (flags_ != 0) { output.writeInt32(3, flags_); } if (!getSchemaIdBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 5, schemaId_); } if (!getOperationNameBytes().isEmpty()) { com.google.protobuf.GeneratedMessageV3.writeString(output, 6, operationName_); } com.google.protobuf.GeneratedMessageV3 .serializeStringMapTo( output, internalGetCseContext(), CseContextDefaultEntryHolder.defaultEntry, 7); com.google.protobuf.GeneratedMessageV3 .serializeStringMapTo( output, internalGetUserMap(), UserMapDefaultEntryHolder.defaultEntry, 8); for (int i = 0; i < list_.size(); i++) { com.google.protobuf.GeneratedMessageV3.writeString(output, 9, list_.getRaw(i)); } for (int i = 0; i < userList_.size(); i++) { output.writeMessage(10, userList_.get(i)); } } public int getSerializedSize() { int size = memoizedSize; if (size != -1) { return size; } size = 0; if (!getDestMicroserviceBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, destMicroservice_); } if (msgType_ != 0) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(2, msgType_); } if (flags_ != 0) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(3, flags_); } if (!getSchemaIdBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, schemaId_); } if (!getOperationNameBytes().isEmpty()) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, operationName_); } for (java.util.Map.Entry<java.lang.String, java.lang.String> entry : internalGetCseContext().getMap() .entrySet()) { com.google.protobuf.MapEntry<java.lang.String, java.lang.String> cseContext__ = CseContextDefaultEntryHolder.defaultEntry.newBuilderForType() .setKey(entry.getKey()) .setValue(entry.getValue()) .build(); size += com.google.protobuf.CodedOutputStream .computeMessageSize(7, cseContext__); } for (java.util.Map.Entry<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> entry : internalGetUserMap() .getMap() .entrySet()) { com.google.protobuf.MapEntry<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> userMap__ = UserMapDefaultEntryHolder.defaultEntry.newBuilderForType() .setKey(entry.getKey()) .setValue(entry.getValue()) .build(); size += com.google.protobuf.CodedOutputStream .computeMessageSize(8, userMap__); } { int dataSize = 0; for (int i = 0; i < 
list_.size(); i++) { dataSize += computeStringSizeNoTag(list_.getRaw(i)); } size += dataSize; size += 1 * getListList().size(); } for (int i = 0; i < userList_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(10, userList_.get(i)); } memoizedSize = size; return size; } private static final long serialVersionUID = 0L; @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { return true; } if (!(obj instanceof io.protostuff.runtime.model.ModelProtobuf.RequestHeader)) { return super.equals(obj); } io.protostuff.runtime.model.ModelProtobuf.RequestHeader other = (io.protostuff.runtime.model.ModelProtobuf.RequestHeader) obj; boolean result = true; result = result && getDestMicroservice() .equals(other.getDestMicroservice()); result = result && (getMsgType() == other.getMsgType()); result = result && (getFlags() == other.getFlags()); result = result && getSchemaId() .equals(other.getSchemaId()); result = result && getOperationName() .equals(other.getOperationName()); result = result && internalGetCseContext().equals( other.internalGetCseContext()); result = result && internalGetUserMap().equals( other.internalGetUserMap()); result = result && getListList() .equals(other.getListList()); result = result && getUserListList() .equals(other.getUserListList()); return result; } @SuppressWarnings("unchecked") @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; hash = (19 * hash) + getDescriptor().hashCode(); hash = (37 * hash) + DESTMICROSERVICE_FIELD_NUMBER; hash = (53 * hash) + getDestMicroservice().hashCode(); hash = (37 * hash) + MSGTYPE_FIELD_NUMBER; hash = (53 * hash) + getMsgType(); hash = (37 * hash) + FLAGS_FIELD_NUMBER; hash = (53 * hash) + getFlags(); hash = (37 * hash) + SCHEMAID_FIELD_NUMBER; hash = (53 * hash) + getSchemaId().hashCode(); hash = (37 * hash) + OPERATIONNAME_FIELD_NUMBER; hash = (53 * hash) + getOperationName().hashCode(); if (!internalGetCseContext().getMap().isEmpty()) { hash = (37 * hash) + CSECONTEXT_FIELD_NUMBER; hash = (53 * hash) + internalGetCseContext().hashCode(); } if (!internalGetUserMap().getMap().isEmpty()) { hash = (37 * hash) + USERMAP_FIELD_NUMBER; hash = (53 * hash) + internalGetUserMap().hashCode(); } if (getListCount() > 0) { hash = (37 * hash) + LIST_FIELD_NUMBER; hash = (53 * hash) + getListList().hashCode(); } if (getUserListCount() > 0) { hash = (37 * hash) + USERLIST_FIELD_NUMBER; hash = (53 * hash) + getUserListList().hashCode(); } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( com.google.protobuf.ByteString data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return PARSER.parseFrom(data, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseDelimitedFrom( java.io.InputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseDelimitedWithIOException(PARSER, input, extensionRegistry); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { return com.google.protobuf.GeneratedMessageV3 .parseWithIOException(PARSER, input, extensionRegistry); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder() { return DEFAULT_INSTANCE.toBuilder(); } public static Builder newBuilder(io.protostuff.runtime.model.ModelProtobuf.RequestHeader prototype) { return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); } public Builder toBuilder() { return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); } @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** * Protobuf type {@code io.protostuff.runtime.model.RequestHeader} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder<Builder> implements // @@protoc_insertion_point(builder_implements:io.protostuff.runtime.model.RequestHeader) io.protostuff.runtime.model.ModelProtobuf.RequestHeaderOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_RequestHeader_descriptor; } @SuppressWarnings({"rawtypes"}) protected com.google.protobuf.MapField internalGetMapField( int number) { switch (number) { case 7: return internalGetCseContext(); case 8: return internalGetUserMap(); default: throw new RuntimeException( "Invalid map field number: " + number); } } @SuppressWarnings({"rawtypes"}) protected com.google.protobuf.MapField internalGetMutableMapField( int number) { switch (number) { case 7: return internalGetMutableCseContext(); case 8: return internalGetMutableUserMap(); default: throw new RuntimeException( "Invalid map field number: " + number); } } protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internalGetFieldAccessorTable() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_RequestHeader_fieldAccessorTable .ensureFieldAccessorsInitialized( io.protostuff.runtime.model.ModelProtobuf.RequestHeader.class, io.protostuff.runtime.model.ModelProtobuf.RequestHeader.Builder.class); } // Construct using io.protostuff.runtime.model.ModelProtobuf.RequestHeader.newBuilder() private Builder() { maybeForceBuilderInitialization(); } private Builder( com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getUserListFieldBuilder(); } } public Builder clear() { super.clear(); destMicroservice_ = ""; msgType_ = 0; flags_ = 0; schemaId_ = ""; operationName_ = ""; internalGetMutableCseContext().clear(); internalGetMutableUserMap().clear(); list_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); if (userListBuilder_ == null) { userList_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000100); } else { userListBuilder_.clear(); } return this; } public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return io.protostuff.runtime.model.ModelProtobuf.internal_static_io_protostuff_runtime_model_RequestHeader_descriptor; } public io.protostuff.runtime.model.ModelProtobuf.RequestHeader getDefaultInstanceForType() { return io.protostuff.runtime.model.ModelProtobuf.RequestHeader.getDefaultInstance(); } public io.protostuff.runtime.model.ModelProtobuf.RequestHeader build() { io.protostuff.runtime.model.ModelProtobuf.RequestHeader result = buildPartial(); if (!result.isInitialized()) { throw newUninitializedMessageException(result); } return result; } @SuppressWarnings("unused") public io.protostuff.runtime.model.ModelProtobuf.RequestHeader buildPartial() { io.protostuff.runtime.model.ModelProtobuf.RequestHeader result = new io.protostuff.runtime.model.ModelProtobuf.RequestHeader(this); int from_bitField0_ = bitField0_; int 
to_bitField0_ = 0; result.destMicroservice_ = destMicroservice_; result.msgType_ = msgType_; result.flags_ = flags_; result.schemaId_ = schemaId_; result.operationName_ = operationName_; result.cseContext_ = internalGetCseContext(); result.cseContext_.makeImmutable(); result.userMap_ = internalGetUserMap(); result.userMap_.makeImmutable(); if (((bitField0_ & 0x00000080) == 0x00000080)) { list_ = list_.getUnmodifiableView(); bitField0_ = (bitField0_ & ~0x00000080); } result.list_ = list_; if (userListBuilder_ == null) { if (((bitField0_ & 0x00000100) == 0x00000100)) { userList_ = java.util.Collections.unmodifiableList(userList_); bitField0_ = (bitField0_ & ~0x00000100); } result.userList_ = userList_; } else { result.userList_ = userListBuilder_.build(); } result.bitField0_ = to_bitField0_; onBuilt(); return result; } public Builder clone() { return (Builder) super.clone(); } public Builder setField( com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.setField(field, value); } public Builder clearField( com.google.protobuf.Descriptors.FieldDescriptor field) { return (Builder) super.clearField(field); } public Builder clearOneof( com.google.protobuf.Descriptors.OneofDescriptor oneof) { return (Builder) super.clearOneof(oneof); } public Builder setRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, int index, Object value) { return (Builder) super.setRepeatedField(field, index, value); } public Builder addRepeatedField( com.google.protobuf.Descriptors.FieldDescriptor field, Object value) { return (Builder) super.addRepeatedField(field, value); } public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof io.protostuff.runtime.model.ModelProtobuf.RequestHeader) { return mergeFrom((io.protostuff.runtime.model.ModelProtobuf.RequestHeader) other); } else { super.mergeFrom(other); return this; } } public Builder mergeFrom(io.protostuff.runtime.model.ModelProtobuf.RequestHeader other) { if (other == io.protostuff.runtime.model.ModelProtobuf.RequestHeader.getDefaultInstance()) { return this; } if (!other.getDestMicroservice().isEmpty()) { destMicroservice_ = other.destMicroservice_; onChanged(); } if (other.getMsgType() != 0) { setMsgType(other.getMsgType()); } if (other.getFlags() != 0) { setFlags(other.getFlags()); } if (!other.getSchemaId().isEmpty()) { schemaId_ = other.schemaId_; onChanged(); } if (!other.getOperationName().isEmpty()) { operationName_ = other.operationName_; onChanged(); } internalGetMutableCseContext().mergeFrom( other.internalGetCseContext()); internalGetMutableUserMap().mergeFrom( other.internalGetUserMap()); if (!other.list_.isEmpty()) { if (list_.isEmpty()) { list_ = other.list_; bitField0_ = (bitField0_ & ~0x00000080); } else { ensureListIsMutable(); list_.addAll(other.list_); } onChanged(); } if (userListBuilder_ == null) { if (!other.userList_.isEmpty()) { if (userList_.isEmpty()) { userList_ = other.userList_; bitField0_ = (bitField0_ & ~0x00000100); } else { ensureUserListIsMutable(); userList_.addAll(other.userList_); } onChanged(); } } else { if (!other.userList_.isEmpty()) { if (userListBuilder_.isEmpty()) { userListBuilder_.dispose(); userListBuilder_ = null; userList_ = other.userList_; bitField0_ = (bitField0_ & ~0x00000100); userListBuilder_ = com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? 
getUserListFieldBuilder() : null; } else { userListBuilder_.addAllMessages(other.userList_); } } } onChanged(); return this; } public final boolean isInitialized() { return true; } public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { io.protostuff.runtime.model.ModelProtobuf.RequestHeader parsedMessage = null; try { parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (io.protostuff.runtime.model.ModelProtobuf.RequestHeader) e.getUnfinishedMessage(); throw e.unwrapIOException(); } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); } } return this; } private int bitField0_; private java.lang.Object destMicroservice_ = ""; /** * <code>string destMicroservice = 1;</code> */ public java.lang.String getDestMicroservice() { java.lang.Object ref = destMicroservice_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); destMicroservice_ = s; return s; } else { return (java.lang.String) ref; } } /** * <code>string destMicroservice = 1;</code> */ public com.google.protobuf.ByteString getDestMicroserviceBytes() { java.lang.Object ref = destMicroservice_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); destMicroservice_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <code>string destMicroservice = 1;</code> */ public Builder setDestMicroservice( java.lang.String value) { if (value == null) { throw new NullPointerException(); } destMicroservice_ = value; onChanged(); return this; } /** * <code>string destMicroservice = 1;</code> */ public Builder clearDestMicroservice() { destMicroservice_ = getDefaultInstance().getDestMicroservice(); onChanged(); return this; } /** * <code>string destMicroservice = 1;</code> */ public Builder setDestMicroserviceBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); destMicroservice_ = value; onChanged(); return this; } private int msgType_; /** * <code>int32 msgType = 2;</code> */ public int getMsgType() { return msgType_; } /** * <code>int32 msgType = 2;</code> */ public Builder setMsgType(int value) { msgType_ = value; onChanged(); return this; } /** * <code>int32 msgType = 2;</code> */ public Builder clearMsgType() { msgType_ = 0; onChanged(); return this; } private int flags_; /** * <code>int32 flags = 3;</code> */ public int getFlags() { return flags_; } /** * <code>int32 flags = 3;</code> */ public Builder setFlags(int value) { flags_ = value; onChanged(); return this; } /** * <code>int32 flags = 3;</code> */ public Builder clearFlags() { flags_ = 0; onChanged(); return this; } private java.lang.Object schemaId_ = ""; /** * <code>string schemaId = 5;</code> */ public java.lang.String getSchemaId() { java.lang.Object ref = schemaId_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); schemaId_ = s; return s; } else { return (java.lang.String) ref; } } /** * <code>string schemaId = 5;</code> */ public com.google.protobuf.ByteString getSchemaIdBytes() { java.lang.Object ref = schemaId_; if (ref instanceof String) { com.google.protobuf.ByteString b = 
com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); schemaId_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <code>string schemaId = 5;</code> */ public Builder setSchemaId( java.lang.String value) { if (value == null) { throw new NullPointerException(); } schemaId_ = value; onChanged(); return this; } /** * <code>string schemaId = 5;</code> */ public Builder clearSchemaId() { schemaId_ = getDefaultInstance().getSchemaId(); onChanged(); return this; } /** * <code>string schemaId = 5;</code> */ public Builder setSchemaIdBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); schemaId_ = value; onChanged(); return this; } private java.lang.Object operationName_ = ""; /** * <code>string operationName = 6;</code> */ public java.lang.String getOperationName() { java.lang.Object ref = operationName_; if (!(ref instanceof java.lang.String)) { com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; java.lang.String s = bs.toStringUtf8(); operationName_ = s; return s; } else { return (java.lang.String) ref; } } /** * <code>string operationName = 6;</code> */ public com.google.protobuf.ByteString getOperationNameBytes() { java.lang.Object ref = operationName_; if (ref instanceof String) { com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8( (java.lang.String) ref); operationName_ = b; return b; } else { return (com.google.protobuf.ByteString) ref; } } /** * <code>string operationName = 6;</code> */ public Builder setOperationName( java.lang.String value) { if (value == null) { throw new NullPointerException(); } operationName_ = value; onChanged(); return this; } /** * <code>string operationName = 6;</code> */ public Builder clearOperationName() { operationName_ = getDefaultInstance().getOperationName(); onChanged(); return this; } /** * <code>string operationName = 6;</code> */ public Builder setOperationNameBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); operationName_ = value; onChanged(); return this; } private com.google.protobuf.MapField<java.lang.String, java.lang.String> cseContext_; private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetCseContext() { if (cseContext_ == null) { return com.google.protobuf.MapField.emptyMapField( CseContextDefaultEntryHolder.defaultEntry); } return cseContext_; } private com.google.protobuf.MapField<java.lang.String, java.lang.String> internalGetMutableCseContext() { onChanged(); if (cseContext_ == null) { cseContext_ = com.google.protobuf.MapField.newMapField( CseContextDefaultEntryHolder.defaultEntry); } if (!cseContext_.isMutable()) { cseContext_ = cseContext_.copy(); } return cseContext_; } public int getCseContextCount() { return internalGetCseContext().getMap().size(); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public boolean containsCseContext( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } return internalGetCseContext().getMap().containsKey(key); } /** * Use {@link #getCseContextMap()} instead. 
*/ @java.lang.Deprecated public java.util.Map<java.lang.String, java.lang.String> getCseContext() { return getCseContextMap(); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public java.util.Map<java.lang.String, java.lang.String> getCseContextMap() { return internalGetCseContext().getMap(); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public java.lang.String getCseContextOrDefault( java.lang.String key, java.lang.String defaultValue) { if (key == null) { throw new java.lang.NullPointerException(); } java.util.Map<java.lang.String, java.lang.String> map = internalGetCseContext().getMap(); return map.getOrDefault(key, defaultValue); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public java.lang.String getCseContextOrThrow( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } java.util.Map<java.lang.String, java.lang.String> map = internalGetCseContext().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } public Builder clearCseContext() { internalGetMutableCseContext().getMutableMap() .clear(); return this; } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public Builder removeCseContext( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } internalGetMutableCseContext().getMutableMap() .remove(key); return this; } /** * Use alternate mutation accessors instead. */ @java.lang.Deprecated public java.util.Map<java.lang.String, java.lang.String> getMutableCseContext() { return internalGetMutableCseContext().getMutableMap(); } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public Builder putCseContext( java.lang.String key, java.lang.String value) { if (key == null) { throw new java.lang.NullPointerException(); } if (value == null) { throw new java.lang.NullPointerException(); } internalGetMutableCseContext().getMutableMap() .put(key, value); return this; } /** * <code>map&lt;string, string&gt; cseContext = 7;</code> */ public Builder putAllCseContext( java.util.Map<java.lang.String, java.lang.String> values) { internalGetMutableCseContext().getMutableMap() .putAll(values); return this; } private com.google.protobuf.MapField<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> userMap_; private com.google.protobuf.MapField<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> internalGetUserMap() { if (userMap_ == null) { return com.google.protobuf.MapField.emptyMapField( UserMapDefaultEntryHolder.defaultEntry); } return userMap_; } private com.google.protobuf.MapField<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> internalGetMutableUserMap() { onChanged(); if (userMap_ == null) { userMap_ = com.google.protobuf.MapField.newMapField( UserMapDefaultEntryHolder.defaultEntry); } if (!userMap_.isMutable()) { userMap_ = userMap_.copy(); } return userMap_; } public int getUserMapCount() { return internalGetUserMap().getMap().size(); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public boolean containsUserMap( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } return internalGetUserMap().getMap().containsKey(key); } /** * Use {@link #getUserMapMap()} instead. 
*/ @java.lang.Deprecated public java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> getUserMap() { return getUserMapMap(); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> getUserMapMap() { return internalGetUserMap().getMap(); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User getUserMapOrDefault( java.lang.String key, io.protostuff.runtime.model.ModelProtobuf.User defaultValue) { if (key == null) { throw new java.lang.NullPointerException(); } java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> map = internalGetUserMap().getMap(); return map.getOrDefault(key, defaultValue); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User getUserMapOrThrow( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> map = internalGetUserMap().getMap(); if (!map.containsKey(key)) { throw new java.lang.IllegalArgumentException(); } return map.get(key); } public Builder clearUserMap() { internalGetMutableUserMap().getMutableMap() .clear(); return this; } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public Builder removeUserMap( java.lang.String key) { if (key == null) { throw new java.lang.NullPointerException(); } internalGetMutableUserMap().getMutableMap() .remove(key); return this; } /** * Use alternate mutation accessors instead. */ @java.lang.Deprecated public java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> getMutableUserMap() { return internalGetMutableUserMap().getMutableMap(); } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public Builder putUserMap( java.lang.String key, io.protostuff.runtime.model.ModelProtobuf.User value) { if (key == null) { throw new java.lang.NullPointerException(); } if (value == null) { throw new java.lang.NullPointerException(); } internalGetMutableUserMap().getMutableMap() .put(key, value); return this; } /** * <code>map&lt;string, .io.protostuff.runtime.model.User&gt; userMap = 8;</code> */ public Builder putAllUserMap( java.util.Map<java.lang.String, io.protostuff.runtime.model.ModelProtobuf.User> values) { internalGetMutableUserMap().getMutableMap() .putAll(values); return this; } private com.google.protobuf.LazyStringList list_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureListIsMutable() { if (!((bitField0_ & 0x00000080) == 0x00000080)) { list_ = new com.google.protobuf.LazyStringArrayList(list_); bitField0_ |= 0x00000080; } } /** * <code>repeated string list = 9;</code> */ public com.google.protobuf.ProtocolStringList getListList() { return list_.getUnmodifiableView(); } /** * <code>repeated string list = 9;</code> */ public int getListCount() { return list_.size(); } /** * <code>repeated string list = 9;</code> */ public java.lang.String getList(int index) { return list_.get(index); } /** * <code>repeated string list = 9;</code> */ public com.google.protobuf.ByteString getListBytes(int index) { return list_.getByteString(index); } /** * <code>repeated string list = 9;</code> */ public Builder setList( int index, java.lang.String value) { if (value == null) { 
throw new NullPointerException(); } ensureListIsMutable(); list_.set(index, value); onChanged(); return this; } /** * <code>repeated string list = 9;</code> */ public Builder addList( java.lang.String value) { if (value == null) { throw new NullPointerException(); } ensureListIsMutable(); list_.add(value); onChanged(); return this; } /** * <code>repeated string list = 9;</code> */ public Builder addAllList( java.lang.Iterable<java.lang.String> values) { ensureListIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( values, list_); onChanged(); return this; } /** * <code>repeated string list = 9;</code> */ public Builder clearList() { list_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); onChanged(); return this; } /** * <code>repeated string list = 9;</code> */ public Builder addListBytes( com.google.protobuf.ByteString value) { if (value == null) { throw new NullPointerException(); } checkByteStringIsUtf8(value); ensureListIsMutable(); list_.add(value); onChanged(); return this; } private java.util.List<io.protostuff.runtime.model.ModelProtobuf.User> userList_ = java.util.Collections.emptyList(); private void ensureUserListIsMutable() { if (!((bitField0_ & 0x00000100) == 0x00000100)) { userList_ = new java.util.ArrayList<io.protostuff.runtime.model.ModelProtobuf.User>(userList_); bitField0_ |= 0x00000100; } } private com.google.protobuf.RepeatedFieldBuilderV3<io.protostuff.runtime.model.ModelProtobuf.User, io.protostuff.runtime.model.ModelProtobuf.User.Builder, io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder> userListBuilder_; /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public java.util.List<io.protostuff.runtime.model.ModelProtobuf.User> getUserListList() { if (userListBuilder_ == null) { return java.util.Collections.unmodifiableList(userList_); } else { return userListBuilder_.getMessageList(); } } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public int getUserListCount() { if (userListBuilder_ == null) { return userList_.size(); } else { return userListBuilder_.getCount(); } } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User getUserList(int index) { if (userListBuilder_ == null) { return userList_.get(index); } else { return userListBuilder_.getMessage(index); } } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder setUserList( int index, io.protostuff.runtime.model.ModelProtobuf.User value) { if (userListBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureUserListIsMutable(); userList_.set(index, value); onChanged(); } else { userListBuilder_.setMessage(index, value); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder setUserList( int index, io.protostuff.runtime.model.ModelProtobuf.User.Builder builderForValue) { if (userListBuilder_ == null) { ensureUserListIsMutable(); userList_.set(index, builderForValue.build()); onChanged(); } else { userListBuilder_.setMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder addUserList(io.protostuff.runtime.model.ModelProtobuf.User value) { if (userListBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureUserListIsMutable(); userList_.add(value); onChanged(); } else { 
userListBuilder_.addMessage(value); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder addUserList( int index, io.protostuff.runtime.model.ModelProtobuf.User value) { if (userListBuilder_ == null) { if (value == null) { throw new NullPointerException(); } ensureUserListIsMutable(); userList_.add(index, value); onChanged(); } else { userListBuilder_.addMessage(index, value); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder addUserList( io.protostuff.runtime.model.ModelProtobuf.User.Builder builderForValue) { if (userListBuilder_ == null) { ensureUserListIsMutable(); userList_.add(builderForValue.build()); onChanged(); } else { userListBuilder_.addMessage(builderForValue.build()); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder addUserList( int index, io.protostuff.runtime.model.ModelProtobuf.User.Builder builderForValue) { if (userListBuilder_ == null) { ensureUserListIsMutable(); userList_.add(index, builderForValue.build()); onChanged(); } else { userListBuilder_.addMessage(index, builderForValue.build()); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder addAllUserList( java.lang.Iterable<? extends io.protostuff.runtime.model.ModelProtobuf.User> values) { if (userListBuilder_ == null) { ensureUserListIsMutable(); com.google.protobuf.AbstractMessageLite.Builder.addAll( values, userList_); onChanged(); } else { userListBuilder_.addAllMessages(values); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder clearUserList() { if (userListBuilder_ == null) { userList_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000100); onChanged(); } else { userListBuilder_.clear(); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public Builder removeUserList(int index) { if (userListBuilder_ == null) { ensureUserListIsMutable(); userList_.remove(index); onChanged(); } else { userListBuilder_.remove(index); } return this; } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User.Builder getUserListBuilder( int index) { return getUserListFieldBuilder().getBuilder(index); } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder getUserListOrBuilder( int index) { if (userListBuilder_ == null) { return userList_.get(index); } else { return userListBuilder_.getMessageOrBuilder(index); } } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public java.util.List<? 
extends io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder> getUserListOrBuilderList() { if (userListBuilder_ != null) { return userListBuilder_.getMessageOrBuilderList(); } else { return java.util.Collections.unmodifiableList(userList_); } } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User.Builder addUserListBuilder() { return getUserListFieldBuilder().addBuilder( io.protostuff.runtime.model.ModelProtobuf.User.getDefaultInstance()); } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public io.protostuff.runtime.model.ModelProtobuf.User.Builder addUserListBuilder( int index) { return getUserListFieldBuilder().addBuilder( index, io.protostuff.runtime.model.ModelProtobuf.User.getDefaultInstance()); } /** * <code>repeated .io.protostuff.runtime.model.User userList = 10;</code> */ public java.util.List<io.protostuff.runtime.model.ModelProtobuf.User.Builder> getUserListBuilderList() { return getUserListFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilderV3<io.protostuff.runtime.model.ModelProtobuf.User, io.protostuff.runtime.model.ModelProtobuf.User.Builder, io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder> getUserListFieldBuilder() { if (userListBuilder_ == null) { userListBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3<io.protostuff.runtime.model.ModelProtobuf.User, io.protostuff.runtime.model.ModelProtobuf.User.Builder, io.protostuff.runtime.model.ModelProtobuf.UserOrBuilder>( userList_, ((bitField0_ & 0x00000100) == 0x00000100), getParentForChildren(), isClean()); userList_ = null; } return userListBuilder_; } public final Builder setUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return this; } public final Builder mergeUnknownFields( final com.google.protobuf.UnknownFieldSet unknownFields) { return this; } // @@protoc_insertion_point(builder_scope:io.protostuff.runtime.model.RequestHeader) } // @@protoc_insertion_point(class_scope:io.protostuff.runtime.model.RequestHeader) private static final io.protostuff.runtime.model.ModelProtobuf.RequestHeader DEFAULT_INSTANCE; static { DEFAULT_INSTANCE = new io.protostuff.runtime.model.ModelProtobuf.RequestHeader(); } public static io.protostuff.runtime.model.ModelProtobuf.RequestHeader getDefaultInstance() { return DEFAULT_INSTANCE; } private static final com.google.protobuf.Parser<RequestHeader> PARSER = new com.google.protobuf.AbstractParser<RequestHeader>() { public RequestHeader parsePartialFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { return new RequestHeader(input, extensionRegistry); } }; public static com.google.protobuf.Parser<RequestHeader> parser() { return PARSER; } @java.lang.Override public com.google.protobuf.Parser<RequestHeader> getParserForType() { return PARSER; } public io.protostuff.runtime.model.ModelProtobuf.RequestHeader getDefaultInstanceForType() { return DEFAULT_INSTANCE; } } private static final com.google.protobuf.Descriptors.Descriptor internal_static_io_protostuff_runtime_model_User_descriptor; private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_io_protostuff_runtime_model_User_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor internal_static_io_protostuff_runtime_model_RequestHeader_descriptor; private static final 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_io_protostuff_runtime_model_RequestHeader_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor internal_static_io_protostuff_runtime_model_RequestHeader_CseContextEntry_descriptor; @SuppressWarnings("unused") private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_io_protostuff_runtime_model_RequestHeader_CseContextEntry_fieldAccessorTable; private static final com.google.protobuf.Descriptors.Descriptor internal_static_io_protostuff_runtime_model_RequestHeader_UserMapEntry_descriptor; @SuppressWarnings("unused") private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_io_protostuff_runtime_model_RequestHeader_UserMapEntry_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { "\n\023ModelProtobuf.proto\022\033io.protostuff.run" + "time.model\"\024\n\004User\022\014\n\004name\030\001 \001(\t\"\325\003\n\rReq" + "uestHeader\022\030\n\020destMicroservice\030\001 \001(\t\022\017\n\007" + "msgType\030\002 \001(\005\022\r\n\005flags\030\003 \001(\005\022\020\n\010schemaId" + "\030\005 \001(\t\022\025\n\roperationName\030\006 \001(\t\022N\n\ncseCont" + "ext\030\007 \003(\0132:.io.protostuff.runtime.model." + "RequestHeader.CseContextEntry\022H\n\007userMap" + "\030\010 \003(\01327.io.protostuff.runtime.model.Req" + "uestHeader.UserMapEntry\022\014\n\004list\030\t \003(\t\0223\n" + "\010userList\030\n \003(\0132!.io.protostuff.runtime.", "model.User\0321\n\017CseContextEntry\022\013\n\003key\030\001 \001" + "(\t\022\r\n\005value\030\002 \001(\t:\0028\001\032Q\n\014UserMapEntry\022\013\n" + "\003key\030\001 \001(\t\0220\n\005value\030\002 \001(\0132!.io.protostuf" + "f.runtime.model.User:\0028\001b\006proto3" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { public com.google.protobuf.ExtensionRegistry assignDescriptors( com.google.protobuf.Descriptors.FileDescriptor root) { descriptor = root; return null; } }; com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { }, assigner); internal_static_io_protostuff_runtime_model_User_descriptor = getDescriptor().getMessageTypes().get(0); internal_static_io_protostuff_runtime_model_User_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_io_protostuff_runtime_model_User_descriptor, new java.lang.String[] {"Name",}); internal_static_io_protostuff_runtime_model_RequestHeader_descriptor = getDescriptor().getMessageTypes().get(1); internal_static_io_protostuff_runtime_model_RequestHeader_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_io_protostuff_runtime_model_RequestHeader_descriptor, new java.lang.String[] {"DestMicroservice", "MsgType", "Flags", "SchemaId", "OperationName", "CseContext", "UserMap", "List", "UserList",}); internal_static_io_protostuff_runtime_model_RequestHeader_CseContextEntry_descriptor = internal_static_io_protostuff_runtime_model_RequestHeader_descriptor.getNestedTypes().get(0); 
internal_static_io_protostuff_runtime_model_RequestHeader_CseContextEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_io_protostuff_runtime_model_RequestHeader_CseContextEntry_descriptor, new java.lang.String[] {"Key", "Value",}); internal_static_io_protostuff_runtime_model_RequestHeader_UserMapEntry_descriptor = internal_static_io_protostuff_runtime_model_RequestHeader_descriptor.getNestedTypes().get(1); internal_static_io_protostuff_runtime_model_RequestHeader_UserMapEntry_fieldAccessorTable = new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_io_protostuff_runtime_model_RequestHeader_UserMapEntry_descriptor, new java.lang.String[] {"Key", "Value",}); } // @@protoc_insertion_point(outer_class_scope) }
1
7,907
This file is a model generated by protobuf, kept only for a compatibility test; it's better not to change it.
apache-servicecomb-java-chassis
java
@@ -8,8 +8,8 @@ class Analytics @user = user end - def track_cancelled - track(event: "Cancelled", properties: {}) + def track_cancelled(reason) + track(event: "Cancelled", properties: { reason: reason }) end def track_updated
1
class Analytics include AnalyticsHelper class_attribute :backend self.backend = AnalyticsRuby def initialize(user) @user = user end def track_cancelled track(event: "Cancelled", properties: {}) end def track_updated backend.identify(user_id: user.id, traits: identify_hash(user)) end def track_forum_access track(event: "Logged into Forum", properties: {}) end private attr_reader :user def track(event:, properties:) backend.track( event: event, user_id: user.id, properties: properties ) end end
1
13,590
It turns out we can send the reason as a property to Segment on the event. Then we can do whatever we want with it. In this case, I think we'll fire a webhook to Zapier, which will add a note or message on the user in Intercom.
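To make that concrete, here is a minimal, self-contained Ruby sketch (not code from the PR): AnalyticsSketch, the Segment stub, and the cancellation reason are hypothetical stand-ins, and only the shape of the track_cancelled(reason) call mirrors the patch above.

require "ostruct"

# Trimmed stand-in for the real Analytics class; only the patched path is shown.
class AnalyticsSketch
  def initialize(user, backend)
    @user = user
    @backend = backend
  end

  # Mirrors the diff: the cancellation reason travels as an event property,
  # so a downstream hook (e.g. Zapier -> Intercom) can read it off the event.
  def track_cancelled(reason)
    @backend.track(
      event: "Cancelled",
      user_id: @user.id,
      properties: { reason: reason }
    )
  end
end

# Stand-in for the Segment-backed AnalyticsRuby backend: it just echoes the payload.
segment_stub = Object.new
def segment_stub.track(event:, user_id:, properties:)
  puts({ event: event, user_id: user_id, properties: properties }.inspect)
end

user = OpenStruct.new(id: 42) # hypothetical user
AnalyticsSketch.new(user, segment_stub).track_cancelled("too expensive")
# prints the "Cancelled" event payload with reason: "too expensive" in its properties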
thoughtbot-upcase
rb
@@ -0,0 +1,17 @@ +class PromotedCatalog + def initialize(catalog) + @catalog = catalog + end + + def method_missing(message, *arguments) + catalog.send(message, *arguments).promoted + end + + def respond_to_missing?(message, include_all = false) + catalog.send(:respond_to?, message, include_all) + end + + private + + attr_reader :catalog +end
1
1
9,446
This class is very similar to `Catalog`. Is there a way to have this class compose a `Catalog` instance or extract a common class which they can both compose?
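Purely to illustrate the question (none of this is in the PR), one shape the extraction could take is a generic decorator base class that composes the catalog, with PromotedCatalog only supplying the `.promoted` refinement; CatalogDecorator, FakeCatalog, FakeRelation, and the `videos` scope are hypothetical names, not code from the project.

class CatalogDecorator
  def initialize(catalog)
    @catalog = catalog
  end

  # Forward every message to the wrapped catalog, then let a subclass refine the result.
  def method_missing(message, *arguments, &block)
    decorate(catalog.public_send(message, *arguments, &block))
  end

  def respond_to_missing?(message, include_all = false)
    catalog.respond_to?(message, include_all) || super
  end

  private

  attr_reader :catalog

  # Hook for subclasses; the base class is a transparent pass-through.
  def decorate(result)
    result
  end
end

class PromotedCatalog < CatalogDecorator
  private

  def decorate(result)
    result.promoted
  end
end

# Tiny fakes so the sketch runs on its own.
FakeRelation = Struct.new(:items) do
  def promoted
    items.select { |item| item[:promoted] }
  end
end

class FakeCatalog
  def videos
    FakeRelation.new([{ name: "a", promoted: true }, { name: "b", promoted: false }])
  end
end

p PromotedCatalog.new(FakeCatalog.new).videos
# prints only the promoted video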
thoughtbot-upcase
rb
@@ -164,9 +164,7 @@ TEST("pull as device usm from host-allocated homogen table") { row_accessor<const float>{ data } // .pull(q, { 1, 3 }, sycl::usm::alloc::device); - const auto data_arr_host = - la::matrix<float>::wrap(q, data_arr_device.get_data(), { row_count, column_count }) - .to_host(); + const auto data_arr_host = la::matrix<float>::wrap(data_arr_device).to_host(); const float* data_arr_host_ptr = data_arr_host.get_data(); REQUIRE(data_arr_host_ptr[0] == 3.0f);
1
/******************************************************************************* * Copyright 2020-2021 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *******************************************************************************/ #include "oneapi/dal/table/column_accessor.hpp" #include "oneapi/dal/table/homogen.hpp" #include "oneapi/dal/table/row_accessor.hpp" #include "oneapi/dal/test/engine/common.hpp" #include "oneapi/dal/test/engine/linalg.hpp" namespace oneapi::dal { namespace te = dal::test::engine; namespace la = te::linalg; TEST("can read table data via row accessor") { using oneapi::dal::detail::empty_delete; double data[] = { 1.0, 2.0, 3.0, -1.0, -2.0, -3.0 }; homogen_table t{ data, 2, 3, empty_delete<const double>() }; const auto rows_block = row_accessor<const double>(t).pull({ 0, -1 }); REQUIRE(t.get_row_count() * t.get_column_count() == rows_block.get_count()); REQUIRE(data == rows_block.get_data()); for (std::int64_t i = 0; i < rows_block.get_count(); i++) { REQUIRE(rows_block[i] == data[i]); } } TEST("can read table data via row accessor with conversion") { using oneapi::dal::detail::empty_delete; float data[] = { 1.0f, 2.0f, 3.0f, -1.0f, -2.0f, -3.0f }; homogen_table t{ data, 2, 3, empty_delete<const float>() }; auto rows_block = row_accessor<const double>(t).pull({ 0, -1 }); REQUIRE(t.get_row_count() * t.get_column_count() == rows_block.get_count()); REQUIRE((void*)data != (void*)rows_block.get_data()); for (std::int64_t i = 0; i < rows_block.get_count(); i++) { REQUIRE(rows_block[i] == Approx(static_cast<double>(data[i]))); } } TEST("can_read table data via row accessor and array outside") { using oneapi::dal::detail::empty_delete; float data[] = { 1.0f, 2.0f, 3.0f, -1.0f, -2.0f, -3.0f }; homogen_table t{ data, 2, 3, empty_delete<const float>() }; auto arr = array<float>::empty(10); auto rows_ptr = row_accessor<const float>(t).pull(arr, { 0, -1 }); REQUIRE(t.get_row_count() * t.get_column_count() == arr.get_count()); REQUIRE(data == rows_ptr); REQUIRE(data == arr.get_data()); auto data_ptr = arr.get_data(); for (std::int64_t i = 0; i < arr.get_count(); i++) { REQUIRE(rows_ptr[i] == data[i]); REQUIRE(data_ptr[i] == data[i]); } } TEST("can read rows from column major table") { float data[] = { 1.0f, 2.0f, 3.0f, -1.0f, -2.0f, -3.0f }; auto t = homogen_table::wrap(data, 3, 2, data_layout::column_major); auto rows_data = row_accessor<const float>(t).pull({ 1, -1 }); REQUIRE(rows_data.get_count() == 2 * t.get_column_count()); REQUIRE(rows_data[0] == Approx(2.0f)); REQUIRE(rows_data[1] == Approx(-2.0f)); REQUIRE(rows_data[2] == Approx(3.0f)); REQUIRE(rows_data[3] == Approx(-3.0f)); } TEST("can read rows from column major table with conversion") { float data[] = { 1.0f, 2.0f, 3.0f, -1.0f, -2.0f, -3.0f }; auto t = homogen_table::wrap(data, 3, 2, data_layout::column_major); auto rows_data = row_accessor<const std::int32_t>(t).pull({ 1, 2 }); REQUIRE(rows_data.get_count() == 1 * t.get_column_count()); REQUIRE(rows_data[0] == 2); REQUIRE(rows_data[1] == -2); } TEST("pull throws 
exception if invalid range") { detail::homogen_table_builder b; b.reset(array<float>::zeros(3 * 2), 3, 2); row_accessor<float> acc{ b }; REQUIRE_THROWS_AS(acc.pull({ 1, 4 }), dal::range_error); } TEST("push throws exception if invalid range") { detail::homogen_table_builder b; b.reset(array<float>::zeros(3 * 2), 3, 2); row_accessor<float> acc{ b }; auto rows_data = acc.pull({ 1, 2 }); REQUIRE_THROWS_AS(acc.push(rows_data, { 0, 2 }), dal::range_error); REQUIRE_THROWS_AS(acc.push(rows_data, { 3, 4 }), dal::range_error); } #ifdef ONEDAL_DATA_PARALLEL TEST("pull with queue throws exception if invalid range") { DECLARE_TEST_POLICY(policy); auto& q = policy.get_queue(); detail::homogen_table_builder b; b.reset(array<float>::zeros(q, 3 * 2), 3, 2); row_accessor<float> acc{ b }; REQUIRE_THROWS_AS(acc.pull(q, { 1, 4 }), dal::range_error); } TEST("push with queue throws exception if invalid range") { DECLARE_TEST_POLICY(policy); auto& q = policy.get_queue(); detail::homogen_table_builder b; b.reset(array<float>::zeros(q, 3 * 2), 3, 2); row_accessor<float> acc{ b }; auto rows_data = acc.pull(q, { 1, 2 }); REQUIRE_THROWS_AS(acc.push(q, rows_data, { 0, 2 }), dal::range_error); REQUIRE_THROWS_AS(acc.push(q, rows_data, { 3, 4 }), dal::range_error); } TEST("pull as device usm from host-allocated homogen table") { DECLARE_TEST_POLICY(policy); auto& q = policy.get_queue(); const float data_ptr[] = { 1.0f, 2.0f, // 3.0f, -1.0f, // -2.0f, -3.0f }; const std::int64_t row_count = 3; const std::int64_t column_count = 2; const auto data = homogen_table::wrap(data_ptr, row_count, column_count); const auto data_arr_device = // row_accessor<const float>{ data } // .pull(q, { 1, 3 }, sycl::usm::alloc::device); const auto data_arr_host = la::matrix<float>::wrap(q, data_arr_device.get_data(), { row_count, column_count }) .to_host(); const float* data_arr_host_ptr = data_arr_host.get_data(); REQUIRE(data_arr_host_ptr[0] == 3.0f); REQUIRE(data_arr_host_ptr[1] == -1.0f); REQUIRE(data_arr_host_ptr[2] == -2.0f); REQUIRE(data_arr_host_ptr[3] == -3.0f); } TEST("pull as host from device-allocated homogen table") { DECLARE_TEST_POLICY(policy); auto& q = policy.get_queue(); constexpr std::int64_t row_count = 4; constexpr std::int64_t column_count = 3; const auto ary = la::matrix<float>::full( q, { row_count, column_count }, [](std::int64_t i) { return float(i); }, sycl::usm::alloc::device); const auto shared_table = homogen_table::wrap(q, ary.get_data(), row_count, column_count); const auto data_arr_host = // row_accessor<const float>{ shared_table }.pull({ 1, 3 }); const float* data_arr_host_ptr = data_arr_host.get_data(); REQUIRE(data_arr_host_ptr[0] == 3.0f); REQUIRE(data_arr_host_ptr[1] == 4.0f); REQUIRE(data_arr_host_ptr[2] == 5.0f); REQUIRE(data_arr_host_ptr[3] == 6.0f); } TEST("pull does not copy if alloc kind matches") { DECLARE_TEST_POLICY(policy); auto& q = policy.get_queue(); const auto alloc_kind = GENERATE(sycl::usm::alloc::device, // sycl::usm::alloc::host, sycl::usm::alloc::shared); constexpr std::int64_t row_count = 4; constexpr std::int64_t column_count = 3; const auto ary = la::matrix<float>::full( q, { row_count, column_count }, [](std::int64_t i) { return float(i); }, alloc_kind); const auto table = homogen_table::wrap(q, ary.get_data(), row_count, column_count); const auto data_arr_device = // row_accessor<const float>{ table } // .pull(q, { 0, 2 }, alloc_kind); REQUIRE(data_arr_device.get_data() == ary.get_data()); } TEST("pull does not copy if device usm requested from shared usm table") { 
DECLARE_TEST_POLICY(policy); auto& q = policy.get_queue(); constexpr std::int64_t row_count = 4; constexpr std::int64_t column_count = 3; const auto shared_ary = la::matrix<float>::full( q, { row_count, column_count }, [](std::int64_t i) { return float(i); }, sycl::usm::alloc::shared); const auto shared_table = homogen_table::wrap(q, shared_ary.get_data(), row_count, column_count); const auto data_arr_device = // row_accessor<const float>{ shared_table } // .pull(q, { 0, 2 }, sycl::usm::alloc::device); REQUIRE(data_arr_device.get_data() == shared_ary.get_data()); REQUIRE(sycl::get_pointer_type(data_arr_device.get_data(), q.get_context()) == sycl::usm::alloc::shared); } TEST("pull from column-major shared usm homogen table") { DECLARE_TEST_POLICY(policy); auto& q = policy.get_queue(); constexpr std::int64_t row_count = 4; constexpr std::int64_t column_count = 3; constexpr std::int64_t data_size = row_count * column_count; auto data = sycl::malloc_shared<float>(data_size, q); q.submit([&](sycl::handler& cgh) { cgh.parallel_for(sycl::range<1>(data_size), [=](sycl::id<1> idx) { data[idx[0]] = idx[0]; }); }).wait(); auto t = homogen_table::wrap(q, data, row_count, column_count, {}, data_layout::column_major); row_accessor<const float> acc{ t }; auto block = acc.pull(q, { 1, 3 }); REQUIRE(block.get_count() == 2 * column_count); REQUIRE(block[0] == Approx(1)); REQUIRE(block[1] == Approx(5)); REQUIRE(block[2] == Approx(9)); REQUIRE(block[3] == Approx(2)); REQUIRE(block[4] == Approx(6)); REQUIRE(block[5] == Approx(10)); sycl::free(data, q); } #endif } // namespace oneapi::dal
1
28,722
Why did you decide to drop the `queue` from the parameters of the `wrap` function? The agreement was that, for the DPC++ case, all functions that accept non-host-accessible memory shall accept the queue directly. (See the sketch after this record.)
oneapi-src-oneDAL
cpp
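The review comment in the oneDAL record above argues that DPC++ entry points which accept non-host-accessible (device USM) memory should take the `sycl::queue` explicitly. Below is a minimal sketch of what that looks like at the call site, modelled on the `homogen_table::wrap(q, ...)` and `pull(q, ...)` calls already present in the test file in this record; the exact overload set, header spelling, and ownership semantics are assumptions for illustration, not a statement of the final oneDAL API.

#include <cstdint>
#include <CL/sycl.hpp> // assumed DPC++ header; requires ONEDAL_DATA_PARALLEL

#include "oneapi/dal/table/homogen.hpp"
#include "oneapi/dal/table/row_accessor.hpp"

namespace dal = oneapi::dal;

// Hedged sketch: mirrors the wrap(q, ...) usage in the tests above rather than
// defining the API. wrap() does not take ownership of the raw pointer.
void wrap_usm_example(sycl::queue& q) {
    constexpr std::int64_t row_count = 2;
    constexpr std::int64_t column_count = 3;

    // Device USM is not host accessible, so the queue is passed to wrap()
    // directly; the table and later accessors need it to reach the data.
    float* device_data = sycl::malloc_device<float>(row_count * column_count, q);
    const auto device_table =
        dal::homogen_table::wrap(q, device_data, row_count, column_count);

    // Pulling a block likewise takes the queue when the result should stay on device.
    const auto block = dal::row_accessor<const float>{ device_table }
                           .pull(q, { 0, row_count }, sycl::usm::alloc::device);

    // Host-accessible memory, by contrast, can still be wrapped without a queue.
    float host_data[] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f };
    const auto host_table = dal::homogen_table::wrap(host_data, row_count, column_count);

    (void)block;
    (void)host_table;
    sycl::free(device_data, q);
}

Keeping the queue at the `wrap` boundary makes the device-memory handling explicit exactly where the raw pointer enters the library, which appears to be the agreement the reviewer refers to.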
@@ -340,6 +340,11 @@ module RSpec # @return [Symbol] add_setting :detail_color + # @macro add_setting + # Hyperlink failed examples (default: `false`). + # @return [Boolean] + add_setting :hyperlink + # @macro add_setting # Don't print filter info i.e. "Run options: include {:focus=>true}" # (default `false`).
1
RSpec::Support.require_rspec_core "backtrace_formatter" RSpec::Support.require_rspec_core "ruby_project" RSpec::Support.require_rspec_core "formatters/deprecation_formatter" RSpec::Support.require_rspec_core "output_wrapper" module RSpec module Core # rubocop:disable Metrics/ClassLength # Stores runtime configuration information. # # Configuration options are loaded from multiple files and joined together # with command-line switches and the `SPEC_OPTS` environment variable. # # Precedence order (where later entries overwrite earlier entries on # conflicts): # # * Global (`$XDG_CONFIG_HOME/rspec/options`, or `~/.rspec` if it does # not exist) # * Project-specific (`./.rspec`) # * Local (`./.rspec-local`) # * Command-line options # * `SPEC_OPTS` # # For example, an option set in the local file will override an option set # in your global file. # # The global, project-specific and local files can all be overridden with a # separate custom file using the --options command-line parameter. # # @example Standard settings # RSpec.configure do |c| # c.drb = true # c.drb_port = 1234 # c.default_path = 'behavior' # end # # @example Hooks # RSpec.configure do |c| # c.before(:suite) { establish_connection } # c.before(:example) { log_in_as :authorized } # c.around(:example) { |ex| Database.transaction(&ex) } # end # # @see RSpec.configure # @see Hooks class Configuration include RSpec::Core::Hooks # Module that holds `attr_reader` declarations. It's in a separate # module to allow us to override those methods and use `super`. # @private Readers = Module.new include Readers # @private class MustBeConfiguredBeforeExampleGroupsError < StandardError; end # @private def self.define_reader(name) Readers.class_eval do remove_method name if method_defined?(name) attr_reader name end define_method(name) { value_for(name) { super() } } end # @private def self.define_aliases(name, alias_name) alias_method alias_name, name alias_method "#{alias_name}=", "#{name}=" define_predicate_for alias_name end # @private def self.define_predicate_for(*names) names.each { |name| alias_method "#{name}?", name } end # @private # # Invoked by the `add_setting` instance method. Use that method on a # `Configuration` instance rather than this class method. def self.add_setting(name, opts={}) raise "Use the instance add_setting method if you want to set a default" if opts.key?(:default) attr_writer name add_read_only_setting name Array(opts[:alias_with]).each do |alias_name| define_aliases(name, alias_name) end end # @private # # As `add_setting` but only add the reader. def self.add_read_only_setting(name, opts={}) raise "Use the instance add_setting method if you want to set a default" if opts.key?(:default) define_reader name define_predicate_for name end # @macro [attach] add_setting # @!attribute [rw] $1 # # @macro [attach] define_reader # @!attribute [r] $1 # @macro add_setting # Path to use if no path is provided to the `rspec` command (default: # `"spec"`). Allows you to just type `rspec` instead of `rspec spec` to # run all the examples in the `spec` directory. # # @note Other scripts invoking `rspec` indirectly will ignore this # setting. # @return [String] add_read_only_setting :default_path def default_path=(path) project_source_dirs << path @default_path = path end # @macro add_setting # Run examples over DRb (default: `false`). RSpec doesn't supply the DRb # server, but you can use tools like spork. # @return [Boolean] add_setting :drb # @macro add_setting # The drb_port (default: nil). 
add_setting :drb_port # @macro add_setting # Default: `$stderr`. add_setting :error_stream # Indicates if the DSL has been exposed off of modules and `main`. # Default: true # @return [Boolean] def expose_dsl_globally? Core::DSL.exposed_globally? end # Use this to expose the core RSpec DSL via `Module` and the `main` # object. It will be set automatically but you can override it to # remove the DSL. # Default: true def expose_dsl_globally=(value) if value Core::DSL.expose_globally! Core::SharedExampleGroup::TopLevelDSL.expose_globally! else Core::DSL.remove_globally! Core::SharedExampleGroup::TopLevelDSL.remove_globally! end end # Determines where deprecation warnings are printed. # Defaults to `$stderr`. # @return [IO, String] IO or filename to write to define_reader :deprecation_stream # Determines where deprecation warnings are printed. # @param value [IO, String] IO to write to or filename to write to def deprecation_stream=(value) if @reporter && !value.equal?(@deprecation_stream) warn "RSpec's reporter has already been initialized with " \ "#{deprecation_stream.inspect} as the deprecation stream, so your change to "\ "`deprecation_stream` will be ignored. You should configure it earlier for " \ "it to take effect, or use the `--deprecation-out` CLI option. " \ "(Called from #{CallerFilter.first_non_rspec_line})" else @deprecation_stream = value end end # @macro define_reader # The file path to use for persisting example statuses. Necessary for the # `--only-failures` and `--next-failure` CLI options. # # @overload example_status_persistence_file_path # @return [String] the file path # @overload example_status_persistence_file_path=(value) # @param value [String] the file path define_reader :example_status_persistence_file_path # Sets the file path to use for persisting example statuses. Necessary for the # `--only-failures` and `--next-failure` CLI options. def example_status_persistence_file_path=(value) @example_status_persistence_file_path = value clear_values_derived_from_example_status_persistence_file_path end # @macro define_reader # Indicates if the `--only-failures` (or `--next-failure`) flag is being used. define_reader :only_failures alias_method :only_failures?, :only_failures # @private def only_failures_but_not_configured? only_failures? && !example_status_persistence_file_path end # @macro add_setting # If specified, indicates the number of failures required before cleaning # up and exit (default: `nil`). add_setting :fail_fast # @macro add_setting # Prints the formatter output of your suite without running any # examples or hooks. add_setting :dry_run # @macro add_setting # The exit code to return if there are any failures (default: 1). # @return [Integer] add_setting :failure_exit_code # @macro add_setting # Whether or not to fail when there are no RSpec examples (default: false). # @return [Boolean] add_setting :fail_if_no_examples # @macro define_reader # Indicates files configured to be required. # @return [Array<String>] define_reader :requires # @macro define_reader # Returns dirs that have been prepended to the load path by the `-I` # command line option. # @return [Array<String>] define_reader :libs # @macro add_setting # Determines where RSpec will send its output. # Default: `$stdout`. # @return [IO, String] define_reader :output_stream # Set the output stream for reporter. 
# @attr value [IO, String] IO to write to or filename to write to, defaults to $stdout def output_stream=(value) if @reporter && !value.equal?(@output_stream) warn "RSpec's reporter has already been initialized with " \ "#{output_stream.inspect} as the output stream, so your change to "\ "`output_stream` will be ignored. You should configure it earlier for " \ "it to take effect. (Called from #{CallerFilter.first_non_rspec_line})" else @output_stream = value output_wrapper.output = @output_stream end end # @macro define_reader # Load files matching this pattern (default: `'**{,/*/**}/*_spec.rb'`). # @return [String] define_reader :pattern # Set pattern to match files to load. # @attr value [String] the filename pattern to filter spec files by def pattern=(value) update_pattern_attr :pattern, value end # @macro define_reader # Exclude files matching this pattern. # @return [String] define_reader :exclude_pattern # Set pattern to match files to exclude. # @attr value [String] the filename pattern to exclude spec files by def exclude_pattern=(value) update_pattern_attr :exclude_pattern, value end # @macro add_setting # Specifies which directories contain the source code for your project. # When a failure occurs, RSpec looks through the backtrace to find a # a line of source to print. It first looks for a line coming from # one of the project source directories so that, for example, it prints # the expectation or assertion call rather than the source code from # the expectation or assertion framework. # @return [Array<String>] add_setting :project_source_dirs # @macro add_setting # Report the times for the slowest examples (default: `false`). # Use this to specify the number of examples to include in the profile. # @return [Boolean] add_setting :profile_examples # @macro add_setting # Run all examples if none match the configured filters # (default: `false`). # @deprecated Use {#filter_run_when_matching} instead for the specific # filters that you want to be ignored if none match. add_setting :run_all_when_everything_filtered # @macro add_setting # Color to use to indicate success. Defaults to `:green` but can be set # to one of the following: `[:black, :white, :red, :green, :yellow, # :blue, :magenta, :cyan]` # @return [Symbol] add_setting :success_color # @macro add_setting # Color to use to print pending examples. Defaults to `:yellow` but can # be set to one of the following: `[:black, :white, :red, :green, # :yellow, :blue, :magenta, :cyan]` # @return [Symbol] add_setting :pending_color # @macro add_setting # Color to use to indicate failure. Defaults to `:red` but can be set to # one of the following: `[:black, :white, :red, :green, :yellow, :blue, # :magenta, :cyan]` # @return [Symbol] add_setting :failure_color # @macro add_setting # The default output color. Defaults to `:white` but can be set to one of # the following: `[:black, :white, :red, :green, :yellow, :blue, # :magenta, :cyan]` # @return [Symbol] add_setting :default_color # @macro add_setting # Color used when a pending example is fixed. Defaults to `:blue` but can # be set to one of the following: `[:black, :white, :red, :green, # :yellow, :blue, :magenta, :cyan]` # @return [Symbol] add_setting :fixed_color # @macro add_setting # Color used to print details. Defaults to `:cyan` but can be set to one # of the following: `[:black, :white, :red, :green, :yellow, :blue, # :magenta, :cyan]` # @return [Symbol] add_setting :detail_color # @macro add_setting # Don't print filter info i.e. 
"Run options: include {:focus=>true}" # (default `false`). # return [Boolean] add_setting :silence_filter_announcements # @deprecated This config option was added in RSpec 2 to pave the way # for this being the default behavior in RSpec 3. Now this option is # a no-op. def treat_symbols_as_metadata_keys_with_true_values=(_value) RSpec.deprecate( "RSpec::Core::Configuration#treat_symbols_as_metadata_keys_with_true_values=", :message => "RSpec::Core::Configuration#treat_symbols_as_metadata_keys_with_true_values= " \ "is deprecated, it is now set to true as default and " \ "setting it to false has no effect." ) end # @macro define_reader # Configures how RSpec treats metadata passed as part of a shared example # group definition. For example, given this shared example group definition: # # RSpec.shared_context "uses DB", :db => true do # around(:example) do |ex| # MyORM.transaction(:rollback => true, &ex) # end # end # # ...there are two ways RSpec can treat the `:db => true` metadata, each # of which has a corresponding config option: # # 1. `:trigger_inclusion`: this shared context will be implicitly included # in any groups (or examples) that have `:db => true` metadata. # 2. `:apply_to_host_groups`: the metadata will be inherited by the metadata # hash of all host groups and examples. # # `:trigger_inclusion` is the legacy behavior from before RSpec 3.5 but should # be considered deprecated. Instead, you can explicitly include a group with # `include_context`: # # RSpec.describe "My model" do # include_context "uses DB" # end # # ...or you can configure RSpec to include the context based on matching metadata # using an API that mirrors configured module inclusion: # # RSpec.configure do |rspec| # rspec.include_context "uses DB", :db => true # end # # `:apply_to_host_groups` is a new feature of RSpec 3.5 and will be the only # supported behavior in RSpec 4. # # @overload shared_context_metadata_behavior # @return [:trigger_inclusion, :apply_to_host_groups] the configured behavior # @overload shared_context_metadata_behavior=(value) # @param value [:trigger_inclusion, :apply_to_host_groups] sets the configured behavior define_reader :shared_context_metadata_behavior # @see shared_context_metadata_behavior def shared_context_metadata_behavior=(value) case value when :trigger_inclusion, :apply_to_host_groups @shared_context_metadata_behavior = value else raise ArgumentError, "Cannot set `RSpec.configuration." \ "shared_context_metadata_behavior` to `#{value.inspect}`. Only " \ "`:trigger_inclusion` and `:apply_to_host_groups` are valid values." end end # Record the start time of the spec suite to measure load time. # return [Time] add_setting :start_time # @macro add_setting # Use threadsafe options where available. # Currently this will place a mutex around memoized values such as let blocks. # return [Boolean] add_setting :threadsafe # @macro add_setting # Maximum count of failed source lines to display in the failure reports. # (default `10`). # return [Integer] add_setting :max_displayed_failure_line_count # Determines which bisect runner implementation gets used to run subsets # of the suite during a bisection. Your choices are: # # - `:shell`: Performs a spec run by shelling out, booting RSpec and your # application environment each time. This runner is the most widely # compatible runner, but is not as fast. On platforms that do not # support forking, this is the default. 
# - `:fork`: Pre-boots RSpec and your application environment in a parent # process, and then forks a child process for each spec run. This runner # tends to be significantly faster than the `:shell` runner but cannot # be used in some situations. On platforms that support forking, this # is the default. If you use this runner, you should ensure that all # of your one-time setup logic goes in a `before(:suite)` hook instead # of getting run at the top-level of a file loaded by `--require`. # # @note This option will only be used by `--bisect` if you set it in a file # loaded via `--require`. # # @return [Symbol] attr_reader :bisect_runner def bisect_runner=(value) if @bisect_runner_class && value != @bisect_runner raise "`config.bisect_runner = #{value.inspect}` can no longer take " \ "effect as the #{@bisect_runner.inspect} bisect runnner is already " \ "in use. This config setting must be set in a file loaded by a " \ "`--require` option (passed at the CLI or in a `.rspec` file) for " \ "it to have any effect." end @bisect_runner = value end # @private # @deprecated Use {#color_mode} = :on, instead of {#color} with {#tty} add_setting :tty # @private attr_writer :files_to_run # @private attr_accessor :filter_manager, :world # @private attr_accessor :static_config_filter_manager # @private attr_reader :backtrace_formatter, :ordering_manager, :loaded_spec_files # rubocop:disable Metrics/AbcSize, Metrics/MethodLength # Build an object to store runtime configuration options and set defaults def initialize # rubocop:disable Style/GlobalVars @start_time = $_rspec_core_load_started_at || ::RSpec::Core::Time.now # rubocop:enable Style/GlobalVars @expectation_frameworks = [] @include_modules = FilterableItemRepository::QueryOptimized.new(:any?) @extend_modules = FilterableItemRepository::QueryOptimized.new(:any?) @prepend_modules = FilterableItemRepository::QueryOptimized.new(:any?) @bisect_runner = RSpec::Support::RubyFeatures.fork_supported? ? :fork : :shell @bisect_runner_class = nil @before_suite_hooks = [] @after_suite_hooks = [] @mock_framework = nil @files_or_directories_to_run = [] @loaded_spec_files = Set.new @color = false @color_mode = :automatic @pattern = '**{,/*/**}/*_spec.rb' @exclude_pattern = '' @failure_exit_code = 1 @fail_if_no_examples = false @spec_files_loaded = false @backtrace_formatter = BacktraceFormatter.new @default_path = 'spec' @project_source_dirs = %w[ spec lib app ] @deprecation_stream = $stderr @output_stream = $stdout @reporter = nil @reporter_buffer = nil @filter_manager = FilterManager.new @static_config_filter_manager = FilterManager.new @ordering_manager = Ordering::ConfigurationManager.new @preferred_options = {} @failure_color = :red @success_color = :green @pending_color = :yellow @default_color = :white @fixed_color = :blue @detail_color = :cyan @profile_examples = false @requires = [] @libs = [] @derived_metadata_blocks = FilterableItemRepository::QueryOptimized.new(:any?) @threadsafe = true @max_displayed_failure_line_count = 10 @world = World::Null @shared_context_metadata_behavior = :trigger_inclusion define_built_in_hooks end # rubocop:enable Metrics/MethodLength, Metrics/AbcSize # @private # # Used to set higher priority option values from the command line. 
def force(hash) ordering_manager.force(hash) @preferred_options.merge!(hash) return unless hash.key?(:example_status_persistence_file_path) clear_values_derived_from_example_status_persistence_file_path end # @private def reset @spec_files_loaded = false reset_reporter end # @private def reset_reporter @reporter = nil @formatter_loader = nil @output_wrapper = nil end # @private def reset_filters self.filter_manager = FilterManager.new filter_manager.include_only( Metadata.deep_hash_dup(static_config_filter_manager.inclusions.rules) ) filter_manager.exclude_only( Metadata.deep_hash_dup(static_config_filter_manager.exclusions.rules) ) end # @overload add_setting(name) # @overload add_setting(name, opts) # @option opts [Symbol] :default # # Set a default value for the generated getter and predicate methods: # # add_setting(:foo, :default => "default value") # # @option opts [Symbol] :alias_with # # Use `:alias_with` to alias the setter, getter, and predicate to # another name, or names: # # add_setting(:foo, :alias_with => :bar) # add_setting(:foo, :alias_with => [:bar, :baz]) # # Adds a custom setting to the RSpec.configuration object. # # RSpec.configuration.add_setting :foo # # Used internally and by extension frameworks like rspec-rails, so they # can add config settings that are domain specific. For example: # # RSpec.configure do |c| # c.add_setting :use_transactional_fixtures, # :default => true, # :alias_with => :use_transactional_examples # end # # `add_setting` creates three methods on the configuration object, a # setter, a getter, and a predicate: # # RSpec.configuration.foo=(value) # RSpec.configuration.foo # RSpec.configuration.foo? # Returns true if foo returns anything but nil or false. def add_setting(name, opts={}) default = opts.delete(:default) (class << self; self; end).class_exec do add_setting(name, opts) end __send__("#{name}=", default) if default end # Returns the configured mock framework adapter module. # @return [Symbol] def mock_framework if @mock_framework.nil? begin mock_with :rspec rescue LoadError mock_with :nothing end end @mock_framework end # Delegates to mock_framework=(framework). def mock_framework=(framework) mock_with framework end # Regexps used to exclude lines from backtraces. # # Excludes lines from ruby (and jruby) source, installed gems, anything # in any "bin" directory, and any of the RSpec libs (outside gem # installs) by default. # # You can modify the list via the getter, or replace it with the setter. # # To override this behaviour and display a full backtrace, use # `--backtrace` on the command line, in a `.rspec` file, or in the # `rspec_options` attribute of RSpec's rake task. # @return [Array<Regexp>] def backtrace_exclusion_patterns @backtrace_formatter.exclusion_patterns end # Set regular expressions used to exclude lines in backtrace. # @param patterns [Array<Regexp>] set backtrace_formatter exlusion_patterns def backtrace_exclusion_patterns=(patterns) @backtrace_formatter.exclusion_patterns = patterns end # Regexps used to include lines in backtraces. # # Defaults to [Regexp.new Dir.getwd]. # # Lines that match an exclusion _and_ an inclusion pattern # will be included. # # You can modify the list via the getter, or replace it with the setter. # @return [Array<Regexp>] def backtrace_inclusion_patterns @backtrace_formatter.inclusion_patterns end # Set regular expressions used to include lines in backtrace. 
# @attr patterns [Array<Regexp>] set backtrace_formatter inclusion_patterns def backtrace_inclusion_patterns=(patterns) @backtrace_formatter.inclusion_patterns = patterns end # Adds {#backtrace_exclusion_patterns} that will filter lines from # the named gems from backtraces. # # @param gem_names [Array<String>] Names of the gems to filter # # @example # RSpec.configure do |config| # config.filter_gems_from_backtrace "rack", "rake" # end # # @note The patterns this adds will match the named gems in their common # locations (e.g. system gems, vendored with bundler, installed as a # :git dependency with bundler, etc) but is not guaranteed to work for # all possible gem locations. For example, if you have the gem source # in a directory with a completely unrelated name, and use bundler's # :path option, this will not filter it. def filter_gems_from_backtrace(*gem_names) gem_names.each do |name| @backtrace_formatter.filter_gem(name) end end # @private MOCKING_ADAPTERS = { :rspec => :RSpec, :flexmock => :Flexmock, :rr => :RR, :mocha => :Mocha, :nothing => :Null } # Sets the mock framework adapter module. # # `framework` can be a Symbol or a Module. # # Given any of `:rspec`, `:mocha`, `:flexmock`, or `:rr`, configures the # named framework. # # Given `:nothing`, configures no framework. Use this if you don't use # any mocking framework to save a little bit of overhead. # # Given a Module, includes that module in every example group. The module # should adhere to RSpec's mock framework adapter API: # # setup_mocks_for_rspec # - called before each example # # verify_mocks_for_rspec # - called after each example if the example hasn't yet failed. # Framework should raise an exception when expectations fail # # teardown_mocks_for_rspec # - called after verify_mocks_for_rspec (even if there are errors) # # If the module responds to `configuration` and `mock_with` receives a # block, it will yield the configuration object to the block e.g. # # config.mock_with OtherMockFrameworkAdapter do |mod_config| # mod_config.custom_setting = true # end def mock_with(framework) framework_module = if framework.is_a?(Module) framework else const_name = MOCKING_ADAPTERS.fetch(framework) do raise ArgumentError, "Unknown mocking framework: #{framework.inspect}. " \ "Pass a module or one of #{MOCKING_ADAPTERS.keys.inspect}" end RSpec::Support.require_rspec_core "mocking_adapters/#{const_name.to_s.downcase}" RSpec::Core::MockingAdapters.const_get(const_name) end new_name, old_name = [framework_module, @mock_framework].map do |mod| mod.respond_to?(:framework_name) ? mod.framework_name : :unnamed end unless new_name == old_name assert_no_example_groups_defined(:mock_framework) end if block_given? raise "#{framework_module} must respond to `configuration` so that " \ "mock_with can yield it." unless framework_module.respond_to?(:configuration) yield framework_module.configuration end @mock_framework = framework_module end # Returns the configured expectation framework adapter module(s) def expectation_frameworks if @expectation_frameworks.empty? begin expect_with :rspec rescue LoadError expect_with Module.new end end @expectation_frameworks end # Delegates to expect_with(framework). def expectation_framework=(framework) expect_with(framework) end # Sets the expectation framework module(s) to be included in each example # group. 
# # `frameworks` can be `:rspec`, `:test_unit`, `:minitest`, a custom # module, or any combination thereof: # # config.expect_with :rspec # config.expect_with :test_unit # config.expect_with :minitest # config.expect_with :rspec, :minitest # config.expect_with OtherExpectationFramework # # RSpec will translate `:rspec`, `:minitest`, and `:test_unit` into the # appropriate modules. # # ## Configuration # # If the module responds to `configuration`, `expect_with` will # yield the `configuration` object if given a block: # # config.expect_with OtherExpectationFramework do |custom_config| # custom_config.custom_setting = true # end def expect_with(*frameworks) modules = frameworks.map do |framework| case framework when Module framework when :rspec require 'rspec/expectations' # Tag this exception class so our exception formatting logic knows # that it satisfies the `MultipleExceptionError` interface. ::RSpec::Expectations::MultipleExpectationsNotMetError.__send__( :include, MultipleExceptionError::InterfaceTag ) ::RSpec::Matchers when :test_unit require 'rspec/core/test_unit_assertions_adapter' ::RSpec::Core::TestUnitAssertionsAdapter when :minitest require 'rspec/core/minitest_assertions_adapter' ::RSpec::Core::MinitestAssertionsAdapter else raise ArgumentError, "#{framework.inspect} is not supported" end end if (modules - @expectation_frameworks).any? assert_no_example_groups_defined(:expect_with) end if block_given? raise "expect_with only accepts a block with a single argument. " \ "Call expect_with #{modules.length} times, " \ "once with each argument, instead." if modules.length > 1 raise "#{modules.first} must respond to `configuration` so that " \ "expect_with can yield it." unless modules.first.respond_to?(:configuration) yield modules.first.configuration end @expectation_frameworks.push(*modules) end # Check if full backtrace is enabled. # @return [Boolean] is full backtrace enabled def full_backtrace? @backtrace_formatter.full_backtrace? end # Toggle full backtrace. # @attr true_or_false [Boolean] toggle full backtrace display def full_backtrace=(true_or_false) @backtrace_formatter.full_backtrace = true_or_false end # Enables color output if the output is a TTY. As of RSpec 3.6, this is # the default behavior and this option is retained only for backwards # compatibility. # # @deprecated No longer recommended because of complex behavior. Instead, # rely on the fact that TTYs will display color by default, or set # {#color_mode} to :on to display color on a non-TTY output. # @see color_mode # @see color_enabled? # @return [Boolean] def color value_for(:color) { @color } end # The mode for determining whether to display output in color. One of: # # - :automatic - the output will be in color if the output is a TTY (the # default) # - :on - the output will be in color, whether or not the output is a TTY # - :off - the output will not be in color # # @see color_enabled? # @return [Boolean] def color_mode value_for(:color_mode) { @color_mode } end # Check if color is enabled for a particular output. # @param output [IO] an output stream to use, defaults to the current # `output_stream` # @return [Boolean] def color_enabled?(output=output_stream) case color_mode when :on then true when :off then false else # automatic output_to_tty?(output) || (color && tty?) end end # Set the color mode. attr_writer :color_mode # Toggle output color. # # @deprecated No longer recommended because of complex behavior. 
Instead, # rely on the fact that TTYs will display color by default, or set # {:color_mode} to :on to display color on a non-TTY output. attr_writer :color # @private def libs=(libs) libs.map do |lib| @libs.unshift lib $LOAD_PATH.unshift lib end end # Run examples matching on `description` in all files to run. # @param description [String, Regexp] the pattern to filter on def full_description=(description) filter_run :full_description => Regexp.union(*Array(description).map { |d| Regexp.new(d) }) end # @return [Array] full description filter def full_description filter.fetch :full_description, nil end # @overload add_formatter(formatter) # @overload add_formatter(formatter, output) # # @param formatter [Class, String, Object] formatter to use. Can be any of the # string values supported from the CLI (`p`/`progress`, # `d`/`doc`/`documentation`, `h`/`html`, or `j`/`json`), any # class that implements the formatter protocol and has registered # itself with RSpec as a formatter, or a formatter instance. # @param output [String, IO] where the formatter will write its output. # Can be an IO object or a string path to a file. If not provided, # the configured `output_stream` (`$stdout`, by default) will be used. # # Adds a formatter to the set RSpec will use for this run. # # @see RSpec::Core::Formatters::Protocol def add_formatter(formatter, output=output_wrapper) formatter_loader.add(formatter, output) end alias_method :formatter=, :add_formatter # The formatter that will be used if no formatter has been set. # Defaults to 'progress'. def default_formatter formatter_loader.default_formatter end # Sets a fallback formatter to use if none other has been set. # # @example # # RSpec.configure do |rspec| # rspec.default_formatter = 'doc' # end def default_formatter=(value) formatter_loader.default_formatter = value end # Returns a duplicate of the formatters currently loaded in # the `FormatterLoader` for introspection. # # Note as this is a duplicate, any mutations will be disregarded. # # @return [Array] the formatters currently loaded def formatters formatter_loader.formatters.dup end # @private def formatter_loader @formatter_loader ||= Formatters::Loader.new(Reporter.new(self)) end # @private # # This buffer is used to capture all messages sent to the reporter during # reporter initialization. It can then replay those messages after the # formatter is correctly initialized. Otherwise, deprecation warnings # during formatter initialization can cause an infinite loop. class DeprecationReporterBuffer def initialize @calls = [] end def deprecation(*args) @calls << args end def play_onto(reporter) @calls.each do |args| reporter.deprecation(*args) end end end # @return [RSpec::Core::Reporter] the currently configured reporter def reporter # @reporter_buffer should only ever be set in this method to cover # initialization of @reporter. @reporter_buffer || @reporter ||= begin @reporter_buffer = DeprecationReporterBuffer.new formatter_loader.prepare_default output_wrapper, deprecation_stream @reporter_buffer.play_onto(formatter_loader.reporter) @reporter_buffer = nil formatter_loader.reporter end end # @api private # # Defaults `profile_examples` to 10 examples when `@profile_examples` is # `true`. def profile_examples profile = value_for(:profile_examples) { @profile_examples } if profile && !profile.is_a?(Integer) 10 else profile end end # @private def files_or_directories_to_run=(*files) files = files.flatten if (command == 'rspec' || Runner.running_in_drb?) && default_path && files.empty? 
files << default_path end @files_or_directories_to_run = files @files_to_run = nil end # The spec files RSpec will run. # @return [Array] specified files about to run def files_to_run @files_to_run ||= get_files_to_run(@files_or_directories_to_run) end # @private def last_run_statuses @last_run_statuses ||= Hash.new(UNKNOWN_STATUS).tap do |statuses| if (path = example_status_persistence_file_path) begin ExampleStatusPersister.load_from(path).inject(statuses) do |hash, example| status = example[:status] status = UNKNOWN_STATUS unless VALID_STATUSES.include?(status) hash[example.fetch(:example_id)] = status hash end rescue SystemCallError => e RSpec.warning "Could not read from #{path.inspect} (configured as " \ "`config.example_status_persistence_file_path`) due " \ "to a system error: #{e.inspect}. Please check that " \ "the config option is set to an accessible, valid " \ "file path", :call_site => nil end end end end # @private UNKNOWN_STATUS = "unknown".freeze # @private FAILED_STATUS = "failed".freeze # @private PASSED_STATUS = "passed".freeze # @private PENDING_STATUS = "pending".freeze # @private VALID_STATUSES = [UNKNOWN_STATUS, FAILED_STATUS, PASSED_STATUS, PENDING_STATUS] # @private def spec_files_with_failures @spec_files_with_failures ||= last_run_statuses.inject(Set.new) do |files, (id, status)| files << Example.parse_id(id).first if status == FAILED_STATUS files end.to_a end # Creates a method that delegates to `example` including the submitted # `args`. Used internally to add variants of `example` like `pending`: # @param name [String] example name alias # @param args [Array<Symbol>, Hash] metadata for the generated example # # @note The specific example alias below (`pending`) is already # defined for you. # @note Use with caution. This extends the language used in your # specs, but does not add any additional documentation. We use this # in RSpec to define methods like `focus` and `xit`, but we also add # docs for those methods. # # @example # RSpec.configure do |config| # config.alias_example_to :pending, :pending => true # end # # # This lets you do this: # # RSpec.describe Thing do # pending "does something" do # thing = Thing.new # end # end # # # ... which is the equivalent of # # RSpec.describe Thing do # it "does something", :pending => true do # thing = Thing.new # end # end def alias_example_to(name, *args) extra_options = Metadata.build_hash_from(args) RSpec::Core::ExampleGroup.define_example_method(name, extra_options) end # Creates a method that defines an example group with the provided # metadata. Can be used to define example group/metadata shortcuts. # # @example # RSpec.configure do |config| # config.alias_example_group_to :describe_model, :type => :model # end # # shared_context_for "model tests", :type => :model do # # define common model test helper methods, `let` declarations, etc # end # # # This lets you do this: # # RSpec.describe_model User do # end # # # ... which is the equivalent of # # RSpec.describe User, :type => :model do # end # # @note The defined aliased will also be added to the top level # (e.g. `main` and from within modules) if # `expose_dsl_globally` is set to true. 
# @see #alias_example_to # @see #expose_dsl_globally= def alias_example_group_to(new_name, *args) extra_options = Metadata.build_hash_from(args) RSpec::Core::ExampleGroup.define_example_group_method(new_name, extra_options) end # Define an alias for it_should_behave_like that allows different # language (like "it_has_behavior" or "it_behaves_like") to be # employed when including shared examples. # # @example # RSpec.configure do |config| # config.alias_it_behaves_like_to(:it_has_behavior, 'has behavior:') # end # # # allows the user to include a shared example group like: # # RSpec.describe Entity do # it_has_behavior 'sortability' do # let(:sortable) { Entity.new } # end # end # # # which is reported in the output as: # # Entity # # has behavior: sortability # # ...sortability examples here # # @note Use with caution. This extends the language used in your # specs, but does not add any additional documentation. We use this # in RSpec to define `it_should_behave_like` (for backward # compatibility), but we also add docs for that method. def alias_it_behaves_like_to(new_name, report_label='') RSpec::Core::ExampleGroup.define_nested_shared_group_method(new_name, report_label) end alias_method :alias_it_should_behave_like_to, :alias_it_behaves_like_to # Adds key/value pairs to the `inclusion_filter`. If `args` # includes any symbols that are not part of the hash, each symbol # is treated as a key in the hash with the value `true`. # # ### Note # # Filters set using this method can be overridden from the command line # or config files (e.g. `.rspec`). # # @example # # Given this declaration. # describe "something", :foo => 'bar' do # # ... # end # # # Any of the following will include that group. # config.filter_run_including :foo => 'bar' # config.filter_run_including :foo => /^ba/ # config.filter_run_including :foo => lambda {|v| v == 'bar'} # config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'} # # # Given a proc with an arity of 1, the lambda is passed the value # # related to the key, e.g. # config.filter_run_including :foo => lambda {|v| v == 'bar'} # # # Given a proc with an arity of 2, the lambda is passed the value # # related to the key, and the metadata itself e.g. # config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'} # # filter_run_including :foo # same as filter_run_including :foo => true def filter_run_including(*args) meta = Metadata.build_hash_from(args, :warn_about_example_group_filtering) filter_manager.include_with_low_priority meta static_config_filter_manager.include_with_low_priority Metadata.deep_hash_dup(meta) end alias_method :filter_run, :filter_run_including # Applies the provided filter only if any of examples match, in constrast # to {#filter_run}, which always applies even if no examples match, in # which case no examples will be run. This allows you to leave configured # filters in place that are intended only for temporary use. The most common # example is focus filtering: `config.filter_run_when_matching :focus`. # With that configured, you can temporarily focus an example or group # by tagging it with `:focus` metadata, or prefixing it with an `f` # (as in `fdescribe`, `fcontext` and `fit`) since those are aliases for # `describe`/`context`/`it` with `:focus` metadata. def filter_run_when_matching(*args) when_first_matching_example_defined(*args) do filter_run(*args) end end # Clears and reassigns the `inclusion_filter`. Set to `nil` if you don't # want any inclusion filter at all. 
# # ### Warning # # This overrides any inclusion filters/tags set on the command line or in # configuration files. def inclusion_filter=(filter) meta = Metadata.build_hash_from([filter], :warn_about_example_group_filtering) filter_manager.include_only meta end alias_method :filter=, :inclusion_filter= # Returns the `inclusion_filter`. If none has been set, returns an empty # hash. def inclusion_filter filter_manager.inclusions end alias_method :filter, :inclusion_filter # Adds key/value pairs to the `exclusion_filter`. If `args` # includes any symbols that are not part of the hash, each symbol # is treated as a key in the hash with the value `true`. # # ### Note # # Filters set using this method can be overridden from the command line # or config files (e.g. `.rspec`). # # @example # # Given this declaration. # describe "something", :foo => 'bar' do # # ... # end # # # Any of the following will exclude that group. # config.filter_run_excluding :foo => 'bar' # config.filter_run_excluding :foo => /^ba/ # config.filter_run_excluding :foo => lambda {|v| v == 'bar'} # config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'} # # # Given a proc with an arity of 1, the lambda is passed the value # # related to the key, e.g. # config.filter_run_excluding :foo => lambda {|v| v == 'bar'} # # # Given a proc with an arity of 2, the lambda is passed the value # # related to the key, and the metadata itself e.g. # config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'} # # filter_run_excluding :foo # same as filter_run_excluding :foo => true def filter_run_excluding(*args) meta = Metadata.build_hash_from(args, :warn_about_example_group_filtering) filter_manager.exclude_with_low_priority meta static_config_filter_manager.exclude_with_low_priority Metadata.deep_hash_dup(meta) end # Clears and reassigns the `exclusion_filter`. Set to `nil` if you don't # want any exclusion filter at all. # # ### Warning # # This overrides any exclusion filters/tags set on the command line or in # configuration files. def exclusion_filter=(filter) meta = Metadata.build_hash_from([filter], :warn_about_example_group_filtering) filter_manager.exclude_only meta end # Returns the `exclusion_filter`. If none has been set, returns an empty # hash. def exclusion_filter filter_manager.exclusions end # Tells RSpec to include `mod` in example groups. Methods defined in # `mod` are exposed to examples (not example groups). Use `filters` to # constrain the groups or examples in which to include the module. # # @example # # module AuthenticationHelpers # def login_as(user) # # ... # end # end # # module UserHelpers # def users(username) # # ... # end # end # # RSpec.configure do |config| # config.include(UserHelpers) # included in all groups # config.include(AuthenticationHelpers, :type => :request) # end # # describe "edit profile", :type => :request do # it "can be viewed by owning user" do # login_as users(:jdoe) # get "/profiles/jdoe" # assert_select ".username", :text => 'jdoe' # end # end # # @note Filtered module inclusions can also be applied to # individual examples that have matching metadata. Just like # Ruby's object model is that every object has a singleton class # which has only a single instance, RSpec's model is that every # example has a singleton example group containing just the one # example. 
# # @see #include_context # @see #extend # @see #prepend def include(mod, *filters) define_mixed_in_module(mod, filters, @include_modules, :include) do |group| safe_include(mod, group) end end # Tells RSpec to include the named shared example group in example groups. # Use `filters` to constrain the groups or examples in which to include # the example group. # # @example # # RSpec.shared_context "example users" do # let(:admin_user) { create_user(:admin) } # let(:guest_user) { create_user(:guest) } # end # # RSpec.configure do |config| # config.include_context "example users", :type => :request # end # # RSpec.describe "The admin page", :type => :request do # it "can be viewed by admins" do # login_with admin_user # get "/admin" # expect(response).to be_ok # end # # it "cannot be viewed by guests" do # login_with guest_user # get "/admin" # expect(response).to be_forbidden # end # end # # @note Filtered context inclusions can also be applied to # individual examples that have matching metadata. Just like # Ruby's object model is that every object has a singleton class # which has only a single instance, RSpec's model is that every # example has a singleton example group containing just the one # example. # # @see #include def include_context(shared_group_name, *filters) shared_module = world.shared_example_group_registry.find([:main], shared_group_name) include shared_module, *filters end # Tells RSpec to extend example groups with `mod`. Methods defined in # `mod` are exposed to example groups (not examples). Use `filters` to # constrain the groups to extend. # # Similar to `include`, but behavior is added to example groups, which # are classes, rather than the examples, which are instances of those # classes. # # @example # # module UiHelpers # def run_in_browser # # ... # end # end # # RSpec.configure do |config| # config.extend(UiHelpers, :type => :request) # end # # describe "edit profile", :type => :request do # run_in_browser # # it "does stuff in the client" do # # ... # end # end # # @see #include # @see #prepend def extend(mod, *filters) define_mixed_in_module(mod, filters, @extend_modules, :extend) do |group| safe_extend(mod, group) end end if RSpec::Support::RubyFeatures.module_prepends_supported? # Tells RSpec to prepend example groups with `mod`. Methods defined in # `mod` are exposed to examples (not example groups). Use `filters` to # constrain the groups in which to prepend the module. # # Similar to `include`, but module is included before the example group's class # in the ancestor chain. # # @example # # module OverrideMod # def override_me # "overridden" # end # end # # RSpec.configure do |config| # config.prepend(OverrideMod, :method => :prepend) # end # # describe "overriding example's class", :method => :prepend do # it "finds the user" do # self.class.class_eval do # def override_me # end # end # override_me # => "overridden" # # ... # end # end # # @see #include # @see #extend def prepend(mod, *filters) define_mixed_in_module(mod, filters, @prepend_modules, :prepend) do |group| safe_prepend(mod, group) end end end # @private # # Used internally to extend a group with modules using `include`, `prepend` and/or # `extend`. 
def configure_group(group) group.hooks.register_globals(group, hooks) configure_group_with group, @include_modules, :safe_include configure_group_with group, @extend_modules, :safe_extend configure_group_with group, @prepend_modules, :safe_prepend end # @private # # Used internally to extend the singleton class of a single example's # example group instance with modules using `include` and/or `extend`. def configure_example(example, example_hooks) example_hooks.register_global_singleton_context_hooks(example, hooks) singleton_group = example.example_group_instance.singleton_class # We replace the metadata so that SharedExampleGroupModule#included # has access to the example's metadata[:location]. singleton_group.with_replaced_metadata(example.metadata) do modules = @include_modules.items_for(example.metadata) modules.each do |mod| safe_include(mod, example.example_group_instance.singleton_class) end MemoizedHelpers.define_helpers_on(singleton_group) unless modules.empty? end end # @private def requires=(paths) directories = ['lib', default_path].select { |p| File.directory? p } RSpec::Core::RubyProject.add_to_load_path(*directories) paths.each { |path| load_file_handling_errors(:require, path) } @requires += paths end # @private def in_project_source_dir_regex regexes = project_source_dirs.map do |dir| /\A#{Regexp.escape(File.expand_path(dir))}\// end Regexp.union(regexes) end # @private def configure_mock_framework RSpec::Core::ExampleGroup.__send__(:include, mock_framework) conditionally_disable_mocks_monkey_patching end # @private def configure_expectation_framework expectation_frameworks.each do |framework| RSpec::Core::ExampleGroup.__send__(:include, framework) end conditionally_disable_expectations_monkey_patching end # @private def load_spec_files # Note which spec files world is already aware of. # This is generally only needed for when the user runs # `ruby path/to/spec.rb` (and loads `rspec/autorun`) -- # in that case, the spec file was loaded by `ruby` and # isn't loaded by us here so we only know about it because # of an example group being registered in it. world.registered_example_group_files.each do |f| loaded_spec_files << f # the registered files are already expended absolute paths end files_to_run.uniq.each do |f| file = File.expand_path(f) load_file_handling_errors(:load, file) loaded_spec_files << file end @spec_files_loaded = true end # @private DEFAULT_FORMATTER = lambda { |string| string } # Formats the docstring output using the block provided. # # @example # # This will strip the descriptions of both examples and example # # groups. # RSpec.configure do |config| # config.format_docstrings { |s| s.strip } # end def format_docstrings(&block) @format_docstrings_block = block_given? ? block : DEFAULT_FORMATTER end # @private def format_docstrings_block @format_docstrings_block ||= DEFAULT_FORMATTER end # @private def self.delegate_to_ordering_manager(*methods) methods.each do |method| define_method method do |*args, &block| ordering_manager.__send__(method, *args, &block) end end end # @!method seed=(value) # # Sets the seed value and sets the default global ordering to random. delegate_to_ordering_manager :seed= # @!method seed # Seed for random ordering (default: generated randomly each run). # # When you run specs with `--order random`, RSpec generates a random seed # for the randomization and prints it to the `output_stream` (assuming # you're using RSpec's built-in formatters). If you discover an ordering # dependency (i.e. 
examples fail intermittently depending on order), set # this (on Configuration or on the command line with `--seed`) to run # using the same seed while you debug the issue. # # We recommend, actually, that you use the command line approach so you # don't accidentally leave the seed encoded. delegate_to_ordering_manager :seed # @!method order=(value) # # Sets the default global ordering strategy. By default this can be one # of `:defined`, `:random`, but is customizable through the # `register_ordering` API. If order is set to `'rand:<seed>'`, # the seed will also be set. # # @see #register_ordering delegate_to_ordering_manager :order= # @!method register_ordering(name) # # Registers a named ordering strategy that can later be # used to order an example group's subgroups by adding # `:order => <name>` metadata to the example group. # # @param name [Symbol] The name of the ordering. # @yield Block that will order the given examples or example groups # @yieldparam list [Array<RSpec::Core::Example>, # Array<RSpec::Core::ExampleGroup>] The examples or groups to order # @yieldreturn [Array<RSpec::Core::Example>, # Array<RSpec::Core::ExampleGroup>] The re-ordered examples or groups # # @example # RSpec.configure do |rspec| # rspec.register_ordering :reverse do |list| # list.reverse # end # end # # RSpec.describe 'MyClass', :order => :reverse do # # ... # end # # @note Pass the symbol `:global` to set the ordering strategy that # will be used to order the top-level example groups and any example # groups that do not have declared `:order` metadata. # # @example # RSpec.configure do |rspec| # rspec.register_ordering :global do |examples| # acceptance, other = examples.partition do |example| # example.metadata[:type] == :acceptance # end # other + acceptance # end # end # # RSpec.describe 'MyClass', :type => :acceptance do # # will run last # end # # RSpec.describe 'MyClass' do # # will run first # end # delegate_to_ordering_manager :register_ordering # @private delegate_to_ordering_manager :seed_used?, :ordering_registry # Set Ruby warnings on or off. def warnings=(value) $VERBOSE = !!value end # @return [Boolean] Whether or not ruby warnings are enabled. def warnings? $VERBOSE end # @private RAISE_ERROR_WARNING_NOTIFIER = lambda { |message| raise message } # Turns warnings into errors. This can be useful when # you want RSpec to run in a 'strict' no warning situation. # # @example # # RSpec.configure do |rspec| # rspec.raise_on_warning = true # end def raise_on_warning=(value) if value RSpec::Support.warning_notifier = RAISE_ERROR_WARNING_NOTIFIER else RSpec::Support.warning_notifier = RSpec::Support::DEFAULT_WARNING_NOTIFIER end end # Exposes the current running example via the named # helper method. RSpec 2.x exposed this via `example`, # but in RSpec 3.0, the example is instead exposed via # an arg yielded to `it`, `before`, `let`, etc. However, # some extension gems (such as Capybara) depend on the # RSpec 2.x's `example` method, so this config option # can be used to maintain compatibility. # # @param method_name [Symbol] the name of the helper method # # @example # # RSpec.configure do |rspec| # rspec.expose_current_running_example_as :example # end # # RSpec.describe MyClass do # before do # # `example` can be used here because of the above config. 
# do_something if example.metadata[:type] == "foo" # end # end def expose_current_running_example_as(method_name) ExposeCurrentExample.module_exec do extend RSpec::SharedContext let(method_name) { |ex| ex } end include ExposeCurrentExample end # @private module ExposeCurrentExample; end # Turns deprecation warnings into errors, in order to surface # the full backtrace of the call site. This can be useful when # you need more context to address a deprecation than the # single-line call site normally provided. # # @example # # RSpec.configure do |rspec| # rspec.raise_errors_for_deprecations! # end def raise_errors_for_deprecations! self.deprecation_stream = Formatters::DeprecationFormatter::RaiseErrorStream.new end # Enables zero monkey patching mode for RSpec. It removes monkey # patching of the top-level DSL methods (`describe`, # `shared_examples_for`, etc) onto `main` and `Module`, instead # requiring you to prefix these methods with `RSpec.`. It enables # expect-only syntax for rspec-mocks and rspec-expectations. It # simply disables monkey patching on whatever pieces of RSpec # the user is using. # # @note It configures rspec-mocks and rspec-expectations only # if the user is using those (either explicitly or implicitly # by not setting `mock_with` or `expect_with` to anything else). # # @note If the user uses this options with `mock_with :mocha` # (or similiar) they will still have monkey patching active # in their test environment from mocha. # # @example # # # It disables all monkey patching. # RSpec.configure do |config| # config.disable_monkey_patching! # end # # # Is an equivalent to # RSpec.configure do |config| # config.expose_dsl_globally = false # # config.mock_with :rspec do |mocks| # mocks.syntax = :expect # mocks.patch_marshal_to_support_partial_doubles = false # end # # config.expect_with :rspec do |expectations| # expectations.syntax = :expect # end # end def disable_monkey_patching! self.expose_dsl_globally = false self.disable_monkey_patching = true conditionally_disable_mocks_monkey_patching conditionally_disable_expectations_monkey_patching end # @private attr_accessor :disable_monkey_patching # Defines a callback that can assign derived metadata values. # # @param filters [Array<Symbol>, Hash] metadata filters that determine # which example or group metadata hashes the callback will be triggered # for. If none are given, the callback will be run against the metadata # hashes of all groups and examples. # @yieldparam metadata [Hash] original metadata hash from an example or # group. Mutate this in your block as needed. # # @example # RSpec.configure do |config| # # Tag all groups and examples in the spec/unit directory with # # :type => :unit # config.define_derived_metadata(:file_path => %r{/spec/unit/}) do |metadata| # metadata[:type] = :unit # end # end def define_derived_metadata(*filters, &block) meta = Metadata.build_hash_from(filters, :warn_about_example_group_filtering) @derived_metadata_blocks.append(block, meta) end # Defines a callback that runs after the first example with matching # metadata is defined. If no examples are defined with matching metadata, # it will not get called at all. # # This can be used to ensure some setup is performed (such as bootstrapping # a DB or loading a specific file that adds significantly to the boot time) # if needed (as indicated by the presence of an example with matching metadata) # but avoided otherwise. 
# # @example # RSpec.configure do |config| # config.when_first_matching_example_defined(:db) do # # Load a support file that does some heavyweight setup, # # including bootstrapping the DB, but only if we have loaded # # any examples tagged with `:db`. # require 'support/db' # end # end def when_first_matching_example_defined(*filters) specified_meta = Metadata.build_hash_from(filters, :warn_about_example_group_filtering) callback = lambda do |example_or_group_meta| # Example groups do not have `:example_group` metadata # (instead they have `:parent_example_group` metadata). return unless example_or_group_meta.key?(:example_group) # Ensure the callback only fires once. @derived_metadata_blocks.delete(callback, specified_meta) yield end @derived_metadata_blocks.append(callback, specified_meta) end # @private def apply_derived_metadata_to(metadata) @derived_metadata_blocks.items_for(metadata).each do |block| block.call(metadata) end end # Defines a `before` hook. See {Hooks#before} for full docs. # # This method differs from {Hooks#before} in only one way: it supports # the `:suite` scope. Hooks with the `:suite` scope will be run once before # the first example of the entire suite is executed. # # @see #prepend_before # @see #after # @see #append_after def before(scope=nil, *meta, &block) handle_suite_hook(scope, meta) do @before_suite_hooks << Hooks::BeforeHook.new(block, {}) end || begin # defeat Ruby 2.5 lazy proc allocation to ensure # the methods below are passed the same proc instances # so `Hook` equality is preserved. For more info, see: # https://bugs.ruby-lang.org/issues/14045#note-5 block.__id__ add_hook_to_existing_matching_groups(meta, scope) { |g| g.before(scope, *meta, &block) } super(scope, *meta, &block) end end alias_method :append_before, :before # Adds `block` to the start of the list of `before` blocks in the same # scope (`:example`, `:context`, or `:suite`), in contrast to {#before}, # which adds the hook to the end of the list. # # See {Hooks#before} for full `before` hook docs. # # This method differs from {Hooks#prepend_before} in only one way: it supports # the `:suite` scope. Hooks with the `:suite` scope will be run once before # the first example of the entire suite is executed. # # @see #before # @see #after # @see #append_after def prepend_before(scope=nil, *meta, &block) handle_suite_hook(scope, meta) do @before_suite_hooks.unshift Hooks::BeforeHook.new(block, {}) end || begin # defeat Ruby 2.5 lazy proc allocation to ensure # the methods below are passed the same proc instances # so `Hook` equality is preserved. For more info, see: # https://bugs.ruby-lang.org/issues/14045#note-5 block.__id__ add_hook_to_existing_matching_groups(meta, scope) { |g| g.prepend_before(scope, *meta, &block) } super(scope, *meta, &block) end end # Defines a `after` hook. See {Hooks#after} for full docs. # # This method differs from {Hooks#after} in only one way: it supports # the `:suite` scope. Hooks with the `:suite` scope will be run once after # the last example of the entire suite is executed. # # @see #append_after # @see #before # @see #prepend_before def after(scope=nil, *meta, &block) handle_suite_hook(scope, meta) do @after_suite_hooks.unshift Hooks::AfterHook.new(block, {}) end || begin # defeat Ruby 2.5 lazy proc allocation to ensure # the methods below are passed the same proc instances # so `Hook` equality is preserved. 
For more info, see: # https://bugs.ruby-lang.org/issues/14045#note-5 block.__id__ add_hook_to_existing_matching_groups(meta, scope) { |g| g.after(scope, *meta, &block) } super(scope, *meta, &block) end end alias_method :prepend_after, :after # Adds `block` to the end of the list of `after` blocks in the same # scope (`:example`, `:context`, or `:suite`), in contrast to {#after}, # which adds the hook to the start of the list. # # See {Hooks#after} for full `after` hook docs. # # This method differs from {Hooks#append_after} in only one way: it supports # the `:suite` scope. Hooks with the `:suite` scope will be run once after # the last example of the entire suite is executed. # # @see #append_after # @see #before # @see #prepend_before def append_after(scope=nil, *meta, &block) handle_suite_hook(scope, meta) do @after_suite_hooks << Hooks::AfterHook.new(block, {}) end || begin # defeat Ruby 2.5 lazy proc allocation to ensure # the methods below are passed the same proc instances # so `Hook` equality is preserved. For more info, see: # https://bugs.ruby-lang.org/issues/14045#note-5 block.__id__ add_hook_to_existing_matching_groups(meta, scope) { |g| g.append_after(scope, *meta, &block) } super(scope, *meta, &block) end end # Registers `block` as an `around` hook. # # See {Hooks#around} for full `around` hook docs. def around(scope=nil, *meta, &block) # defeat Ruby 2.5 lazy proc allocation to ensure # the methods below are passed the same proc instances # so `Hook` equality is preserved. For more info, see: # https://bugs.ruby-lang.org/issues/14045#note-5 block.__id__ add_hook_to_existing_matching_groups(meta, scope) { |g| g.around(scope, *meta, &block) } super(scope, *meta, &block) end # @private def with_suite_hooks return yield if dry_run? begin run_suite_hooks("a `before(:suite)` hook", @before_suite_hooks) yield ensure run_suite_hooks("an `after(:suite)` hook", @after_suite_hooks) end end # @private # Holds the various registered hooks. Here we use a FilterableItemRepository # implementation that is specifically optimized for the read/write patterns # of the config object. def hooks @hooks ||= HookCollections.new(self, FilterableItemRepository::QueryOptimized) end # Invokes block before defining an example group def on_example_group_definition(&block) on_example_group_definition_callbacks << block end # @api private # Returns an array of blocks to call before defining an example group def on_example_group_definition_callbacks @on_example_group_definition_callbacks ||= [] end # @private def bisect_runner_class @bisect_runner_class ||= begin case bisect_runner when :fork RSpec::Support.require_rspec_core 'bisect/fork_runner' Bisect::ForkRunner when :shell RSpec::Support.require_rspec_core 'bisect/shell_runner' Bisect::ShellRunner else raise "Unsupported value for `bisect_runner` (#{bisect_runner.inspect}). " \ "Only `:fork` and `:shell` are supported." 
end end end private def load_file_handling_errors(method, file) __send__(method, file) rescue LoadError => ex relative_file = Metadata.relative_path(file) suggestions = DidYouMean.new(relative_file).call reporter.notify_non_example_exception(ex, "An error occurred while loading #{relative_file}.#{suggestions}") RSpec.world.wants_to_quit = true rescue Support::AllExceptionsExceptOnesWeMustNotRescue => ex relative_file = Metadata.relative_path(file) reporter.notify_non_example_exception(ex, "An error occurred while loading #{relative_file}.") RSpec.world.wants_to_quit = true end def handle_suite_hook(scope, meta) return nil unless scope == :suite unless meta.empty? # TODO: in RSpec 4, consider raising an error here. # We warn only for backwards compatibility. RSpec.warn_with "WARNING: `:suite` hooks do not support metadata since " \ "they apply to the suite as a whole rather than " \ "any individual example or example group that has metadata. " \ "The metadata you have provided (#{meta.inspect}) will be ignored." end yield end def run_suite_hooks(hook_description, hooks) context = SuiteHookContext.new(hook_description, reporter) hooks.each do |hook| begin hook.run(context) rescue Support::AllExceptionsExceptOnesWeMustNotRescue => ex context.set_exception(ex) # Do not run subsequent `before` hooks if one fails. # But for `after` hooks, we run them all so that all # cleanup bits get a chance to complete, minimizing the # chance that resources get left behind. break if hooks.equal?(@before_suite_hooks) end end end def get_files_to_run(paths) files = FlatMap.flat_map(paths_to_check(paths)) do |path| path = path.gsub(File::ALT_SEPARATOR, File::SEPARATOR) if File::ALT_SEPARATOR File.directory?(path) ? gather_directories(path) : extract_location(path) end.uniq return files unless only_failures? relative_files = files.map { |f| Metadata.relative_path(File.expand_path f) } intersection = (relative_files & spec_files_with_failures.to_a) intersection.empty? ? files : intersection end def paths_to_check(paths) return paths if pattern_might_load_specs_from_vendored_dirs? paths + [Dir.getwd] end def pattern_might_load_specs_from_vendored_dirs? pattern.split(File::SEPARATOR).first.include?('**') end def gather_directories(path) include_files = get_matching_files(path, pattern) exclude_files = get_matching_files(path, exclude_pattern) (include_files - exclude_files).uniq end def get_matching_files(path, pattern) raw_files = Dir[file_glob_from(path, pattern)] raw_files.map { |file| File.expand_path(file) }.sort end def file_glob_from(path, pattern) stripped = "{#{pattern.gsub(/\s*,\s*/, ',')}}" return stripped if pattern =~ /^(\.\/)?#{Regexp.escape path}/ || absolute_pattern?(pattern) File.join(path, stripped) end if RSpec::Support::OS.windows? 
# :nocov: def absolute_pattern?(pattern) pattern =~ /\A[A-Z]:\\/ || windows_absolute_network_path?(pattern) end def windows_absolute_network_path?(pattern) return false unless ::File::ALT_SEPARATOR pattern.start_with?(::File::ALT_SEPARATOR + ::File::ALT_SEPARATOR) end # :nocov: else def absolute_pattern?(pattern) pattern.start_with?(File::Separator) end end def extract_location(path) match = /^(.*?)((?:\:\d+)+)$/.match(path) if match captures = match.captures path = captures[0] lines = captures[1][1..-1].split(":").map(&:to_i) filter_manager.add_location path, lines else path, scoped_ids = Example.parse_id(path) filter_manager.add_ids(path, scoped_ids.split(/\s*,\s*/)) if scoped_ids end return [] if path == default_path File.expand_path(path) end def command $0.split(File::SEPARATOR).last end def value_for(key) @preferred_options.fetch(key) { yield } end def define_built_in_hooks around(:example, :aggregate_failures => true) do |procsy| begin aggregate_failures(nil, :hide_backtrace => true, &procsy) rescue Support::AllExceptionsExceptOnesWeMustNotRescue => exception procsy.example.set_aggregate_failures_exception(exception) end end end def assert_no_example_groups_defined(config_option) return unless world.example_groups.any? raise MustBeConfiguredBeforeExampleGroupsError.new( "RSpec's #{config_option} configuration option must be configured before " \ "any example groups are defined, but you have already defined a group." ) end def output_wrapper @output_wrapper ||= OutputWrapper.new(output_stream) end def output_to_tty?(output=output_stream) output.respond_to?(:tty?) && output.tty? end def conditionally_disable_mocks_monkey_patching return unless disable_monkey_patching && rspec_mocks_loaded? RSpec::Mocks.configuration.tap do |config| config.syntax = :expect config.patch_marshal_to_support_partial_doubles = false end end def conditionally_disable_expectations_monkey_patching return unless disable_monkey_patching && rspec_expectations_loaded? RSpec::Expectations.configuration.syntax = :expect end def rspec_mocks_loaded? defined?(RSpec::Mocks.configuration) end def rspec_expectations_loaded? defined?(RSpec::Expectations.configuration) end def update_pattern_attr(name, value) if @spec_files_loaded RSpec.warning "Configuring `#{name}` to #{value} has no effect since " \ "RSpec has already loaded the spec files." end instance_variable_set(:"@#{name}", value) @files_to_run = nil end def clear_values_derived_from_example_status_persistence_file_path @last_run_statuses = nil @spec_files_with_failures = nil end def configure_group_with(group, module_list, application_method) module_list.items_for(group.metadata).each do |mod| __send__(application_method, mod, group) end end def add_hook_to_existing_matching_groups(meta, scope, &block) # For example hooks, we have to apply it to each of the top level # groups, even if the groups do not match. When we apply it, we # apply it with the metadata, so it will only apply to examples # in the group that match the metadata. # #2280 for background and discussion. if scope == :example || scope == :each || scope.nil? world.example_groups.each(&block) else meta = Metadata.build_hash_from(meta.dup) on_existing_matching_groups(meta, &block) end end def on_existing_matching_groups(meta) world.traverse_example_group_trees_until do |group| metadata_applies_to_group?(meta, group).tap do |applies| yield group if applies end end end def metadata_applies_to_group?(meta, group) meta.empty? 
|| MetadataFilter.apply?(:any?, meta, group.metadata) end if RSpec::Support::RubyFeatures.module_prepends_supported? def safe_prepend(mod, host) host.__send__(:prepend, mod) unless host < mod end end if RUBY_VERSION.to_f >= 1.9 def safe_include(mod, host) host.__send__(:include, mod) unless host < mod end def safe_extend(mod, host) host.extend(mod) unless host.singleton_class < mod end else # for 1.8.7 # :nocov: def safe_include(mod, host) host.__send__(:include, mod) unless host.included_modules.include?(mod) end def safe_extend(mod, host) host.extend(mod) unless (class << host; self; end).included_modules.include?(mod) end # :nocov: end def define_mixed_in_module(mod, filters, mod_list, config_method, &block) unless Module === mod raise TypeError, "`RSpec.configuration.#{config_method}` expects a module but got: #{mod.inspect}" end meta = Metadata.build_hash_from(filters, :warn_about_example_group_filtering) mod_list.append(mod, meta) on_existing_matching_groups(meta, &block) end end # rubocop:enable Metrics/ClassLength end end
1
17,324
I think this is a bad name, as it's not clear what it does by name alone. If we merge this, it should be something like `use_links_to_files_in_output` and should take either a custom scheme like `rspec` or default to `file`.
rspec-rspec-core
rb
@@ -65,6 +65,7 @@ public final class BaselineErrorProne implements Plugin<Project> { .configure(ErrorProneOptions.class, errorProneOptions -> { errorProneOptions.check("Slf4jLogsafeArgs", CheckSeverity.OFF); errorProneOptions.check("PreferSafeLoggableExceptions", CheckSeverity.OFF); + errorProneOptions.check("Slf4jConstantLogMessage", CheckSeverity.OFF); })); });
1
/* * (c) Copyright 2017 Palantir Technologies Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.palantir.baseline.plugins; import com.google.common.base.Splitter; import com.google.common.collect.ImmutableList; import java.io.File; import java.util.AbstractList; import java.util.List; import java.util.Optional; import java.util.stream.Collectors; import net.ltgt.gradle.errorprone.CheckSeverity; import net.ltgt.gradle.errorprone.ErrorProneOptions; import net.ltgt.gradle.errorprone.ErrorPronePlugin; import org.gradle.api.JavaVersion; import org.gradle.api.Plugin; import org.gradle.api.Project; import org.gradle.api.file.FileCollection; import org.gradle.api.plugins.ExtensionAware; import org.gradle.api.tasks.compile.JavaCompile; import org.gradle.api.tasks.javadoc.Javadoc; import org.gradle.api.tasks.testing.Test; public final class BaselineErrorProne implements Plugin<Project> { private static final String ERROR_PRONE_JAVAC_VERSION = "9+181-r4173-1"; @Override public void apply(Project project) { project.getPluginManager().withPlugin("java", plugin -> { project.getPluginManager().apply(ErrorPronePlugin.class); String version = Optional.ofNullable(getClass().getPackage().getImplementationVersion()) .orElse("latest.release"); project.getDependencies().add( ErrorPronePlugin.CONFIGURATION_NAME, "com.palantir.baseline:baseline-error-prone:" + version); project.getTasks().withType(JavaCompile.class).configureEach(javaCompile -> ((ExtensionAware) javaCompile.getOptions()).getExtensions() .configure(ErrorProneOptions.class, errorProneOptions -> { errorProneOptions.setEnabled(true); errorProneOptions.setDisableWarningsInGeneratedCode(true); errorProneOptions.check("EqualsHashCode", CheckSeverity.ERROR); errorProneOptions.check("EqualsIncompatibleType", CheckSeverity.ERROR); errorProneOptions.check("StreamResourceLeak", CheckSeverity.ERROR); })); project.getPluginManager().withPlugin("java-gradle-plugin", appliedPlugin -> { project.getTasks().withType(JavaCompile.class).configureEach(javaCompile -> ((ExtensionAware) javaCompile.getOptions()).getExtensions() .configure(ErrorProneOptions.class, errorProneOptions -> { errorProneOptions.check("Slf4jLogsafeArgs", CheckSeverity.OFF); errorProneOptions.check("PreferSafeLoggableExceptions", CheckSeverity.OFF); })); }); // In case of java 8 we need to add errorprone javac compiler to bootstrap classpath of tasks that perform // compilation or code analysis. 
ErrorProneJavacPluginPlugin handles JavaCompile cases via errorproneJavac // configuration and we do similar thing for Test and Javadoc type tasks if (!JavaVersion.current().isJava9Compatible()) { project.getDependencies().add(ErrorPronePlugin.JAVAC_CONFIGURATION_NAME, "com.google.errorprone:javac:" + ERROR_PRONE_JAVAC_VERSION); project.getConfigurations() .named(ErrorPronePlugin.JAVAC_CONFIGURATION_NAME) .configure(conf -> { List<File> bootstrapClasspath = Splitter.on(File.pathSeparator) .splitToList(System.getProperty("sun.boot.class.path")) .stream() .map(File::new) .collect(Collectors.toList()); FileCollection errorProneFiles = conf.plus(project.files(bootstrapClasspath)); project.getTasks().withType(Test.class) .configureEach(test -> test.setBootstrapClasspath(errorProneFiles)); project.getTasks().withType(Javadoc.class) .configureEach(javadoc -> javadoc.getOptions() .setBootClasspath(new LazyConfigurationList(errorProneFiles))); }); } }); } private static final class LazyConfigurationList extends AbstractList<File> { private final FileCollection files; private List<File> fileList; private LazyConfigurationList(FileCollection files) { this.files = files; } @Override public File get(int index) { if (fileList == null) { fileList = ImmutableList.copyOf(files.getFiles()); } return fileList.get(index); } @Override public int size() { if (fileList == null) { fileList = ImmutableList.copyOf(files.getFiles()); } return fileList.size(); } } }
1
6,790
Arguably this is actually still valuable for perf, maybe it should just be a warning?
palantir-gradle-baseline
java
@@ -35,6 +35,14 @@ func (s *server) peerConnectHandler(w http.ResponseWriter, r *http.Request) { return } + s.Addressbook.Put(address, addr) + if err := s.TopologyDriver.AddPeer(address); err != nil { + s.Logger.Debugf("debug api: topologyDriver.AddPeer %s: %v", addr, err) + s.Logger.Errorf("unable to connect to peer %s", addr) + jsonhttp.InternalServerError(w, err) + return + } + jsonhttp.OK(w, peerConnectResponse{ Address: address.String(), })
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package debugapi import ( "errors" "net/http" "github.com/ethersphere/bee/pkg/jsonhttp" "github.com/ethersphere/bee/pkg/p2p" "github.com/ethersphere/bee/pkg/swarm" "github.com/gorilla/mux" "github.com/multiformats/go-multiaddr" ) type peerConnectResponse struct { Address string `json:"address"` } func (s *server) peerConnectHandler(w http.ResponseWriter, r *http.Request) { addr, err := multiaddr.NewMultiaddr("/" + mux.Vars(r)["multi-address"]) if err != nil { s.Logger.Debugf("debug api: peer connect: parse multiaddress: %v", err) jsonhttp.BadRequest(w, err) return } address, err := s.P2P.Connect(r.Context(), addr) if err != nil { s.Logger.Debugf("debug api: peer connect %s: %v", addr, err) s.Logger.Errorf("unable to connect to peer %s", addr) jsonhttp.InternalServerError(w, err) return } jsonhttp.OK(w, peerConnectResponse{ Address: address.String(), }) } func (s *server) peerDisconnectHandler(w http.ResponseWriter, r *http.Request) { addr := mux.Vars(r)["address"] swarmAddr, err := swarm.ParseHexAddress(addr) if err != nil { s.Logger.Debugf("debug api: parse peer address %s: %v", addr, err) jsonhttp.BadRequest(w, "invalid peer address") return } if err := s.P2P.Disconnect(swarmAddr); err != nil { s.Logger.Debugf("debug api: peer disconnect %s: %v", addr, err) if errors.Is(err, p2p.ErrPeerNotFound) { jsonhttp.BadRequest(w, "peer not found") return } s.Logger.Errorf("unable to disconnect peer %s", addr) jsonhttp.InternalServerError(w, err) return } jsonhttp.OK(w, nil) } type peersResponse struct { Peers []p2p.Peer `json:"peers"` } func (s *server) peersHandler(w http.ResponseWriter, r *http.Request) { jsonhttp.OK(w, peersResponse{ Peers: s.P2P.Peers(), }) }
1
8,920
didn't we say that the topology driver will actually insert the entries into the address book?
ethersphere-bee
go
@@ -24,11 +24,13 @@ import { removeNode } from '../util'; * Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`. */ export function diffChildren(parentDom, newParentVNode, oldParentVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent, oldDom) { - let childVNode, i, j, p, index, oldVNode, newDom, + let childVNode, i, j, oldVNode, newDom, nextDom, sibDom, focus; let newChildren = newParentVNode._children || toChildArray(newParentVNode.props.children, newParentVNode._children=[], coerceToVNode, true); - let oldChildren = oldParentVNode!=null && oldParentVNode!=EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR; + // This is a compression of oldParentVNode!=null && oldParentVNode != EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR + // as EMPTY_OBJ._children should be `undefined`. + let oldChildren = oldParentVNode!=null && oldParentVNode._children || EMPTY_ARR; let oldChildrenLength = oldChildren.length;
1
import { diff, unmount } from './index'; import { coerceToVNode, Fragment } from '../create-element'; import { EMPTY_OBJ, EMPTY_ARR } from '../constants'; import { removeNode } from '../util'; /** * Diff the children of a virtual node * @param {import('../internal').PreactElement} parentDom The DOM element whose * children are being diffed * @param {import('../internal').VNode} newParentVNode The new virtual * node whose children should be diff'ed against oldParentVNode * @param {import('../internal').VNode} oldParentVNode The old virtual * node whose children should be diff'ed against newParentVNode * @param {object} context The current context object * @param {boolean} isSvg Whether or not this DOM node is an SVG node * @param {Array<import('../internal').PreactElement>} excessDomChildren * @param {Array<import('../internal').Component>} mounts The list of components * which have mounted * @param {import('../internal').Component} ancestorComponent The direct parent * component to the ones being diffed * @param {Node | Text} oldDom The current attached DOM * element any new dom elements should be placed around. Likely `null` on first * render (except when hydrating). Can be a sibling DOM element when diffing * Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`. */ export function diffChildren(parentDom, newParentVNode, oldParentVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent, oldDom) { let childVNode, i, j, p, index, oldVNode, newDom, nextDom, sibDom, focus; let newChildren = newParentVNode._children || toChildArray(newParentVNode.props.children, newParentVNode._children=[], coerceToVNode, true); let oldChildren = oldParentVNode!=null && oldParentVNode!=EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR; let oldChildrenLength = oldChildren.length; // Only in very specific places should this logic be invoked (top level `render` and `diffElementNodes`). // I'm using `EMPTY_OBJ` to signal when `diffChildren` is invoked in these situations. I can't use `null` // for this purpose, because `null` is a valid value for `oldDom` which can mean to skip to this logic // (e.g. if mounting a new tree in which the old DOM should be ignored (usually for Fragments). if (oldDom == EMPTY_OBJ) { oldDom = null; if (excessDomChildren!=null) { for (i = 0; i < excessDomChildren.length; i++) { if (excessDomChildren[i]!=null) { oldDom = excessDomChildren[i]; break; } } } else { for (i = 0; i < oldChildrenLength; i++) { if (oldChildren[i] && oldChildren[i]._dom) { oldDom = oldChildren[i]._dom; break; } } } } for (i=0; i<newChildren.length; i++) { childVNode = newChildren[i] = coerceToVNode(newChildren[i]); oldVNode = index = null; // Check if we find a corresponding element in oldChildren and store the // index where the element was found. p = oldChildren[i]; if (childVNode!=null) { if (p===null || (p != null && (childVNode.key==null && p.key==null ? (childVNode.type === p.type) : (childVNode.key === p.key)))) { index = i; } else { for (j=0; j<oldChildrenLength; j++) { p = oldChildren[j]; if (p!=null) { if (childVNode.key==null && p.key==null ? (childVNode.type === p.type) : (childVNode.key === p.key)) { index = j; break; } } } } } // If we have found a corresponding old element we store it in a variable // and delete it from the array. That way the next iteration can skip this // element. 
if (index!=null) { oldVNode = oldChildren[index]; // We can't use `null` here because that is reserved for empty // placeholders (holes) oldChildren[index] = undefined; } nextDom = oldDom!=null && oldDom.nextSibling; // Morph the old element into the new one, but don't append it to the dom yet newDom = diff(oldVNode==null ? null : oldVNode._dom, parentDom, childVNode, oldVNode, context, isSvg, excessDomChildren, mounts, ancestorComponent, null, oldDom); // Only proceed if the vnode has not been unmounted by `diff()` above. if (childVNode!=null && newDom !=null) { // Store focus in case moving children around changes it. Note that we // can't just check once for every tree, because we have no way to // differentiate wether the focus was reset by the user in a lifecycle // hook or by reordering dom nodes. focus = document.activeElement; if (childVNode._lastDomChild != null) { // Only Fragments or components that return Fragment like VNodes will // have a non-null _lastDomChild. Continue the diff from the end of // this Fragment's DOM tree. newDom = childVNode._lastDomChild; } else if (excessDomChildren==oldVNode || newDom!=oldDom || newDom.parentNode==null) { // NOTE: excessDomChildren==oldVNode above: // This is a compression of excessDomChildren==null && oldVNode==null! // The values only have the same type when `null`. outer: if (oldDom==null || oldDom.parentNode!==parentDom) { parentDom.appendChild(newDom); } else { sibDom = oldDom; j = 0; while ((sibDom=sibDom.nextSibling) && j++<oldChildrenLength/2) { if (sibDom===newDom) { break outer; } } parentDom.insertBefore(newDom, oldDom); } } // Restore focus if it was changed if (focus!==document.activeElement) { focus.focus(); } oldDom = newDom!=null ? newDom.nextSibling : nextDom; } } // Remove children that are not part of any vnode. Only used by `hydrate` if (excessDomChildren!=null && newParentVNode.type!==Fragment) for (i=excessDomChildren.length; i--; ) if (excessDomChildren[i]!=null) removeNode(excessDomChildren[i]); // Remove remaining oldChildren if there are any. for (i=oldChildrenLength; i--; ) if (oldChildren[i]!=null) unmount(oldChildren[i], ancestorComponent); } /** * Flatten a virtual nodes children to a single dimensional array * @param {import('../index').ComponentChildren} children The unflattened * children of a virtual node * @param {Array<import('../internal').VNode | null>} [flattened] An flat array of children to modify * @param {typeof import('../create-element').coerceToVNode} [map] Function that * will be applied on each child if the `vnode` is not `null` * @param {boolean} [keepHoles] wether to coerce `undefined` to `null` or not. * This is needed for Components without children like `<Foo />`. */ export function toChildArray(children, flattened, map, keepHoles) { if (flattened == null) flattened = []; if (children==null || typeof children === 'boolean') { if (keepHoles) flattened.push(null); } else if (Array.isArray(children)) { for (let i=0; i < children.length; i++) { toChildArray(children[i], flattened, map, keepHoles); } } else { flattened.push(map ? map(children) : children); } return flattened; }
1
13,235
This seems safe to me. My guess is the original checks were a combination of before + after `oldParentVNode` became reliably a `(vnode | null)`.
preactjs-preact
js
@@ -148,6 +148,10 @@ std::string FlatCompiler::GetUsageString(const char *program_name) const { " --conform FILE Specify a schema the following schemas should be\n" " an evolution of. Gives errors if not.\n" " --conform-includes Include path for the schema given with --conform PATH\n" + " --filename-suffix The suffix appended to the generated file names.\n" + " Default is '_generated'.\n" + " --filename-ext The extension appended to the generated file names.\n" + " Default is language-specific (e.g., '.h' for C++)\n" " --include-prefix Prefix this path to any generated include statements.\n" " PATH\n" " --keep-prefix Keep original prefix of schema include statement.\n"
1
/* * Copyright 2014 Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "flatbuffers/flatc.h" #include <list> namespace flatbuffers { const char *FLATC_VERSION() { return FLATBUFFERS_VERSION(); } void FlatCompiler::ParseFile( flatbuffers::Parser &parser, const std::string &filename, const std::string &contents, std::vector<const char *> &include_directories) const { auto local_include_directory = flatbuffers::StripFileName(filename); include_directories.push_back(local_include_directory.c_str()); include_directories.push_back(nullptr); if (!parser.Parse(contents.c_str(), &include_directories[0], filename.c_str())) { Error(parser.error_, false, false); } if (!parser.error_.empty()) { Warn(parser.error_, false); } include_directories.pop_back(); include_directories.pop_back(); } void FlatCompiler::LoadBinarySchema(flatbuffers::Parser &parser, const std::string &filename, const std::string &contents) { if (!parser.Deserialize(reinterpret_cast<const uint8_t *>(contents.c_str()), contents.size())) { Error("failed to load binary schema: " + filename, false, false); } } void FlatCompiler::Warn(const std::string &warn, bool show_exe_name) const { params_.warn_fn(this, warn, show_exe_name); } void FlatCompiler::Error(const std::string &err, bool usage, bool show_exe_name) const { params_.error_fn(this, err, usage, show_exe_name); } std::string FlatCompiler::GetUsageString(const char *program_name) const { std::stringstream ss; ss << "Usage: " << program_name << " [OPTION]... FILE... [-- FILE...]\n"; for (size_t i = 0; i < params_.num_generators; ++i) { const Generator &g = params_.generators[i]; std::stringstream full_name; full_name << std::setw(16) << std::left << g.generator_opt_long; const char *name = g.generator_opt_short ? g.generator_opt_short : " "; const char *help = g.generator_help; ss << " " << full_name.str() << " " << name << " " << help << ".\n"; } // clang-format off // Output width // 12345678901234567890123456789012345678901234567890123456789012345678901234567890 ss << " -o PATH Prefix PATH to all generated files.\n" " -I PATH Search for includes in the specified path.\n" " -M Print make rules for generated files.\n" " --version Print the version number of flatc and exit.\n" " --strict-json Strict JSON: field names must be / will be quoted,\n" " no trailing commas in tables/vectors.\n" " --allow-non-utf8 Pass non-UTF-8 input through parser and emit nonstandard\n" " \\x escapes in JSON. (Default is to raise parse error on\n" " non-UTF-8 input.)\n" " --natural-utf8 Output strings with UTF-8 as human-readable strings.\n" " By default, UTF-8 characters are printed as \\uXXXX escapes.\n" " --defaults-json Output fields whose value is the default when\n" " writing JSON\n" " --unknown-json Allow fields in JSON that are not defined in the\n" " schema. 
These fields will be discared when generating\n" " binaries.\n" " --no-prefix Don\'t prefix enum values with the enum type in C++.\n" " --scoped-enums Use C++11 style scoped and strongly typed enums.\n" " also implies --no-prefix.\n" " --gen-includes (deprecated), this is the default behavior.\n" " If the original behavior is required (no include\n" " statements) use --no-includes.\n" " --no-includes Don\'t generate include statements for included\n" " schemas the generated file depends on (C++ / Python).\n" " --gen-mutable Generate accessors that can mutate buffers in-place.\n" " --gen-onefile Generate single output file for C# and Go.\n" " --gen-name-strings Generate type name functions for C++.\n" " --gen-object-api Generate an additional object-based API.\n" " --gen-compare Generate operator== for object-based API types.\n" " --gen-nullable Add Clang _Nullable for C++ pointer. or @Nullable for Java\n" " --java-checkerframe work Add @Pure for Java.\n" " --gen-generated Add @Generated annotation for Java\n" " --gen-all Generate not just code for the current schema files,\n" " but for all files it includes as well.\n" " If the language uses a single file for output (by default\n" " the case for C++ and JS), all code will end up in this one\n" " file.\n" " --cpp-include Adds an #include in generated file.\n" " --cpp-ptr-type T Set object API pointer type (default std::unique_ptr).\n" " --cpp-str-type T Set object API string type (default std::string).\n" " T::c_str(), T::length() and T::empty() must be supported.\n" " The custom type also needs to be constructible from std::string\n" " (see the --cpp-str-flex-ctor option to change this behavior).\n" " --cpp-str-flex-ctor Don't construct custom string types by passing std::string\n" " from Flatbuffers, but (char* + length).\n" " --cpp-std CPP_STD Generate a C++ code using features of selected C++ standard.\n" " Supported CPP_STD values:\n" " * 'c++0x' - generate code compatible with old compilers;\n" " * 'c++11' - use C++11 code generator (default);\n" " * 'c++17' - use C++17 features in generated code (experimental).\n" " --object-prefix Customise class prefix for C++ object-based API.\n" " --object-suffix Customise class suffix for C++ object-based API.\n" " Default value is \"T\".\n" " --no-js-exports Removes Node.js style export lines in JS.\n" " --goog-js-export Uses goog.exports* for closure compiler exporting in JS.\n" " --es6-js-export Uses ECMAScript 6 export style lines in JS.\n" " --go-namespace Generate the overrided namespace in Golang.\n" " --go-import Generate the overrided import for flatbuffers in Golang\n" " (default is \"github.com/google/flatbuffers/go\").\n" " --raw-binary Allow binaries without file_indentifier to be read.\n" " This may crash flatc given a mismatched schema.\n" " --size-prefixed Input binaries are size prefixed buffers.\n" " --proto Input is a .proto, translate to .fbs.\n" " --proto-namespace-suffix Add this namespace to any flatbuffers generated\n" " SUFFIX from protobufs.\n" " --oneof-union Translate .proto oneofs to flatbuffer unions.\n" " --grpc Generate GRPC interfaces for the specified languages.\n" " --schema Serialize schemas instead of JSON (use with -b).\n" " --bfbs-comments Add doc comments to the binary schema files.\n" " --bfbs-builtins Add builtin attributes to the binary schema files.\n" " --bfbs-gen-embed Generate code to embed the bfbs schema to the source.\n" " --conform FILE Specify a schema the following schemas should be\n" " an evolution of. 
Gives errors if not.\n" " --conform-includes Include path for the schema given with --conform PATH\n" " --include-prefix Prefix this path to any generated include statements.\n" " PATH\n" " --keep-prefix Keep original prefix of schema include statement.\n" " --no-fb-import Don't include flatbuffers import statement for TypeScript.\n" " --no-ts-reexport Don't re-export imported dependencies for TypeScript.\n" " --short-names Use short function names for JS and TypeScript.\n" " --reflect-types Add minimal type reflection to code generation.\n" " --reflect-names Add minimal type/name reflection.\n" " --root-type T Select or override the default root_type\n" " --force-defaults Emit default values in binary output from JSON\n" " --force-empty When serializing from object API representation,\n" " force strings and vectors to empty rather than null.\n" " --force-empty-vectors When serializing from object API representation,\n" " force vectors to empty rather than null.\n" " --flexbuffers Used with \"binary\" and \"json\" options, it generates\n" " data using schema-less FlexBuffers.\n" "FILEs may be schemas (must end in .fbs), binary schemas (must end in .bfbs),\n" "or JSON files (conforming to preceding schema). FILEs after the -- must be\n" "binary flatbuffer format files.\n" "Output files are named using the base file name of the input,\n" "and written to the current directory or the path given by -o.\n" "example: " << program_name << " -c -b schema1.fbs schema2.fbs data.json\n"; // 12345678901234567890123456789012345678901234567890123456789012345678901234567890 // clang-format on return ss.str(); } int FlatCompiler::Compile(int argc, const char **argv) { if (params_.generators == nullptr || params_.num_generators == 0) { return 0; } flatbuffers::IDLOptions opts; std::string output_path; bool any_generator = false; bool print_make_rules = false; bool raw_binary = false; bool schema_binary = false; bool grpc_enabled = false; std::vector<std::string> filenames; std::list<std::string> include_directories_storage; std::vector<const char *> include_directories; std::vector<const char *> conform_include_directories; std::vector<bool> generator_enabled(params_.num_generators, false); size_t binary_files_from = std::numeric_limits<size_t>::max(); std::string conform_to_schema; for (int argi = 0; argi < argc; argi++) { std::string arg = argv[argi]; if (arg[0] == '-') { if (filenames.size() && arg[1] != '-') Error("invalid option location: " + arg, true); if (arg == "-o") { if (++argi >= argc) Error("missing path following: " + arg, true); output_path = flatbuffers::ConCatPathFileName( flatbuffers::PosixPath(argv[argi]), ""); } else if (arg == "-I") { if (++argi >= argc) Error("missing path following: " + arg, true); include_directories_storage.push_back( flatbuffers::PosixPath(argv[argi])); include_directories.push_back( include_directories_storage.back().c_str()); } else if (arg == "--conform") { if (++argi >= argc) Error("missing path following: " + arg, true); conform_to_schema = flatbuffers::PosixPath(argv[argi]); } else if (arg == "--conform-includes") { if (++argi >= argc) Error("missing path following: " + arg, true); include_directories_storage.push_back( flatbuffers::PosixPath(argv[argi])); conform_include_directories.push_back( include_directories_storage.back().c_str()); } else if (arg == "--include-prefix") { if (++argi >= argc) Error("missing path following: " + arg, true); opts.include_prefix = flatbuffers::ConCatPathFileName( flatbuffers::PosixPath(argv[argi]), ""); } else if (arg == 
"--keep-prefix") { opts.keep_include_path = true; } else if (arg == "--strict-json") { opts.strict_json = true; } else if (arg == "--allow-non-utf8") { opts.allow_non_utf8 = true; } else if (arg == "--natural-utf8") { opts.natural_utf8 = true; } else if (arg == "--no-js-exports") { opts.skip_js_exports = true; } else if (arg == "--goog-js-export") { opts.use_goog_js_export_format = true; opts.use_ES6_js_export_format = false; } else if (arg == "--es6-js-export") { opts.use_goog_js_export_format = false; opts.use_ES6_js_export_format = true; } else if (arg == "--go-namespace") { if (++argi >= argc) Error("missing golang namespace" + arg, true); opts.go_namespace = argv[argi]; } else if (arg == "--go-import") { if (++argi >= argc) Error("missing golang import" + arg, true); opts.go_import = argv[argi]; } else if (arg == "--defaults-json") { opts.output_default_scalars_in_json = true; } else if (arg == "--unknown-json") { opts.skip_unexpected_fields_in_json = true; } else if (arg == "--no-prefix") { opts.prefixed_enums = false; } else if (arg == "--scoped-enums") { opts.prefixed_enums = false; opts.scoped_enums = true; } else if (arg == "--no-union-value-namespacing") { opts.union_value_namespacing = false; } else if (arg == "--gen-mutable") { opts.mutable_buffer = true; } else if (arg == "--gen-name-strings") { opts.generate_name_strings = true; } else if (arg == "--gen-object-api") { opts.generate_object_based_api = true; } else if (arg == "--gen-compare") { opts.gen_compare = true; } else if (arg == "--cpp-include") { if (++argi >= argc) Error("missing include following: " + arg, true); opts.cpp_includes.push_back(argv[argi]); } else if (arg == "--cpp-ptr-type") { if (++argi >= argc) Error("missing type following: " + arg, true); opts.cpp_object_api_pointer_type = argv[argi]; } else if (arg == "--cpp-str-type") { if (++argi >= argc) Error("missing type following: " + arg, true); opts.cpp_object_api_string_type = argv[argi]; } else if (arg == "--cpp-str-flex-ctor") { opts.cpp_object_api_string_flexible_constructor = true; } else if (arg == "--gen-nullable") { opts.gen_nullable = true; } else if (arg == "--java-checkerframework") { opts.java_checkerframework = true; } else if (arg == "--gen-generated") { opts.gen_generated = true; } else if (arg == "--object-prefix") { if (++argi >= argc) Error("missing prefix following: " + arg, true); opts.object_prefix = argv[argi]; } else if (arg == "--object-suffix") { if (++argi >= argc) Error("missing suffix following: " + arg, true); opts.object_suffix = argv[argi]; } else if (arg == "--gen-all") { opts.generate_all = true; opts.include_dependence_headers = false; } else if (arg == "--gen-includes") { // Deprecated, remove this option some time in the future. Warn("warning: --gen-includes is deprecated (it is now default)\n"); } else if (arg == "--no-includes") { opts.include_dependence_headers = false; } else if (arg == "--gen-onefile") { opts.one_file = true; } else if (arg == "--raw-binary") { raw_binary = true; } else if (arg == "--size-prefixed") { opts.size_prefixed = true; } else if (arg == "--") { // Separator between text and binary inputs. 
binary_files_from = filenames.size(); } else if (arg == "--proto") { opts.proto_mode = true; } else if (arg == "--proto-namespace-suffix") { if (++argi >= argc) Error("missing namespace suffix" + arg, true); opts.proto_namespace_suffix = argv[argi]; } else if (arg == "--oneof-union") { opts.proto_oneof_union = true; } else if (arg == "--schema") { schema_binary = true; } else if (arg == "-M") { print_make_rules = true; } else if (arg == "--version") { printf("flatc version %s\n", FLATC_VERSION()); exit(0); } else if (arg == "--grpc") { grpc_enabled = true; } else if (arg == "--bfbs-comments") { opts.binary_schema_comments = true; } else if (arg == "--bfbs-builtins") { opts.binary_schema_builtins = true; } else if (arg == "--bfbs-gen-embed") { opts.binary_schema_gen_embed = true; } else if (arg == "--no-fb-import") { opts.skip_flatbuffers_import = true; } else if (arg == "--no-ts-reexport") { opts.reexport_ts_modules = false; } else if (arg == "--short-names") { opts.js_ts_short_names = true; } else if (arg == "--reflect-types") { opts.mini_reflect = IDLOptions::kTypes; } else if (arg == "--reflect-names") { opts.mini_reflect = IDLOptions::kTypesAndNames; } else if (arg == "--root-type") { if (++argi >= argc) Error("missing type following: " + arg, true); opts.root_type = argv[argi]; } else if (arg == "--force-defaults") { opts.force_defaults = true; } else if (arg == "--force-empty") { opts.set_empty_strings_to_null = false; opts.set_empty_vectors_to_null = false; } else if (arg == "--force-empty-vectors") { opts.set_empty_vectors_to_null = false; } else if (arg == "--java-primitive-has-method") { opts.java_primitive_has_method = true; } else if (arg == "--cs-gen-json-serializer") { opts.cs_gen_json_serializer = true; } else if (arg == "--flexbuffers") { opts.use_flexbuffers = true; } else if (arg == "--cpp-std") { if (++argi >= argc) Error("missing C++ standard specification" + arg, true); opts.cpp_std = argv[argi]; } else { for (size_t i = 0; i < params_.num_generators; ++i) { if (arg == params_.generators[i].generator_opt_long || (params_.generators[i].generator_opt_short && arg == params_.generators[i].generator_opt_short)) { generator_enabled[i] = true; any_generator = true; opts.lang_to_generate |= params_.generators[i].lang; goto found; } } Error("unknown commandline argument: " + arg, true); found:; } } else { filenames.push_back(flatbuffers::PosixPath(argv[argi])); } } if (!filenames.size()) Error("missing input files", false, true); if (opts.proto_mode) { if (any_generator) Error("cannot generate code directly from .proto files", true); } else if (!any_generator && conform_to_schema.empty()) { Error("no options: specify at least one generator.", true); } flatbuffers::Parser conform_parser; if (!conform_to_schema.empty()) { std::string contents; if (!flatbuffers::LoadFile(conform_to_schema.c_str(), true, &contents)) Error("unable to load schema: " + conform_to_schema); if (flatbuffers::GetExtension(conform_to_schema) == reflection::SchemaExtension()) { LoadBinarySchema(conform_parser, conform_to_schema, contents); } else { ParseFile(conform_parser, conform_to_schema, contents, conform_include_directories); } } std::unique_ptr<flatbuffers::Parser> parser(new flatbuffers::Parser(opts)); for (auto file_it = filenames.begin(); file_it != filenames.end(); ++file_it) { auto &filename = *file_it; std::string contents; if (!flatbuffers::LoadFile(filename.c_str(), true, &contents)) Error("unable to load file: " + filename); bool is_binary = static_cast<size_t>(file_it - filenames.begin()) 
>= binary_files_from; auto ext = flatbuffers::GetExtension(filename); auto is_schema = ext == "fbs" || ext == "proto"; auto is_binary_schema = ext == reflection::SchemaExtension(); if (is_binary) { parser->builder_.Clear(); parser->builder_.PushFlatBuffer( reinterpret_cast<const uint8_t *>(contents.c_str()), contents.length()); if (!raw_binary) { // Generally reading binaries that do not correspond to the schema // will crash, and sadly there's no way around that when the binary // does not contain a file identifier. // We'd expect that typically any binary used as a file would have // such an identifier, so by default we require them to match. if (!parser->file_identifier_.length()) { Error("current schema has no file_identifier: cannot test if \"" + filename + "\" matches the schema, use --raw-binary to read this file" " anyway."); } else if (!flatbuffers::BufferHasIdentifier( contents.c_str(), parser->file_identifier_.c_str(), opts.size_prefixed)) { Error("binary \"" + filename + "\" does not have expected file_identifier \"" + parser->file_identifier_ + "\", use --raw-binary to read this file anyway."); } } } else { // Check if file contains 0 bytes. if (!opts.use_flexbuffers && !is_binary_schema && contents.length() != strlen(contents.c_str())) { Error("input file appears to be binary: " + filename, true); } if (is_schema) { // If we're processing multiple schemas, make sure to start each // one from scratch. If it depends on previous schemas it must do // so explicitly using an include. parser.reset(new flatbuffers::Parser(opts)); } if (is_binary_schema) { LoadBinarySchema(*parser.get(), filename, contents); } if (opts.use_flexbuffers) { if (opts.lang_to_generate == IDLOptions::kJson) { parser->flex_root_ = flexbuffers::GetRoot( reinterpret_cast<const uint8_t *>(contents.c_str()), contents.size()); } else { parser->flex_builder_.Clear(); ParseFile(*parser.get(), filename, contents, include_directories); } } else { ParseFile(*parser.get(), filename, contents, include_directories); if (!is_schema && !parser->builder_.GetSize()) { // If a file doesn't end in .fbs, it must be json/binary. Ensure we // didn't just parse a schema with a different extension. 
Error("input file is neither json nor a .fbs (schema) file: " + filename, true); } } if ((is_schema || is_binary_schema) && !conform_to_schema.empty()) { auto err = parser->ConformTo(conform_parser); if (!err.empty()) Error("schemas don\'t conform: " + err); } if (schema_binary || opts.binary_schema_gen_embed) { parser->Serialize(); } if (schema_binary) { parser->file_extension_ = reflection::SchemaExtension(); } } std::string filebase = flatbuffers::StripPath(flatbuffers::StripExtension(filename)); for (size_t i = 0; i < params_.num_generators; ++i) { parser->opts.lang = params_.generators[i].lang; if (generator_enabled[i]) { if (!print_make_rules) { flatbuffers::EnsureDirExists(output_path); if ((!params_.generators[i].schema_only || (is_schema || is_binary_schema)) && !params_.generators[i].generate(*parser.get(), output_path, filebase)) { Error(std::string("Unable to generate ") + params_.generators[i].lang_name + " for " + filebase); } } else { if (params_.generators[i].make_rule == nullptr) { Error(std::string("Cannot generate make rule for ") + params_.generators[i].lang_name); } else { std::string make_rule = params_.generators[i].make_rule( *parser.get(), output_path, filename); if (!make_rule.empty()) printf("%s\n", flatbuffers::WordWrap(make_rule, 80, " ", " \\").c_str()); } } if (grpc_enabled) { if (params_.generators[i].generateGRPC != nullptr) { if (!params_.generators[i].generateGRPC(*parser.get(), output_path, filebase)) { Error(std::string("Unable to generate GRPC interface for") + params_.generators[i].lang_name); } } else { Warn(std::string("GRPC interface generator not implemented for ") + params_.generators[i].lang_name); } } } } if (!opts.root_type.empty()) { if (!parser->SetRootType(opts.root_type.c_str())) Error("unknown root type: " + opts.root_type); else if (parser->root_struct_def_->fixed) Error("root type must be a table"); } if (opts.proto_mode) GenerateFBS(*parser.get(), output_path, filebase); // We do not want to generate code for the definitions in this file // in any files coming up next. parser->MarkGenerated(); } return 0; } } // namespace flatbuffers
1
17,838
Also in the `.md`, please.
google-flatbuffers
java
@@ -7,7 +7,9 @@ module Unix::Pkg end def install_package name - if self['platform'] =~ /(fedora)|(centos)|(el)/ + if self['platform'] =~ /(el-4)|(redhat-4)/ + @logger.debug("Package installation not supported on rhel4") + elsif self['platform'] =~ /(fedora)|(centos)|(el)|(redhat)/ execute("yum -y install #{name}") elsif self['platform'] =~ /(ubuntu)|(debian)/ execute("apt-get update")
1
module Unix::Pkg include PuppetAcceptance::CommandFactory def check_for_package name result = exec(PuppetAcceptance::Command.new("which #{name}"), :acceptable_exit_codes => (0...127)) result.exit_code == 0 end def install_package name if self['platform'] =~ /(fedora)|(centos)|(el)/ execute("yum -y install #{name}") elsif self['platform'] =~ /(ubuntu)|(debian)/ execute("apt-get update") execute("apt-get install -y #{name}") else raise "Package #{name} cannot be installed on #{host}" end end end
1
4,513
RHEL is an acronym and is usually all caps. Why are you adding a redhat regex matcher?
voxpupuli-beaker
rb
@@ -193,16 +193,6 @@ public class VectorizedColumnIterator extends BaseColumnIterator { } } - public class FixedWidthTypeBinaryBatchReader extends BatchReader { - @Override - protected int nextBatchOf( - final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, - NullabilityHolder holder) { - return vectorizedPageIterator.fixedWidthBinaryPageReader().nextBatch(vector, expectedBatchSize, numValsInVector, - typeWidth, holder); - } - } - public class BooleanBatchReader extends BatchReader { @Override protected int nextBatchOf(
1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.arrow.vectorized.parquet; import org.apache.arrow.vector.FieldVector; import org.apache.arrow.vector.IntVector; import org.apache.iceberg.arrow.vectorized.NullabilityHolder; import org.apache.iceberg.parquet.BaseColumnIterator; import org.apache.iceberg.parquet.BasePageIterator; import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.parquet.column.ColumnDescriptor; import org.apache.parquet.column.Dictionary; import org.apache.parquet.column.page.PageReader; /** * Vectorized version of the ColumnIterator that reads column values in data pages of a column in a row group in a * batched fashion. */ public class VectorizedColumnIterator extends BaseColumnIterator { private final VectorizedPageIterator vectorizedPageIterator; private int batchSize; public VectorizedColumnIterator(ColumnDescriptor desc, String writerVersion, boolean setArrowValidityVector) { super(desc); Preconditions.checkArgument(desc.getMaxRepetitionLevel() == 0, "Only non-nested columns are supported for vectorized reads"); this.vectorizedPageIterator = new VectorizedPageIterator(desc, writerVersion, setArrowValidityVector); } public void setBatchSize(int batchSize) { this.batchSize = batchSize; } public Dictionary setRowGroupInfo(PageReader store, boolean allPagesDictEncoded) { // setPageSource can result in a data page read. 
If that happens, we need // to know in advance whether all the pages in the row group are dictionary encoded or not this.vectorizedPageIterator.setAllPagesDictEncoded(allPagesDictEncoded); super.setPageSource(store); return dictionary; } @Override protected BasePageIterator pageIterator() { return vectorizedPageIterator; } public boolean producesDictionaryEncodedVector() { return vectorizedPageIterator.producesDictionaryEncodedVector(); } public abstract class BatchReader { public void nextBatch(FieldVector fieldVector, int typeWidth, NullabilityHolder holder) { int rowsReadSoFar = 0; while (rowsReadSoFar < batchSize && hasNext()) { advance(); int rowsInThisBatch = nextBatchOf(fieldVector, batchSize - rowsReadSoFar, rowsReadSoFar, typeWidth, holder); rowsReadSoFar += rowsInThisBatch; triplesRead += rowsInThisBatch; fieldVector.setValueCount(rowsReadSoFar); } } protected abstract int nextBatchOf( FieldVector vector, int expectedBatchSize, int numValsInVector, int typeWidth, NullabilityHolder holder); } public class IntegerBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.intPageReader() .nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class DictionaryBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.nextBatchDictionaryIds((IntVector) vector, expectedBatchSize, numValsInVector, holder); } } public class LongBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.longPageReader() .nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class TimestampMillisBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.timestampMillisPageReader().nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class FloatBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.floatPageReader() .nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class DoubleBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.doublePageReader() .nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class IntBackedDecimalBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.intBackedDecimalPageReader() .nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class LongBackedDecimalBatchReader extends BatchReader { 
@Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.longBackedDecimalPageReader().nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class FixedLengthDecimalBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.fixedLengthDecimalPageReader().nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class FixedSizeBinaryBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.fixedSizeBinaryPageReader().nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class VarWidthTypeBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.varWidthTypePageReader().nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class FixedWidthTypeBinaryBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.fixedWidthBinaryPageReader().nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public class BooleanBatchReader extends BatchReader { @Override protected int nextBatchOf( final FieldVector vector, final int expectedBatchSize, final int numValsInVector, final int typeWidth, NullabilityHolder holder) { return vectorizedPageIterator.booleanPageReader().nextBatch(vector, expectedBatchSize, numValsInVector, typeWidth, holder); } } public IntegerBatchReader integerBatchReader() { return new IntegerBatchReader(); } public DictionaryBatchReader dictionaryBatchReader() { return new DictionaryBatchReader(); } public LongBatchReader longBatchReader() { return new LongBatchReader(); } public TimestampMillisBatchReader timestampMillisBatchReader() { return new TimestampMillisBatchReader(); } public FloatBatchReader floatBatchReader() { return new FloatBatchReader(); } public DoubleBatchReader doubleBatchReader() { return new DoubleBatchReader(); } public IntBackedDecimalBatchReader intBackedDecimalBatchReader() { return new IntBackedDecimalBatchReader(); } public LongBackedDecimalBatchReader longBackedDecimalBatchReader() { return new LongBackedDecimalBatchReader(); } public FixedLengthDecimalBatchReader fixedLengthDecimalBatchReader() { return new FixedLengthDecimalBatchReader(); } public FixedSizeBinaryBatchReader fixedSizeBinaryBatchReader() { return new FixedSizeBinaryBatchReader(); } public VarWidthTypeBatchReader varWidthTypeBatchReader() { return new VarWidthTypeBatchReader(); } public FixedWidthTypeBinaryBatchReader fixedWidthTypeBinaryBatchReader() { return new FixedWidthTypeBinaryBatchReader(); } public BooleanBatchReader booleanBatchReader() { return new BooleanBatchReader(); } }
1
41,182
Maybe I am daft but it looks like you removed fixed width readers but I don't see where you added any readers?
apache-iceberg
java
@@ -16,6 +16,14 @@ import ( const DefaultManifestType = ManifestMantarayContentType +const ( + RootPath = "/" + WebsiteIndexDocumentSuffixKey = "website-index-document" + WebsiteErrorDocumentPathKey = "website-error-document" + EntryMetadataContentTypeKey = "Content-Type" + EntryMetadataFilenameKey = "Filename" +) + var ( // ErrNotFound is returned when an Entry is not found in the manifest. ErrNotFound = errors.New("manifest: not found")
1
// Copyright 2020 The Swarm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package manifest contains the abstractions needed for // collection representation in Swarm. package manifest import ( "context" "errors" "github.com/ethersphere/bee/pkg/file" "github.com/ethersphere/bee/pkg/swarm" ) const DefaultManifestType = ManifestMantarayContentType var ( // ErrNotFound is returned when an Entry is not found in the manifest. ErrNotFound = errors.New("manifest: not found") // ErrInvalidManifestType is returned when an unknown manifest type // is provided to the function. ErrInvalidManifestType = errors.New("manifest: invalid type") // ErrMissingReference is returned when the reference for the manifest file // is missing. ErrMissingReference = errors.New("manifest: missing reference") ) // StoreSizeFunc is a callback on every content size that will be stored by // the Store function. type StoreSizeFunc func(int64) error // Interface for operations with manifest. type Interface interface { // Type returns manifest implementation type information Type() string // Add a manifest entry to the specified path. Add(context.Context, string, Entry) error // Remove a manifest entry on the specified path. Remove(context.Context, string) error // Lookup returns a manifest entry if one is found in the specified path. Lookup(context.Context, string) (Entry, error) // HasPrefix tests whether the specified prefix path exists. HasPrefix(context.Context, string) (bool, error) // Store stores the manifest, returning the resulting address. Store(context.Context, ...StoreSizeFunc) (swarm.Address, error) // IterateAddresses is used to iterate over chunks addresses for // the manifest. IterateAddresses(context.Context, swarm.AddressIterFunc) error } // Entry represents a single manifest entry. type Entry interface { // Reference returns the address of the file. Reference() swarm.Address // Metadata returns the metadata of the file. Metadata() map[string]string } // NewDefaultManifest creates a new manifest with default type. func NewDefaultManifest( ls file.LoadSaver, encrypted bool, ) (Interface, error) { return NewManifest(DefaultManifestType, ls, encrypted) } // NewDefaultManifest creates a new manifest with default type. func NewDefaultManifestReference( reference swarm.Address, ls file.LoadSaver, ) (Interface, error) { return NewManifestReference(DefaultManifestType, reference, ls) } // NewManifest creates a new manifest. func NewManifest( manifestType string, ls file.LoadSaver, encrypted bool, ) (Interface, error) { switch manifestType { case ManifestSimpleContentType: return NewSimpleManifest(ls) case ManifestMantarayContentType: return NewMantarayManifest(ls, encrypted) default: return nil, ErrInvalidManifestType } } // NewManifestReference loads existing manifest. func NewManifestReference( manifestType string, reference swarm.Address, ls file.LoadSaver, ) (Interface, error) { switch manifestType { case ManifestSimpleContentType: return NewSimpleManifestReference(reference, ls) case ManifestMantarayContentType: return NewMantarayManifestReference(reference, ls) default: return nil, ErrInvalidManifestType } } type manifestEntry struct { reference swarm.Address metadata map[string]string } // NewEntry creates a new manifest entry. 
func NewEntry(reference swarm.Address, metadata map[string]string) Entry { return &manifestEntry{ reference: reference, metadata: metadata, } } func (e *manifestEntry) Reference() swarm.Address { return e.reference } func (e *manifestEntry) Metadata() map[string]string { return e.metadata }
1
14,464
@acud The reason I moved these here is because I need them in the traversal tests. The traversal package doesn't need this. I don't see any particular reason why any package apart from api would need this. api package imports traversal, so I cannot import api in traversal pkg. Also if these change in future, the traversal tests need to change. So maybe enough reason to move these here? That way both packages can access these as they both depend on manifest
ethersphere-bee
go
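The review above argues for keeping the path and metadata key constants in the manifest package so that both the api package and the traversal tests can reach them without an import cycle (api already imports traversal). A minimal sketch of a traversal-side test using those constants follows; the package name, test name, and test body are illustrative assumptions, and only the constant names and the import path come from the patch above.

package traversal_test

import (
    "testing"

    "github.com/ethersphere/bee/pkg/manifest"
)

// Builds entry metadata from the constants exported by the manifest
// package instead of duplicating the string literals from api, which
// is the dependency direction the reviewer describes.
func TestEntryMetadataKeys(t *testing.T) {
    metadata := map[string]string{
        manifest.EntryMetadataFilenameKey:    "index.html",
        manifest.EntryMetadataContentTypeKey: "text/html; charset=utf-8",
    }

    if metadata[manifest.EntryMetadataFilenameKey] != "index.html" {
        t.Fatalf("unexpected filename metadata: %v", metadata)
    }
}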
@@ -560,6 +560,7 @@ class AbstractTab(QWidget): self._mouse_event_filter = mouse.MouseEventFilter( self, widget_class=self.WIDGET_CLASS, parent=self) self.backend = None + self.pin = False # FIXME:qtwebengine Should this be public api via self.hints? # Also, should we get it out of objreg?
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Base class for a wrapper over QWebView/QWebEngineView.""" import itertools from PyQt5.QtCore import pyqtSignal, pyqtSlot, QUrl, QObject, QSizeF from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QWidget, QApplication from qutebrowser.keyinput import modeman from qutebrowser.config import config from qutebrowser.utils import (utils, objreg, usertypes, message, log, qtutils, urlutils) from qutebrowser.misc import miscwidgets from qutebrowser.browser import mouse, hints tab_id_gen = itertools.count(0) def create(win_id, parent=None): """Get a QtWebKit/QtWebEngine tab object. Args: win_id: The window ID where the tab will be shown. parent: The Qt parent to set. """ # Importing modules here so we don't depend on QtWebEngine without the # argument and to avoid circular imports. mode_manager = modeman.instance(win_id) if objreg.get('args').backend == 'webengine': from qutebrowser.browser.webengine import webenginetab tab_class = webenginetab.WebEngineTab else: from qutebrowser.browser.webkit import webkittab tab_class = webkittab.WebKitTab return tab_class(win_id=win_id, mode_manager=mode_manager, parent=parent) def init(args): """Initialize backend-specific modules.""" if args.backend == 'webengine': from qutebrowser.browser.webengine import webenginetab webenginetab.init() else: from qutebrowser.browser.webkit import webkittab webkittab.init() class WebTabError(Exception): """Base class for various errors.""" class UnsupportedOperationError(WebTabError): """Raised when an operation is not supported with the given backend.""" class TabData: """A simple namespace with a fixed set of attributes. Attributes: keep_icon: Whether the (e.g. cloned) icon should not be cleared on page load. inspector: The QWebInspector used for this webview. viewing_source: Set if we're currently showing a source view. open_target: How the next clicked link should be opened. override_target: Override for open_target for fake clicks (like hints). """ def __init__(self): self.keep_icon = False self.viewing_source = False self.inspector = None self.open_target = usertypes.ClickTarget.normal self.override_target = None def combined_target(self): if self.override_target is not None: return self.override_target else: return self.open_target class AbstractPrinting: """Attribute of AbstractTab for printing the page.""" def __init__(self): self._widget = None def check_pdf_support(self): raise NotImplementedError def check_printer_support(self): raise NotImplementedError def to_pdf(self, filename): raise NotImplementedError def to_printer(self, printer): raise NotImplementedError class AbstractSearch(QObject): """Attribute of AbstractTab for doing searches. Attributes: text: The last thing this view was searched for. 
_flags: The flags of the last search (needs to be set by subclasses). _widget: The underlying WebView widget. """ def __init__(self, parent=None): super().__init__(parent) self._widget = None self.text = None def search(self, text, *, ignore_case=False, reverse=False, result_cb=None): """Find the given text on the page. Args: text: The text to search for. ignore_case: Search case-insensitively. (True/False/'smart') reverse: Reverse search direction. result_cb: Called with a bool indicating whether a match was found. """ raise NotImplementedError def clear(self): """Clear the current search.""" raise NotImplementedError def prev_result(self, *, result_cb=None): """Go to the previous result of the current search. Args: result_cb: Called with a bool indicating whether a match was found. """ raise NotImplementedError def next_result(self, *, result_cb=None): """Go to the next result of the current search. Args: result_cb: Called with a bool indicating whether a match was found. """ raise NotImplementedError class AbstractZoom(QObject): """Attribute of AbstractTab for controlling zoom. Attributes: _neighborlist: A NeighborList with the zoom levels. _default_zoom_changed: Whether the zoom was changed from the default. """ def __init__(self, win_id, parent=None): super().__init__(parent) self._widget = None self._win_id = win_id self._default_zoom_changed = False self._init_neighborlist() objreg.get('config').changed.connect(self._on_config_changed) # # FIXME:qtwebengine is this needed? # # For some reason, this signal doesn't get disconnected automatically # # when the WebView is destroyed on older PyQt versions. # # See https://github.com/The-Compiler/qutebrowser/issues/390 # self.destroyed.connect(functools.partial( # cfg.changed.disconnect, self.init_neighborlist)) @pyqtSlot(str, str) def _on_config_changed(self, section, option): if section == 'ui' and option in ['zoom-levels', 'default-zoom']: if not self._default_zoom_changed: factor = float(config.get('ui', 'default-zoom')) / 100 self._set_factor_internal(factor) self._default_zoom_changed = False self._init_neighborlist() def _init_neighborlist(self): """Initialize self._neighborlist.""" levels = config.get('ui', 'zoom-levels') self._neighborlist = usertypes.NeighborList( levels, mode=usertypes.NeighborList.Modes.edge) self._neighborlist.fuzzyval = config.get('ui', 'default-zoom') def offset(self, offset): """Increase/Decrease the zoom level by the given offset. Args: offset: The offset in the zoom level list. Return: The new zoom percentage. """ level = self._neighborlist.getitem(offset) self.set_factor(float(level) / 100, fuzzyval=False) return level def set_factor(self, factor, *, fuzzyval=True): """Zoom to a given zoom factor. Args: factor: The zoom factor as float. fuzzyval: Whether to set the NeighborLists fuzzyval. 
""" if fuzzyval: self._neighborlist.fuzzyval = int(factor * 100) if factor < 0: raise ValueError("Can't zoom to factor {}!".format(factor)) self._default_zoom_changed = True self._set_factor_internal(factor) def factor(self): raise NotImplementedError def set_default(self): default_zoom = config.get('ui', 'default-zoom') self._set_factor_internal(float(default_zoom) / 100) class AbstractCaret(QObject): """Attribute of AbstractTab for caret browsing.""" def __init__(self, win_id, tab, mode_manager, parent=None): super().__init__(parent) self._tab = tab self._win_id = win_id self._widget = None self.selection_enabled = False mode_manager.entered.connect(self._on_mode_entered) mode_manager.left.connect(self._on_mode_left) def _on_mode_entered(self, mode): raise NotImplementedError def _on_mode_left(self): raise NotImplementedError def move_to_next_line(self, count=1): raise NotImplementedError def move_to_prev_line(self, count=1): raise NotImplementedError def move_to_next_char(self, count=1): raise NotImplementedError def move_to_prev_char(self, count=1): raise NotImplementedError def move_to_end_of_word(self, count=1): raise NotImplementedError def move_to_next_word(self, count=1): raise NotImplementedError def move_to_prev_word(self, count=1): raise NotImplementedError def move_to_start_of_line(self): raise NotImplementedError def move_to_end_of_line(self): raise NotImplementedError def move_to_start_of_next_block(self, count=1): raise NotImplementedError def move_to_start_of_prev_block(self, count=1): raise NotImplementedError def move_to_end_of_next_block(self, count=1): raise NotImplementedError def move_to_end_of_prev_block(self, count=1): raise NotImplementedError def move_to_start_of_document(self): raise NotImplementedError def move_to_end_of_document(self): raise NotImplementedError def toggle_selection(self): raise NotImplementedError def drop_selection(self): raise NotImplementedError def has_selection(self): raise NotImplementedError def selection(self, html=False): raise NotImplementedError def follow_selected(self, *, tab=False): raise NotImplementedError class AbstractScroller(QObject): """Attribute of AbstractTab to manage scroll position.""" perc_changed = pyqtSignal(int, int) def __init__(self, tab, parent=None): super().__init__(parent) self._tab = tab self._widget = None self.perc_changed.connect(self._log_scroll_pos_change) @pyqtSlot() def _log_scroll_pos_change(self): log.webview.vdebug("Scroll position changed to {}".format( self.pos_px())) def _init_widget(self, widget): self._widget = widget def pos_px(self): raise NotImplementedError def pos_perc(self): raise NotImplementedError def to_perc(self, x=None, y=None): raise NotImplementedError def to_point(self, point): raise NotImplementedError def delta(self, x=0, y=0): raise NotImplementedError def delta_page(self, x=0, y=0): raise NotImplementedError def up(self, count=1): raise NotImplementedError def down(self, count=1): raise NotImplementedError def left(self, count=1): raise NotImplementedError def right(self, count=1): raise NotImplementedError def top(self): raise NotImplementedError def bottom(self): raise NotImplementedError def page_up(self, count=1): raise NotImplementedError def page_down(self, count=1): raise NotImplementedError def at_top(self): raise NotImplementedError def at_bottom(self): raise NotImplementedError class AbstractHistory: """The history attribute of a AbstractTab.""" def __init__(self, tab): self._tab = tab self._history = None def __len__(self): return len(self._history) def 
__iter__(self): return iter(self._history.items()) def current_idx(self): raise NotImplementedError def back(self): raise NotImplementedError def forward(self): raise NotImplementedError def can_go_back(self): raise NotImplementedError def can_go_forward(self): raise NotImplementedError def serialize(self): """Serialize into an opaque format understood by self.deserialize.""" raise NotImplementedError def deserialize(self, data): """Serialize from a format produced by self.serialize.""" raise NotImplementedError def load_items(self, items): """Deserialize from a list of WebHistoryItems.""" raise NotImplementedError class AbstractElements: """Finding and handling of elements on the page.""" def __init__(self, tab): self._widget = None self._tab = tab def find_css(self, selector, callback, *, only_visible=False): """Find all HTML elements matching a given selector async. Args: callback: The callback to be called when the search finished. selector: The CSS selector to search for. only_visible: Only show elements which are visible on screen. """ raise NotImplementedError def find_id(self, elem_id, callback): """Find the HTML element with the given ID async. Args: callback: The callback to be called when the search finished. elem_id: The ID to search for. """ raise NotImplementedError def find_focused(self, callback): """Find the focused element on the page async. Args: callback: The callback to be called when the search finished. Called with a WebEngineElement or None. """ raise NotImplementedError def find_at_pos(self, pos, callback): """Find the element at the given position async. This is also called "hit test" elsewhere. Args: pos: The QPoint to get the element for. callback: The callback to be called when the search finished. Called with a WebEngineElement or None. """ raise NotImplementedError class AbstractTab(QWidget): """A wrapper over the given widget to hide its API and expose another one. We use this to unify QWebView and QWebEngineView. Class attributes: WIDGET_CLASS: The class of the main widget recieving events. Needs to be overridden by subclasses. Attributes: history: The AbstractHistory for the current tab. registry: The ObjectRegistry associated with this tab. _load_status: loading status of this page Accessible via load_status() method. _has_ssl_errors: Whether SSL errors happened. Needs to be set by subclasses. for properties, see WebView/WebEngineView docs. Signals: See related Qt signals. new_tab_requested: Emitted when a new tab should be opened with the given URL. 
load_status_changed: The loading status changed """ window_close_requested = pyqtSignal() link_hovered = pyqtSignal(str) load_started = pyqtSignal() load_progress = pyqtSignal(int) load_finished = pyqtSignal(bool) icon_changed = pyqtSignal(QIcon) title_changed = pyqtSignal(str) load_status_changed = pyqtSignal(str) new_tab_requested = pyqtSignal(QUrl) url_changed = pyqtSignal(QUrl) shutting_down = pyqtSignal() contents_size_changed = pyqtSignal(QSizeF) add_history_item = pyqtSignal(QUrl, QUrl, str) # url, requested url, title WIDGET_CLASS = None def __init__(self, win_id, mode_manager, parent=None): self.win_id = win_id self.tab_id = next(tab_id_gen) super().__init__(parent) self.registry = objreg.ObjectRegistry() tab_registry = objreg.get('tab-registry', scope='window', window=win_id) tab_registry[self.tab_id] = self objreg.register('tab', self, registry=self.registry) # self.history = AbstractHistory(self) # self.scroller = AbstractScroller(self, parent=self) # self.caret = AbstractCaret(win_id=win_id, tab=self, # mode_manager=mode_manager, parent=self) # self.zoom = AbstractZoom(win_id=win_id) # self.search = AbstractSearch(parent=self) # self.printing = AbstractPrinting() # self.elements = AbstractElements(self) self.data = TabData() self._layout = miscwidgets.WrapperLayout(self) self._widget = None self._progress = 0 self._has_ssl_errors = False self._mode_manager = mode_manager self._load_status = usertypes.LoadStatus.none self._mouse_event_filter = mouse.MouseEventFilter( self, widget_class=self.WIDGET_CLASS, parent=self) self.backend = None # FIXME:qtwebengine Should this be public api via self.hints? # Also, should we get it out of objreg? hintmanager = hints.HintManager(win_id, self.tab_id, parent=self) objreg.register('hintmanager', hintmanager, scope='tab', window=self.win_id, tab=self.tab_id) def _set_widget(self, widget): # pylint: disable=protected-access self._widget = widget self._layout.wrap(self, widget) self.history._history = widget.history() self.scroller._init_widget(widget) self.caret._widget = widget self.zoom._widget = widget self.search._widget = widget self.printing._widget = widget self.elements._widget = widget self._install_event_filter() def _install_event_filter(self): raise NotImplementedError def _set_load_status(self, val): """Setter for load_status.""" if not isinstance(val, usertypes.LoadStatus): raise TypeError("Type {} is no LoadStatus member!".format(val)) log.webview.debug("load status for {}: {}".format(repr(self), val)) self._load_status = val self.load_status_changed.emit(val.name) def _event_target(self): """Return the widget events should be sent to.""" raise NotImplementedError def send_event(self, evt): """Send the given event to the underlying widget. The event will be sent via QApplication.postEvent. Note that a posted event may not be re-used in any way! """ # This only gives us some mild protection against re-using events, but # it's certainly better than a segfault. 
if getattr(evt, 'posted', False): raise AssertionError("Can't re-use an event which was already " "posted!") recipient = self._event_target() evt.posted = True QApplication.postEvent(recipient, evt) @pyqtSlot(QUrl) def _on_link_clicked(self, url): log.webview.debug("link clicked: url {}, override target {}, " "open_target {}".format( url.toDisplayString(), self.data.override_target, self.data.open_target)) if not url.isValid(): msg = urlutils.get_errstring(url, "Invalid link clicked") message.error(msg) self.data.open_target = usertypes.ClickTarget.normal return False target = self.data.combined_target() if target == usertypes.ClickTarget.normal: return elif target == usertypes.ClickTarget.tab: win_id = self.win_id bg_tab = False elif target == usertypes.ClickTarget.tab_bg: win_id = self.win_id bg_tab = True elif target == usertypes.ClickTarget.window: from qutebrowser.mainwindow import mainwindow window = mainwindow.MainWindow() window.show() win_id = window.win_id bg_tab = False else: raise ValueError("Invalid ClickTarget {}".format(target)) tabbed_browser = objreg.get('tabbed-browser', scope='window', window=win_id) tabbed_browser.tabopen(url, background=bg_tab) self.data.open_target = usertypes.ClickTarget.normal @pyqtSlot(QUrl) def _on_url_changed(self, url): """Update title when URL has changed and no title is available.""" if url.isValid() and not self.title(): self.title_changed.emit(url.toDisplayString()) self.url_changed.emit(url) @pyqtSlot() def _on_load_started(self): self._progress = 0 self._has_ssl_errors = False self.data.viewing_source = False self._set_load_status(usertypes.LoadStatus.loading) self.load_started.emit() def _handle_auto_insert_mode(self, ok): """Handle auto-insert-mode after loading finished.""" if not config.get('input', 'auto-insert-mode') or not ok: return cur_mode = self._mode_manager.mode if cur_mode == usertypes.KeyMode.insert: return def _auto_insert_mode_cb(elem): """Called from JS after finding the focused element.""" if elem is None: log.webview.debug("No focused element!") return if elem.is_editable(): modeman.enter(self.win_id, usertypes.KeyMode.insert, 'load finished', only_if_normal=True) self.elements.find_focused(_auto_insert_mode_cb) @pyqtSlot(bool) def _on_load_finished(self, ok): if ok and not self._has_ssl_errors: if self.url().scheme() == 'https': self._set_load_status(usertypes.LoadStatus.success_https) else: self._set_load_status(usertypes.LoadStatus.success) elif ok: self._set_load_status(usertypes.LoadStatus.warn) else: self._set_load_status(usertypes.LoadStatus.error) self.load_finished.emit(ok) if not self.title(): self.title_changed.emit(self.url().toDisplayString()) self._handle_auto_insert_mode(ok) @pyqtSlot() def _on_history_trigger(self): """Emit add_history_item when triggered by backend-specific signal.""" raise NotImplementedError @pyqtSlot(int) def _on_load_progress(self, perc): self._progress = perc self.load_progress.emit(perc) @pyqtSlot() def _on_ssl_errors(self): self._has_ssl_errors = True def url(self, requested=False): raise NotImplementedError def progress(self): return self._progress def load_status(self): return self._load_status def _openurl_prepare(self, url): qtutils.ensure_valid(url) self.title_changed.emit(url.toDisplayString()) def openurl(self, url): raise NotImplementedError def reload(self, *, force=False): raise NotImplementedError def stop(self): raise NotImplementedError def clear_ssl_errors(self): raise NotImplementedError def dump_async(self, callback, *, plain=False): """Dump the current page to a 
file ascync. The given callback will be called with the result when dumping is complete. """ raise NotImplementedError def run_js_async(self, code, callback=None, *, world=None): """Run javascript async. The given callback will be called with the result when running JS is complete. Args: code: The javascript code to run. callback: The callback to call with the result, or None. world: A world ID (int or usertypes.JsWorld member) to run the JS in the main world or in another isolated world. """ raise NotImplementedError def shutdown(self): raise NotImplementedError def title(self): raise NotImplementedError def icon(self): raise NotImplementedError def set_html(self, html, base_url): raise NotImplementedError def __repr__(self): try: url = utils.elide(self.url().toDisplayString(QUrl.EncodeUnicode), 100) except AttributeError: url = '<AttributeError>' return utils.get_repr(self, tab_id=self.tab_id, url=url)
1
16,977
I'd prefer this to be called `pinned` - also it should probably be in the `TabData` class instead, which is accessible as the `.data` attribute of the `BrowserTab` object - otherwise, `BrowserTab` would have a lot of quite "random" attributes :wink:
qutebrowser-qutebrowser
py
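The reviewer asks for the flag to be named pinned and to live on the TabData namespace, reachable as tab.data, rather than as another ad-hoc attribute on the tab object itself. A minimal, self-contained sketch of that arrangement follows; it mirrors the TabData/AbstractTab shape from the file above but is an illustration, not the actual patch.

class TabData:
    """Per-tab state namespace, as in the file above.

    Keeping per-tab flags here is what the review comment asks for,
    so the tab object does not accumulate unrelated attributes.
    """

    def __init__(self):
        self.keep_icon = False
        self.viewing_source = False
        self.pinned = False  # the renamed flag suggested by the reviewer


class AbstractTab:
    """Reduced stand-in for the real AbstractTab wrapper."""

    def __init__(self):
        self.data = TabData()


tab = AbstractTab()
tab.data.pinned = True  # callers toggle the flag through .data
assert tab.data.pinned

With this layout, commands that pin or unpin a tab only touch tab.data.pinned, and the tab class itself stays free of feature-specific attributes.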
@@ -24,4 +24,7 @@ final class ChromeDriverCommand { private ChromeDriverCommand() {} static final String LAUNCH_APP = "launchApp"; + static final String SEND_COMMANDS_FOR_DOWNLOAD_CHROME_HEAD_LESS + = "sendCommandForDownloadChromeHeadLess"; + }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.chrome; /** * Constants for the ChromeDriver specific command IDs. */ final class ChromeDriverCommand { private ChromeDriverCommand() {} static final String LAUNCH_APP = "launchApp"; }
1
15,148
Nit: `Headless` is one word, not two, and so doesn't need camelcasing in this way.
SeleniumHQ-selenium
java
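The nit is purely about spelling: "headless" is a single word, so the identifier should not camel-case "Less" as if it were separate. A sketch of the corrected declaration follows; it compiles on its own, but the exact identifier and whether the wire value changes along with it are assumptions, since the reviewer only pointed out the spelling.

// Sketch of the constant with "headless" treated as one word, as the
// review asks; the surrounding class mirrors the file above.
final class ChromeDriverCommand {
  private ChromeDriverCommand() {}

  static final String LAUNCH_APP = "launchApp";

  static final String SEND_COMMAND_FOR_DOWNLOAD_CHROME_HEADLESS =
      "sendCommandForDownloadChromeHeadless";
}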
@@ -1,6 +1,9 @@ -/*global Audit, runRules, cleanupPlugins */ +/*global Audit */ /*eslint indent: 0*/ -function runCommand(data, keepalive, callback) { +import cleanupPlugins from './cleanup-plugins'; +import runRules from './run-rules'; + +const runCommand = (data, keepalive, callback) => { 'use strict'; var resolve = callback; var reject = function(err) {
1
/*global Audit, runRules, cleanupPlugins */ /*eslint indent: 0*/ function runCommand(data, keepalive, callback) { 'use strict'; var resolve = callback; var reject = function(err) { if (err instanceof Error === false) { err = new Error(err); } callback(err); }; var context = (data && data.context) || {}; if (context.hasOwnProperty('include') && !context.include.length) { context.include = [document]; } var options = (data && data.options) || {}; switch (data.command) { case 'rules': return runRules( context, options, function(results, cleanup) { resolve(results); // Cleanup AFTER resolve so that selectors can be generated cleanup(); }, reject ); case 'cleanup-plugin': return cleanupPlugins(resolve, reject); default: // go through the registered commands if ( axe._audit && axe._audit.commands && axe._audit.commands[data.command] ) { return axe._audit.commands[data.command](data, callback); } } } /** * Sets up Rules, Messages and default options for Checks, must be invoked before attempting analysis * @param {Object} audit The "audit specification" object * @private */ axe._load = function(audit) { 'use strict'; axe.utils.respondable.subscribe('axe.ping', function( data, keepalive, respond ) { respond({ axe: true }); }); axe.utils.respondable.subscribe('axe.start', runCommand); axe._audit = new Audit(audit); };
1
15,459
Why not import audit as well?
dequelabs-axe-core
js
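The question above is why Audit is still pulled in through the /*global*/ comment when cleanupPlugins and runRules were converted to ES module imports in the patch. A sketch of the import-style variant follows; it is a fragment of the module, the './audit' path is a guess, and the command dispatch body is elided, so treat it as illustrative rather than the actual change.

// Import Audit the same way as the other helpers, so the /*global*/
// escape hatch is no longer needed for it.
import Audit from './audit'; // module path is an assumption
import cleanupPlugins from './cleanup-plugins';
import runRules from './run-rules';

axe._load = function(audit) {
  // runCommand is the dispatch function defined earlier in this module.
  axe.utils.respondable.subscribe('axe.start', runCommand);
  axe._audit = new Audit(audit); // resolved via the import, not a global
};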
@@ -92,7 +92,9 @@ class CategoryDataFixture extends AbstractReferenceFixture . '(black-and-white), or in color, and in two or three dimensions', ] ); - $categoryData->parent = $this->getReference(self::CATEGORY_ELECTRONICS); + /** @var \Shopsys\ShopBundle\Model\Category\Category $categoryElectronics */ + $categoryElectronics = $this->getReference(self::CATEGORY_ELECTRONICS); + $categoryData->parent = $categoryElectronics; $this->createCategory($categoryData, self::CATEGORY_TV); $categoryData->name = [
1
<?php namespace Shopsys\ShopBundle\DataFixtures\Demo; use Doctrine\Common\Persistence\ObjectManager; use Shopsys\FrameworkBundle\Component\DataFixture\AbstractReferenceFixture; use Shopsys\FrameworkBundle\Component\Domain\Domain; use Shopsys\FrameworkBundle\Model\Category\CategoryData; use Shopsys\FrameworkBundle\Model\Category\CategoryDataFactoryInterface; use Shopsys\FrameworkBundle\Model\Category\CategoryFacade; class CategoryDataFixture extends AbstractReferenceFixture { public const CATEGORY_ELECTRONICS = 'category_electronics'; public const CATEGORY_TV = 'category_tv'; public const CATEGORY_PHOTO = 'category_photo'; public const CATEGORY_PRINTERS = 'category_printers'; public const CATEGORY_PC = 'category_pc'; public const CATEGORY_PHONES = 'category_phones'; public const CATEGORY_COFFEE = 'category_coffee'; public const CATEGORY_BOOKS = 'category_books'; public const CATEGORY_TOYS = 'category_toys'; public const CATEGORY_GARDEN_TOOLS = 'category_garden_tools'; public const CATEGORY_FOOD = 'category_food'; /** * @var \Shopsys\FrameworkBundle\Model\Category\CategoryFacade */ protected $categoryFacade; /** * @var \Shopsys\FrameworkBundle\Model\Category\CategoryDataFactoryInterface */ protected $categoryDataFactory; /** * @var \Shopsys\FrameworkBundle\Component\Domain\Domain */ protected $domain; /** * @param \Shopsys\FrameworkBundle\Model\Category\CategoryFacade $categoryFacade * @param \Shopsys\FrameworkBundle\Model\Category\CategoryDataFactoryInterface $categoryDataFactory * @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain */ public function __construct( CategoryFacade $categoryFacade, CategoryDataFactoryInterface $categoryDataFactory, Domain $domain ) { $this->categoryFacade = $categoryFacade; $this->categoryDataFactory = $categoryDataFactory; $this->domain = $domain; } /** * @param \Doctrine\Common\Persistence\ObjectManager $manager */ public function load(ObjectManager $manager) { /** * Root category is created in database migration. * @see \Shopsys\FrameworkBundle\Migrations\Version20180603135345 */ $rootCategory = $this->categoryFacade->getRootCategory(); $categoryData = $this->categoryDataFactory->create(); $emptyDescriptionsForAllDomains = $this->createDomainKeyedArray(); $categoryData->name = [ 'cs' => 'Elektro', 'en' => 'Electronics', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'Our electronics include devices used for entertainment (flat screen TVs, DVD players, DVD movies, iPods, ' . 'video games, remote control cars, etc.), communications (telephones, cell phones, e-mail-capable laptops, etc.) ' . 'and home office activities (e.g., desktop computers, printers, paper shredders, etc.).', ] ); $categoryData->parent = $rootCategory; $this->createCategory($categoryData, self::CATEGORY_ELECTRONICS); $categoryData->name = [ 'cs' => 'Televize, audio', 'en' => 'TV, audio', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'Television or TV is a telecommunication medium used for transmitting sound with moving images in monochrome ' . 
'(black-and-white), or in color, and in two or three dimensions', ] ); $categoryData->parent = $this->getReference(self::CATEGORY_ELECTRONICS); $this->createCategory($categoryData, self::CATEGORY_TV); $categoryData->name = [ 'cs' => 'Fotoaparáty', 'en' => 'Cameras & Photo', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'A camera is an optical instrument for recording or capturing images, which may be stored locally, ' . 'transmitted to another location, or both.', ] ); $this->createCategory($categoryData, self::CATEGORY_PHOTO); $categoryData->name = [ 'cs' => 'Tiskárny', 'en' => 'Printers', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'A printer is a peripheral which makes a persistent human readable representation of graphics or text on paper ' . 'or similar physical media.', ] ); $this->createCategory($categoryData, self::CATEGORY_PRINTERS); $categoryData->name = [ 'cs' => 'Počítače & příslušenství', 'en' => 'Personal Computers & accessories', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'A personal computer (PC) is a general-purpose computer whose size, capabilities, and original sale price ' . 'make it useful for individuals, and is intended to be operated directly by an end-user with no intervening computer ' . 'time-sharing models that allowed larger, more expensive minicomputer and mainframe systems to be used by many people, ' . 'usually at the same time.', ] ); $this->createCategory($categoryData, self::CATEGORY_PC); $categoryData->name = [ 'cs' => 'Mobilní telefony', 'en' => 'Mobile Phones', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'A telephone is a telecommunications device that permits two or more users to conduct a conversation when they are ' . 'too far apart to be heard directly. A telephone converts sound, typically and most efficiently the human voice, ' . 'into electronic signals suitable for transmission via cables or other transmission media over long distances, ' . 'and replays such signals simultaneously in audible form to its user.', ] ); $this->createCategory($categoryData, self::CATEGORY_PHONES); $categoryData->name = [ 'cs' => 'Kávovary', 'en' => 'Coffee Machines', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'Coffeemakers or coffee machines are cooking appliances used to brew coffee. While there are many different types ' . 'of coffeemakers using a number of different brewing principles, in the most common devices, coffee grounds ' . 'are placed in a paper or metal filter inside a funnel, which is set over a glass or ceramic coffee pot, ' . 'a cooking pot in the kettle family. Cold water is poured into a separate chamber, which is then heated up to the ' . 'boiling point, and directed into the funnel.', ] ); $this->createCategory($categoryData, self::CATEGORY_COFFEE); $categoryData->name = [ 'cs' => 'Knihy', 'en' => 'Books', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'A book is a set of written, printed, illustrated, or blank sheets, made of ink, paper, parchment, or other ' . 'materials, fastened together to hinge at one side. A single sheet within a book is a leaf, and each side of a leaf ' . 'is a page. 
A set of text-filled or illustrated pages produced in electronic format is known as an electronic book, ' . 'or e-book.', ] ); $categoryData->parent = $rootCategory; $this->createCategory($categoryData, self::CATEGORY_BOOKS); $categoryData->name = [ 'cs' => 'Hračky a další', 'en' => 'Toys', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'A toy is an item that can be used for play. Toys are generally played with by children and pets. ' . 'Playing with toys is an enjoyable means of training young children for life in society. Different materials are ' . 'used to make toys enjoyable to all ages. ', ] ); $this->createCategory($categoryData, self::CATEGORY_TOYS); $categoryData->name = [ 'cs' => 'Zahradní náčiní', 'en' => 'Garden tools', ]; $categoryData->descriptions = array_merge( $emptyDescriptionsForAllDomains, [ Domain::FIRST_DOMAIN_ID => 'A garden tool is any one of many tools made for gardens and gardening and overlaps with the range of tools ' . 'made for agriculture and horticulture. Garden tools can also be hand tools and power tools.', ] ); $this->createCategory($categoryData, self::CATEGORY_GARDEN_TOOLS); $categoryData->name = [ 'cs' => 'Jídlo', 'en' => 'Food', ]; $categoryData->descriptions = array_merge( $this->createDomainKeyedArray(), [ Domain::FIRST_DOMAIN_ID => 'Food is any substance consumed to provide nutritional support for the body. It is usually of plant or ' . 'animal origin, and contains essential nutrients, such as fats, proteins, vitamins, or minerals. The substance ' . 'is ingested by an organism and assimilated by the organism\'s cells to provide energy, maintain life, ' . 'or stimulate growth.', ] ); $this->createCategory($categoryData, self::CATEGORY_FOOD); } /** * @return null[] */ protected function createDomainKeyedArray(): array { return array_fill_keys($this->domain->getAllIds(), null); } /** * @param \Shopsys\FrameworkBundle\Model\Category\CategoryData $categoryData * @param string|null $referenceName * @return \Shopsys\FrameworkBundle\Model\Category\Category */ protected function createCategory(CategoryData $categoryData, $referenceName = null) { $category = $this->categoryFacade->create($categoryData); if ($referenceName !== null) { $this->addReference($referenceName, $category); } return $category; } }
1
16,271
OK, I think there will be more of those...
shopsys-shopsys
php
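The remark anticipates that the pattern introduced in the patch (assigning the getReference() result to a local variable with an inline @var annotation before using it) will have to be repeated elsewhere. A sketch of the same pattern applied to another category reference follows; only the fixture class and the CATEGORY_TV constant are taken from the file above, and where exactly this would be needed is an assumption.

// Same pattern as in the patch, applied from another data fixture that
// needs the TV category; the surrounding usage is illustrative.
/** @var \Shopsys\ShopBundle\Model\Category\Category $categoryTv */
$categoryTv = $this->getReference(CategoryDataFixture::CATEGORY_TV);
// ...pass $categoryTv wherever the raw getReference() result was used before.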
@@ -12,6 +12,6 @@ return [ | */ - 'failed' => 'These credentials do not match our records.', - 'throttle' => 'Too many login attempts. Please try again in :seconds seconds.', + 'failed' => 'Овие акредитиви не се совпаѓаат со нашите записи.', + 'throttle' => 'Премногу обиди за најавување. Обидете се повторно за :seconds секунди.', ];
1
<?php return [ /* |-------------------------------------------------------------------------- | Authentication Language Lines |-------------------------------------------------------------------------- | | The following language lines are used during authentication for various | messages that we need to display to the user. You are free to modify | these language lines according to your application's requirements. | */ 'failed' => 'These credentials do not match our records.', 'throttle' => 'Too many login attempts. Please try again in :seconds seconds.', ];
1
8,249
Can you add : 'password' => 'The provided password is incorrect.', ?
Laravel-Lang-lang
php
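The reviewer asks for the password line that newer Laravel versions expect in auth.php. A sketch of the translated file with that key added follows; the two existing Macedonian strings are taken from the patch, while the password string is left in English as a placeholder because no agreed Macedonian wording appears above.

<?php

return [
    'failed'   => 'Овие акредитиви не се совпаѓаат со нашите записи.',
    'password' => 'The provided password is incorrect.', // TODO: supply the Macedonian translation
    'throttle' => 'Премногу обиди за најавување. Обидете се повторно за :seconds секунди.',
];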
@@ -18,6 +18,7 @@ from logHandler import log from comtypes.gen.UIAutomationClient import * + #Some new win8 UIA constants that could be missing UIA_StyleIdAttributeId=40034 UIA_AnnotationAnnotationTypeIdPropertyId=30113
1
from ctypes import * from ctypes.wintypes import * import comtypes.client from comtypes.automation import VT_EMPTY from comtypes import * import weakref import threading import time import api import appModuleHandler import queueHandler import controlTypes import NVDAHelper import winKernel import winUser import eventHandler from logHandler import log from comtypes.gen.UIAutomationClient import * #Some new win8 UIA constants that could be missing UIA_StyleIdAttributeId=40034 UIA_AnnotationAnnotationTypeIdPropertyId=30113 UIA_AnnotationTypesAttributeId=40031 AnnotationType_SpellingError=60001 UIA_AnnotationObjectsAttributeId=40032 StyleId_Heading1=70001 StyleId_Heading9=70009 ItemIndex_Property_GUID=GUID("{92A053DA-2969-4021-BF27-514CFC2E4A69}") ItemCount_Property_GUID=GUID("{ABBF5C45-5CCC-47b7-BB4E-87CB87BBD162}") UIA_FullDescriptionPropertyId=30159 UIA_LevelPropertyId=30154 UIA_PositionInSetPropertyId=30152 UIA_SizeOfSetPropertyId=30153 UIA_LocalizedLandmarkTypePropertyId=30158 UIA_LandmarkTypePropertyId=30157 HorizontalTextAlignment_Left=0 HorizontalTextAlignment_Centered=1 HorizontalTextAlignment_Right=2 HorizontalTextAlignment_Justified=3 badUIAWindowClassNames=[ "SysTreeView32", "WuDuiListView", "ComboBox", "msctls_progress32", "Edit", "CommonPlacesWrapperWndClass", "SysMonthCal32", "SUPERGRID", #Outlook 2010 message list "RichEdit", "RichEdit20", "RICHEDIT50W", "SysListView32", "_WwG", '_WwN', "EXCEL7", "Button", ] NVDAUnitsToUIAUnits={ "character":TextUnit_Character, "word":TextUnit_Word, "line":TextUnit_Line, "paragraph":TextUnit_Paragraph, "readingChunk":TextUnit_Line, } UIAControlTypesToNVDARoles={ UIA_ButtonControlTypeId:controlTypes.ROLE_BUTTON, UIA_CalendarControlTypeId:controlTypes.ROLE_CALENDAR, UIA_CheckBoxControlTypeId:controlTypes.ROLE_CHECKBOX, UIA_ComboBoxControlTypeId:controlTypes.ROLE_COMBOBOX, UIA_EditControlTypeId:controlTypes.ROLE_EDITABLETEXT, UIA_HyperlinkControlTypeId:controlTypes.ROLE_LINK, UIA_ImageControlTypeId:controlTypes.ROLE_GRAPHIC, UIA_ListItemControlTypeId:controlTypes.ROLE_LISTITEM, UIA_ListControlTypeId:controlTypes.ROLE_LIST, UIA_MenuControlTypeId:controlTypes.ROLE_POPUPMENU, UIA_MenuBarControlTypeId:controlTypes.ROLE_MENUBAR, UIA_MenuItemControlTypeId:controlTypes.ROLE_MENUITEM, UIA_ProgressBarControlTypeId:controlTypes.ROLE_PROGRESSBAR, UIA_RadioButtonControlTypeId:controlTypes.ROLE_RADIOBUTTON, UIA_ScrollBarControlTypeId:controlTypes.ROLE_SCROLLBAR, UIA_SliderControlTypeId:controlTypes.ROLE_SLIDER, UIA_SpinnerControlTypeId:controlTypes.ROLE_SPINBUTTON, UIA_StatusBarControlTypeId:controlTypes.ROLE_STATUSBAR, UIA_TabControlTypeId:controlTypes.ROLE_TABCONTROL, UIA_TabItemControlTypeId:controlTypes.ROLE_TAB, UIA_TextControlTypeId:controlTypes.ROLE_STATICTEXT, UIA_ToolBarControlTypeId:controlTypes.ROLE_TOOLBAR, UIA_ToolTipControlTypeId:controlTypes.ROLE_TOOLTIP, UIA_TreeControlTypeId:controlTypes.ROLE_TREEVIEW, UIA_TreeItemControlTypeId:controlTypes.ROLE_TREEVIEWITEM, UIA_CustomControlTypeId:controlTypes.ROLE_UNKNOWN, UIA_GroupControlTypeId:controlTypes.ROLE_GROUPING, UIA_ThumbControlTypeId:controlTypes.ROLE_THUMB, UIA_DataGridControlTypeId:controlTypes.ROLE_DATAGRID, UIA_DataItemControlTypeId:controlTypes.ROLE_DATAITEM, UIA_DocumentControlTypeId:controlTypes.ROLE_DOCUMENT, UIA_SplitButtonControlTypeId:controlTypes.ROLE_SPLITBUTTON, UIA_WindowControlTypeId:controlTypes.ROLE_WINDOW, UIA_PaneControlTypeId:controlTypes.ROLE_PANE, UIA_HeaderControlTypeId:controlTypes.ROLE_HEADER, UIA_HeaderItemControlTypeId:controlTypes.ROLE_HEADERITEM, 
UIA_TableControlTypeId:controlTypes.ROLE_TABLE, UIA_TitleBarControlTypeId:controlTypes.ROLE_TITLEBAR, UIA_SeparatorControlTypeId:controlTypes.ROLE_SEPARATOR, } UIAPropertyIdsToNVDAEventNames={ UIA_NamePropertyId:"nameChange", UIA_HelpTextPropertyId:"descriptionChange", UIA_ExpandCollapseExpandCollapseStatePropertyId:"stateChange", UIA_ToggleToggleStatePropertyId:"stateChange", UIA_IsEnabledPropertyId:"stateChange", UIA_ValueValuePropertyId:"valueChange", UIA_RangeValueValuePropertyId:"valueChange", } UIAEventIdsToNVDAEventNames={ #UIA_Text_TextChangedEventId:"textChanged", UIA_SelectionItem_ElementSelectedEventId:"UIA_elementSelected", UIA_MenuOpenedEventId:"gainFocus", UIA_SelectionItem_ElementAddedToSelectionEventId:"stateChange", UIA_SelectionItem_ElementRemovedFromSelectionEventId:"stateChange", #UIA_MenuModeEndEventId:"menuModeEnd", #UIA_Text_TextSelectionChangedEventId:"caret", UIA_ToolTipOpenedEventId:"UIA_toolTipOpened", #UIA_AsyncContentLoadedEventId:"documentLoadComplete", #UIA_ToolTipClosedEventId:"hide", UIA_Window_WindowOpenedEventId:"UIA_window_windowOpen", } class UIAHandler(COMObject): _com_interfaces_=[IUIAutomationEventHandler,IUIAutomationFocusChangedEventHandler,IUIAutomationPropertyChangedEventHandler] def __init__(self): super(UIAHandler,self).__init__() self.MTAThreadInitEvent=threading.Event() self.MTAThreadStopEvent=threading.Event() self.MTAThreadInitException=None self.MTAThread=threading.Thread(target=self.MTAThreadFunc) self.MTAThread.daemon=True self.MTAThread.start() self.MTAThreadInitEvent.wait(2) if self.MTAThreadInitException: raise self.MTAThreadInitException def terminate(self): MTAThreadHandle=HANDLE(windll.kernel32.OpenThread(winKernel.SYNCHRONIZE,False,self.MTAThread.ident)) self.MTAThreadStopEvent.set() #Wait for the MTA thread to die (while still message pumping) if windll.user32.MsgWaitForMultipleObjects(1,byref(MTAThreadHandle),False,200,0)!=0: log.debugWarning("Timeout or error while waiting for UIAHandler MTA thread") windll.kernel32.CloseHandle(MTAThreadHandle) del self.MTAThread def MTAThreadFunc(self): try: oledll.ole32.CoInitializeEx(None,comtypes.COINIT_MULTITHREADED) isUIA8=False try: self.clientObject=CoCreateInstance(CUIAutomation8._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER) isUIA8=True except (COMError,WindowsError,NameError): self.clientObject=CoCreateInstance(CUIAutomation._reg_clsid_,interface=IUIAutomation,clsctx=CLSCTX_INPROC_SERVER) if isUIA8: try: self.clientObject=self.clientObject.QueryInterface(IUIAutomation3) except COMError: self.clientObject=self.clientObject.QueryInterface(IUIAutomation2) log.info("UIAutomation: %s"%self.clientObject.__class__.__mro__[1].__name__) self.windowTreeWalker=self.clientObject.createTreeWalker(self.clientObject.CreateNotCondition(self.clientObject.CreatePropertyCondition(UIA_NativeWindowHandlePropertyId,0))) self.windowCacheRequest=self.clientObject.CreateCacheRequest() self.windowCacheRequest.AddProperty(UIA_NativeWindowHandlePropertyId) self.UIAWindowHandleCache={} self.baseTreeWalker=self.clientObject.RawViewWalker self.baseCacheRequest=self.windowCacheRequest.Clone() import UIAHandler self.ItemIndex_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemIndex_Property_GUID),u"ItemIndex",1) self.ItemCount_PropertyId=NVDAHelper.localLib.registerUIAProperty(byref(ItemCount_Property_GUID),u"ItemCount",1) for propertyId in 
(UIA_FrameworkIdPropertyId,UIA_AutomationIdPropertyId,UIA_ClassNamePropertyId,UIA_ControlTypePropertyId,UIA_ProviderDescriptionPropertyId,UIA_ProcessIdPropertyId,UIA_IsTextPatternAvailablePropertyId): self.baseCacheRequest.addProperty(propertyId) self.baseCacheRequest.addPattern(UIA_TextPatternId) self.rootElement=self.clientObject.getRootElementBuildCache(self.baseCacheRequest) self.reservedNotSupportedValue=self.clientObject.ReservedNotSupportedValue self.ReservedMixedAttributeValue=self.clientObject.ReservedMixedAttributeValue self.clientObject.AddFocusChangedEventHandler(self.baseCacheRequest,self) self.clientObject.AddPropertyChangedEventHandler(self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self,UIAPropertyIdsToNVDAEventNames.keys()) for x in UIAEventIdsToNVDAEventNames.iterkeys(): self.clientObject.addAutomationEventHandler(x,self.rootElement,TreeScope_Subtree,self.baseCacheRequest,self) except Exception as e: self.MTAThreadInitException=e finally: self.MTAThreadInitEvent.set() self.MTAThreadStopEvent.wait() self.clientObject.RemoveAllEventHandlers() def IUIAutomationEventHandler_HandleAutomationEvent(self,sender,eventID): if not self.MTAThreadInitEvent.isSet(): # UIAHandler hasn't finished initialising yet, so just ignore this event. return if eventID==UIA_MenuOpenedEventId and eventHandler.isPendingEvents("gainFocus"): # We don't need the menuOpened event if focus has been fired, # as focus should be more correct. return NVDAEventName=UIAEventIdsToNVDAEventNames.get(eventID,None) if not NVDAEventName: return if not self.isNativeUIAElement(sender): return window=self.getNearestWindowHandle(sender) if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window): return import NVDAObjects.UIA obj=NVDAObjects.UIA.UIA(UIAElement=sender) if not obj or (NVDAEventName=="gainFocus" and not obj.shouldAllowUIAFocusEvent): return focus=api.getFocusObject() if obj==focus: obj=focus eventHandler.queueEvent(NVDAEventName,obj) def IUIAutomationFocusChangedEventHandler_HandleFocusChangedEvent(self,sender): if not self.MTAThreadInitEvent.isSet(): # UIAHandler hasn't finished initialising yet, so just ignore this event. return if not self.isNativeUIAElement(sender): return import NVDAObjects.UIA if isinstance(eventHandler.lastQueuedFocusObject,NVDAObjects.UIA.UIA): lastFocus=eventHandler.lastQueuedFocusObject.UIAElement # Ignore duplicate focus events. # It seems that it is possible for compareElements to return True, even though the objects are different. # Therefore, don't ignore the event if the last focus object has lost its hasKeyboardFocus state. if self.clientObject.compareElements(sender,lastFocus) and lastFocus.currentHasKeyboardFocus: return window=self.getNearestWindowHandle(sender) if window and not eventHandler.shouldAcceptEvent("gainFocus",windowHandle=window): return obj=NVDAObjects.UIA.UIA(UIAElement=sender) if not obj or not obj.shouldAllowUIAFocusEvent: return eventHandler.queueEvent("gainFocus",obj) def IUIAutomationPropertyChangedEventHandler_HandlePropertyChangedEvent(self,sender,propertyId,newValue): # #3867: For now manually force this VARIANT type to empty to get around a nasty double free in comtypes/ctypes. # We also don't use the value in this callback. newValue.vt=VT_EMPTY if not self.MTAThreadInitEvent.isSet(): # UIAHandler hasn't finished initialising yet, so just ignore this event. 
return NVDAEventName=UIAPropertyIdsToNVDAEventNames.get(propertyId,None) if not NVDAEventName: return if not self.isNativeUIAElement(sender): return window=self.getNearestWindowHandle(sender) if window and not eventHandler.shouldAcceptEvent(NVDAEventName,windowHandle=window): return import NVDAObjects.UIA obj=NVDAObjects.UIA.UIA(UIAElement=sender) if not obj: return focus=api.getFocusObject() if obj==focus: obj=focus eventHandler.queueEvent(NVDAEventName,obj) def _isUIAWindowHelper(self,hwnd): # UIA in NVDA's process freezes in Windows 7 and below processID=winUser.getWindowThreadProcessID(hwnd)[0] if windll.kernel32.GetCurrentProcessId()==processID: return False import NVDAObjects.window windowClass=NVDAObjects.window.Window.normalizeWindowClassName(winUser.getClassName(hwnd)) # There are certain window classes that just had bad UIA implementations if windowClass in badUIAWindowClassNames: return False if windowClass=="NetUIHWND": parentHwnd=winUser.getAncestor(hwnd,winUser.GA_ROOT) # #2816: Outlook 2010 auto complete does not fire enough UIA events, IAccessible is better. # #4056: Combo boxes in Office 2010 Options dialogs don't expose a name via UIA, but do via MSAA. if winUser.getClassName(parentHwnd) in {"Net UI Tool Window","NUIDialog"}: return False # allow the appModule for the window to also choose if this window is bad appModule=appModuleHandler.getAppModuleFromProcessID(processID) if appModule and appModule.isBadUIAWindow(hwnd): return False # Ask the window if it supports UIA natively return windll.UIAutomationCore.UiaHasServerSideProvider(hwnd) def isUIAWindow(self,hwnd): now=time.time() v=self.UIAWindowHandleCache.get(hwnd,None) if not v or (now-v[1])>0.5: v=self._isUIAWindowHelper(hwnd),now self.UIAWindowHandleCache[hwnd]=v return v[0] def getNearestWindowHandle(self,UIAElement): if hasattr(UIAElement,"_nearestWindowHandle"): # Called previously. Use cached result. return UIAElement._nearestWindowHandle try: window=UIAElement.cachedNativeWindowHandle except COMError: window=None if not window: # This element reports no window handle, so use the nearest ancestor window handle. try: new=self.windowTreeWalker.NormalizeElementBuildCache(UIAElement,self.windowCacheRequest) except COMError: return None try: window=new.cachedNativeWindowHandle except COMError: window=None # Cache for future use to improve performance. UIAElement._nearestWindowHandle=window return window def isNativeUIAElement(self,UIAElement): #Due to issues dealing with UIA elements coming from the same process, we do not class these UIA elements as usable. #It seems to be safe enough to retreave the cached processID, but using tree walkers or fetching other properties causes a freeze. try: processID=UIAElement.cachedProcessId except COMError: return False if processID==windll.kernel32.GetCurrentProcessId(): return False # Whether this is a native element depends on whether its window natively supports UIA. windowHandle=self.getNearestWindowHandle(UIAElement) if windowHandle: if self.isUIAWindow(windowHandle): return True if winUser.getClassName(windowHandle)=="DirectUIHWND" and "IEFRAME.dll" in UIAElement.cachedProviderDescription and UIAElement.currentClassName in ("DownloadBox", "accessiblebutton", "DUIToolbarButton", "PushButton"): # This is the IE 9 downloads list. # #3354: UiaHasServerSideProvider returns false for the IE 9 downloads list window, # so we'd normally use MSAA for this control. 
# However, its MSAA implementation is broken (fires invalid events) if UIA is initialised, # whereas its UIA implementation works correctly. # Therefore, we must use UIA here. return True return False
1
19,322
Extraneous blank line.
nvaccess-nvda
py
@@ -30,7 +30,7 @@ function ariaOwns(nodes, role) { if (nodes[index] === null) { continue; } - let virtualTree = axe.utils.getNodeFromTree(axe._tree[0], nodes[index]); + let virtualTree = axe.utils.getNodeFromTree(nodes[index]); if (owns(nodes[index], virtualTree, role, true)) { return true; }
1
const requiredOwned = axe.commons.aria.requiredOwned; const implicitNodes = axe.commons.aria.implicitNodes; const matchesSelector = axe.utils.matchesSelector; const idrefs = axe.commons.dom.idrefs; const reviewEmpty = options && Array.isArray(options.reviewEmpty) ? options.reviewEmpty : []; function owns(node, virtualTree, role, ariaOwned) { if (node === null) { return false; } var implicit = implicitNodes(role), selector = ['[role="' + role + '"]']; if (implicit) { selector = selector.concat(implicit); } selector = selector.join(','); return ariaOwned ? matchesSelector(node, selector) || !!axe.utils.querySelectorAll(virtualTree, selector)[0] : !!axe.utils.querySelectorAll(virtualTree, selector)[0]; } function ariaOwns(nodes, role) { var index, length; for (index = 0, length = nodes.length; index < length; index++) { if (nodes[index] === null) { continue; } let virtualTree = axe.utils.getNodeFromTree(axe._tree[0], nodes[index]); if (owns(nodes[index], virtualTree, role, true)) { return true; } } return false; } function missingRequiredChildren(node, childRoles, all, role) { /* eslint max-statements: ["error", 22], complexity: ["error", 17] */ var i, l = childRoles.length, missing = [], ownedElements = idrefs(node, 'aria-owns'); for (i = 0; i < l; i++) { var r = childRoles[i]; if (owns(node, virtualNode, r) || ariaOwns(ownedElements, r)) { if (!all) { return null; } } else { if (all) { missing.push(r); } } } // combobox exceptions if (role === 'combobox') { // remove 'textbox' from missing roles if combobox is a native text-type input var textboxIndex = missing.indexOf('textbox'); var textTypeInputs = ['text', 'search', 'email', 'url', 'tel']; if ( textboxIndex >= 0 && node.nodeName.toUpperCase() === 'INPUT' && textTypeInputs.includes(node.type) ) { missing.splice(textboxIndex, 1); } // remove 'listbox' from missing roles if combobox is collapsed var listboxIndex = missing.indexOf('listbox'); var expanded = node.getAttribute('aria-expanded'); if (listboxIndex >= 0 && (!expanded || expanded === 'false')) { missing.splice(listboxIndex, 1); } } if (missing.length) { return missing; } if (!all && childRoles.length) { return childRoles; } return null; } var role = node.getAttribute('role'); var required = requiredOwned(role); if (!required) { return true; } var all = false; var childRoles = required.one; if (!childRoles) { var all = true; childRoles = required.all; } var missing = missingRequiredChildren(node, childRoles, all, role); if (!missing) { return true; } this.data(missing); if (reviewEmpty.includes(role)) { return undefined; } else { return false; }
1
14,370
nitpick: use `const virtualTree`, I know it was `let` earlier.
dequelabs-axe-core
js
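The nitpick is that virtualTree is assigned once per iteration and never reassigned, so with the one-argument getNodeFromTree call from the patch the binding can be declared with const instead of let. A sketch of the loop with only that change follows; owns() and the axe utilities are the ones defined in the file above, so this is a fragment rather than a standalone script.

function ariaOwns(nodes, role) {
  var index, length;
  for (index = 0, length = nodes.length; index < length; index++) {
    if (nodes[index] === null) {
      continue;
    }
    // const: the reference is bound once per iteration and never mutated.
    const virtualTree = axe.utils.getNodeFromTree(nodes[index]);
    if (owns(nodes[index], virtualTree, role, true)) {
      return true;
    }
  }
  return false;
}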
@@ -464,7 +464,10 @@ func (j *mdJournal) convertToBranch( brmd.SetUnmerged() brmd.SetBranchID(bid) - // Delete the old "merged" version from the cache. + // Delete the old "merged" version from the cache. We aren't + // equipped here to cache the new version, but it will + // eventually get passed via an mdFlushListener to somewhere + // that will cache it. mdcache.Delete(tlfID, ibrmd.RevisionNumber(), NullBranchID) // Re-sign the writer metadata.
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "errors" "fmt" "io/ioutil" "os" "path/filepath" "time" "github.com/keybase/client/go/logger" "github.com/keybase/kbfs/kbfscodec" "github.com/keybase/kbfs/kbfscrypto" "golang.org/x/net/context" "github.com/keybase/client/go/protocol/keybase1" ) // ImmutableBareRootMetadata is a thin wrapper around a // BareRootMetadata that takes ownership of it and does not ever // modify it again. Thus, its MdID can be calculated and // stored along with a local timestamp. ImmutableBareRootMetadata // objects can be assumed to never alias a (modifiable) BareRootMetadata. // // Note that crypto.MakeMdID() on an ImmutableBareRootMetadata will // compute the wrong result, since anonymous fields of interface type // are not encoded inline by the codec. Use // crypto.MakeMDID(ibrmd.BareRootMetadata) instead. // // TODO: Move this to bare_root_metadata.go if it's used in more // places. type ImmutableBareRootMetadata struct { BareRootMetadata mdID MdID localTimestamp time.Time } // MakeImmutableBareRootMetadata makes a new ImmutableBareRootMetadata // from the given BareRootMetadata and its corresponding MdID. func MakeImmutableBareRootMetadata( rmd BareRootMetadata, mdID MdID, localTimestamp time.Time) ImmutableBareRootMetadata { if mdID == (MdID{}) { panic("zero mdID passed to MakeImmutableBareRootMetadata") } return ImmutableBareRootMetadata{rmd, mdID, localTimestamp} } // mdJournal stores a single ordered list of metadata IDs for a (TLF, // user, device) tuple, along with the associated metadata objects, in // flat files on disk. // // The directory layout looks like: // // dir/md_journal/EARLIEST // dir/md_journal/LATEST // dir/md_journal/0...001 // dir/md_journal/0...002 // dir/md_journal/0...fff // dir/mds/0100/0...01/data // ... // dir/mds/01ff/f...ff/data // // There's a single journal subdirectory; the journal ordinals are // just MetadataRevisions, and the journal entries are just MdIDs. // // The Metadata objects are stored separately in dir/mds. Each MD has // its own subdirectory with its ID truncated to 17 bytes (34 // characters) as a name. The MD subdirectories are splayed over (# of // possible hash types) * 256 subdirectories -- one byte for the hash // type (currently only one) plus the first byte of the hash data -- // using the first four characters of the name to keep the number of // directories in dir itself to a manageable number, similar to git. // Each block directory has data, which is the raw MD data that should // hash to the MD ID. Future versions of the journal might add more // files to this directory; if any code is written to move MDs around, // it should be careful to preserve any unknown files in an MD // directory. // // The maximum number of characters added to the root dir by an MD // journal is 45: // // /mds/01ff/f...(30 characters total)...ff/data // // This covers even the temporary files created in convertToBranch, // which create paths like // // /md_journal123456789/0...(16 characters total)...001 // // which have only 37 characters. // // mdJournal is not goroutine-safe, so any code that uses it must // guarantee that only one goroutine at a time calls its functions. type mdJournal struct { // key is assumed to be the VerifyingKey of a device owned by // uid, and both uid and key are assumed constant for the // lifetime of this object. 
uid keybase1.UID key kbfscrypto.VerifyingKey codec kbfscodec.Codec crypto cryptoPure dir string log logger.Logger deferLog logger.Logger j mdIDJournal // This doesn't need to be persisted, even if the journal // becomes empty, since on a restart the branch ID is // retrieved from the server (via GetUnmergedForTLF). branchID BranchID // Set only when the journal becomes empty due to // flushing. This doesn't need to be persisted for the same // reason as branchID. lastMdID MdID } func makeMDJournal( uid keybase1.UID, key kbfscrypto.VerifyingKey, codec kbfscodec.Codec, crypto cryptoPure, dir string, log logger.Logger) (*mdJournal, error) { if uid == keybase1.UID("") { return nil, errors.New("Empty user") } if key == (kbfscrypto.VerifyingKey{}) { return nil, errors.New("Empty verifying key") } journalDir := filepath.Join(dir, "md_journal") deferLog := log.CloneWithAddedDepth(1) journal := mdJournal{ uid: uid, key: key, codec: codec, crypto: crypto, dir: dir, log: log, deferLog: deferLog, j: makeMdIDJournal(codec, journalDir), } earliest, err := journal.getEarliest(false) if err != nil { return nil, err } latest, err := journal.getLatest(false) if err != nil { return nil, err } if (earliest == ImmutableBareRootMetadata{}) != (latest == ImmutableBareRootMetadata{}) { return nil, fmt.Errorf("has earliest=%t != has latest=%t", earliest != ImmutableBareRootMetadata{}, latest != ImmutableBareRootMetadata{}) } if earliest != (ImmutableBareRootMetadata{}) { if earliest.BID() != latest.BID() { return nil, fmt.Errorf( "earliest.BID=%s != latest.BID=%s", earliest.BID(), latest.BID()) } journal.branchID = earliest.BID() } return &journal, nil } // The functions below are for building various paths. func (j mdJournal) mdsPath() string { return filepath.Join(j.dir, "mds") } func (j mdJournal) mdPath(id MdID) string { // Truncate to 34 characters, which corresponds to 16 random // bytes (since the first byte is a hash type) or 128 random // bits, which means that the expected number of MDs generated // before getting a path collision is 2^64 (see // https://en.wikipedia.org/wiki/Birthday_problem#Cast_as_a_collision_problem // ). The full ID can be recovered just by hashing the data // again with the same hash type. idStr := id.String() return filepath.Join(j.mdsPath(), idStr[:4], idStr[4:34]) } func (j mdJournal) mdDataPath(id MdID) string { return filepath.Join(j.mdPath(id), "data") } // getMD verifies the MD data and the writer signature (but not the // key) for the given ID and returns it. It also returns the // last-modified timestamp of the file. verifyBranchID should be false // only when called from makeMDJournal, i.e. when figuring out what to // set j.branchID in the first place. func (j mdJournal) getMD(id MdID, verifyBranchID bool) ( BareRootMetadata, time.Time, error) { // Read data. data, err := ioutil.ReadFile(j.mdDataPath(id)) if err != nil { return nil, time.Time{}, err } // TODO: Read version info. // MDv3 TODO: the file needs to encode the version var rmd BareRootMetadataV2 err = j.codec.Decode(data, &rmd) if err != nil { return nil, time.Time{}, err } // Check integrity. // TODO: MakeMdID serializes rmd -- use data instead. 
mdID, err := j.crypto.MakeMdID(&rmd) if err != nil { return nil, time.Time{}, err } if mdID != id { return nil, time.Time{}, fmt.Errorf( "Metadata ID mismatch: expected %s, got %s", id, mdID) } err = rmd.IsLastModifiedBy(j.uid, j.key) if err != nil { return nil, time.Time{}, err } // MDv3 TODO: pass key bundles when needed err = rmd.IsValidAndSigned(j.codec, j.crypto, nil) if err != nil { return nil, time.Time{}, err } if verifyBranchID && rmd.BID() != j.branchID { return nil, time.Time{}, fmt.Errorf( "Branch ID mismatch: expected %s, got %s", j.branchID, rmd.BID()) } fi, err := os.Stat(j.mdPath(id)) if err != nil { return nil, time.Time{}, err } return &rmd, fi.ModTime(), nil } // putMD stores the given metadata under its ID, if it's not already // stored. func (j mdJournal) putMD(rmd BareRootMetadata) (MdID, error) { // MDv3 TODO: pass key bundles when needed err := rmd.IsValidAndSigned(j.codec, j.crypto, nil) if err != nil { return MdID{}, err } err = rmd.IsLastModifiedBy(j.uid, j.key) if err != nil { return MdID{}, err } id, err := j.crypto.MakeMdID(rmd) if err != nil { return MdID{}, err } _, _, err = j.getMD(id, true) if os.IsNotExist(err) { // Continue on. } else if err != nil { return MdID{}, err } else { // Entry exists, so nothing else to do. return MdID{}, nil } buf, err := j.codec.Encode(rmd) if err != nil { return MdID{}, err } err = os.MkdirAll(j.mdPath(id), 0700) if err != nil { return MdID{}, err } // TODO: Write version info. err = ioutil.WriteFile(j.mdDataPath(id), buf, 0600) if err != nil { return MdID{}, err } return id, nil } // removeMD removes the metadata (which must exist) with the given ID. func (j *mdJournal) removeMD(id MdID) error { path := j.mdPath(id) err := os.RemoveAll(path) if err != nil { return err } // Remove the parent (splayed) directory (which should exist) // if it's empty. err = os.Remove(filepath.Dir(path)) if isExist(err) { err = nil } return err } func (j mdJournal) getEarliest(verifyBranchID bool) ( ImmutableBareRootMetadata, error) { entry, exists, err := j.j.getEarliestEntry() if err != nil { return ImmutableBareRootMetadata{}, err } if !exists { return ImmutableBareRootMetadata{}, nil } earliestID := entry.ID earliest, ts, err := j.getMD(earliestID, verifyBranchID) if err != nil { return ImmutableBareRootMetadata{}, err } return MakeImmutableBareRootMetadata(earliest, earliestID, ts), nil } func (j mdJournal) getLatest(verifyBranchID bool) ( ImmutableBareRootMetadata, error) { entry, exists, err := j.j.getLatestEntry() if err != nil { return ImmutableBareRootMetadata{}, err } if !exists { return ImmutableBareRootMetadata{}, nil } latestID := entry.ID latest, ts, err := j.getMD(latestID, verifyBranchID) if err != nil { return ImmutableBareRootMetadata{}, err } return MakeImmutableBareRootMetadata(latest, latestID, ts), nil } func (j mdJournal) checkGetParams() ( ImmutableBareRootMetadata, error) { head, err := j.getLatest(true) if err != nil { return ImmutableBareRootMetadata{}, err } if head != (ImmutableBareRootMetadata{}) { extra, err := j.getExtraMD( head.GetTLFWriterKeyBundleID(), head.GetTLFReaderKeyBundleID()) if err != nil { return ImmutableBareRootMetadata{}, err } ok, err := isReader(j.uid, head.BareRootMetadata, extra) if err != nil { return ImmutableBareRootMetadata{}, err } if !ok { // TODO: Use a non-server error. 
return ImmutableBareRootMetadata{}, MDServerErrorUnauthorized{} } } return head, nil } func (j *mdJournal) convertToBranch( ctx context.Context, signer cryptoSigner, tlfID TlfID, mdcache MDCache) (bid BranchID, err error) { if j.branchID != NullBranchID { return NullBranchID, fmt.Errorf( "convertToBranch called with BID=%s", j.branchID) } earliestRevision, err := j.j.readEarliestRevision() if err != nil { return NullBranchID, err } latestRevision, err := j.j.readLatestRevision() if err != nil { return NullBranchID, err } j.log.CDebugf( ctx, "rewriting MDs %s to %s", earliestRevision, latestRevision) _, allEntries, err := j.j.getEntryRange( earliestRevision, latestRevision) if err != nil { return NullBranchID, err } bid, err = j.crypto.MakeRandomBranchID() if err != nil { return NullBranchID, err } j.log.CDebugf(ctx, "New branch ID=%s", bid) journalTempDir, err := ioutil.TempDir(j.dir, "md_journal") if err != nil { return NullBranchID, err } j.log.CDebugf(ctx, "Using temp dir %s for rewriting", journalTempDir) mdsToRemove := make([]MdID, 0, len(allEntries)) defer func() { j.log.CDebugf(ctx, "Removing temp dir %s and %d old MDs", journalTempDir, len(mdsToRemove)) removeErr := os.RemoveAll(journalTempDir) if removeErr != nil { j.log.CWarningf(ctx, "Error when removing temp dir %s: %v", journalTempDir, removeErr) } // Garbage-collect the unnecessary MD entries. TODO: we'll // eventually need a sweeper to clean up entries left behind // if we crash here. for _, id := range mdsToRemove { removeErr := j.removeMD(id) if removeErr != nil { j.log.CWarningf(ctx, "Error when removing old MD %s: %v", id, removeErr) } } }() tempJournal := makeMdIDJournal(j.codec, journalTempDir) var prevID MdID for i, entry := range allEntries { ibrmd, _, err := j.getMD(entry.ID, true) if err != nil { return NullBranchID, err } brmd, ok := ibrmd.(MutableBareRootMetadata) if !ok { return NullBranchID, MutableBareRootMetadataNoImplError{} } brmd.SetUnmerged() brmd.SetBranchID(bid) // Delete the old "merged" version from the cache. mdcache.Delete(tlfID, ibrmd.RevisionNumber(), NullBranchID) // Re-sign the writer metadata. buf, err := brmd.GetSerializedWriterMetadata(j.codec) if err != nil { return NullBranchID, err } sigInfo, err := signer.Sign(ctx, buf) if err != nil { return NullBranchID, err } brmd.SetWriterMetadataSigInfo(sigInfo) j.log.CDebugf(ctx, "Old prev root of rev=%s is %s", brmd.RevisionNumber(), brmd.GetPrevRoot()) if i > 0 { j.log.CDebugf(ctx, "Changing prev root of rev=%s to %s", brmd.RevisionNumber(), prevID) brmd.SetPrevRoot(prevID) } // TODO: this rewrites the file, and so the modification time // no longer tracks when exactly the original operation is // done, so future ImmutableBareMetadatas for this MD will // have a slightly wrong localTimestamp. Instead, we might // want to pass in the timestamp and do an explicit // os.Chtimes() on the file after writing it. newID, err := j.putMD(brmd) if err != nil { return NullBranchID, err } mdsToRemove = append(mdsToRemove, newID) // Preserve unknown fields from the old journal. newEntry := entry newEntry.ID = newID err = tempJournal.append(brmd.RevisionNumber(), newEntry) if err != nil { return NullBranchID, err } prevID = newID j.log.CDebugf(ctx, "Changing ID for rev=%s from %s to %s", brmd.RevisionNumber(), entry.ID, newID) } // TODO: Do the below atomically on the filesystem // level. Specifically, make "md_journal" always be a symlink, // and then perform the swap by atomically changing the // symlink to point to the new journal directory. 
oldJournalTempDir := journalTempDir + ".old" dir, err := j.j.move(oldJournalTempDir) if err != nil { return NullBranchID, err } j.log.CDebugf(ctx, "Moved old journal from %s to %s", dir, oldJournalTempDir) newJournalOldDir, err := tempJournal.move(dir) if err != nil { return NullBranchID, err } j.log.CDebugf(ctx, "Moved new journal from %s to %s", newJournalOldDir, dir) // Make the defer block above remove oldJournalTempDir. journalTempDir = oldJournalTempDir mdsToRemove = nil for _, entry := range allEntries { mdsToRemove = append(mdsToRemove, entry.ID) } j.j = tempJournal j.branchID = bid return bid, nil } // getNextEntryToFlush returns the info for the next journal entry to // flush, if it exists, and its revision is less than end. If there is // no next journal entry to flush, the returned MdID will be zero, and // the returned *RootMetadataSigned will be nil. func (j mdJournal) getNextEntryToFlush( ctx context.Context, end MetadataRevision, signer cryptoSigner) ( MdID, *RootMetadataSigned, error) { rmd, err := j.getEarliest(true) if err != nil { return MdID{}, nil, err } if rmd == (ImmutableBareRootMetadata{}) || rmd.RevisionNumber() >= end { return MdID{}, nil, nil } mbrmd, ok := rmd.BareRootMetadata.(MutableBareRootMetadata) if !ok { return MdID{}, nil, MutableBareRootMetadataNoImplError{} } rmds := RootMetadataSigned{MD: mbrmd} err = signMD(ctx, j.codec, signer, &rmds) if err != nil { return MdID{}, nil, err } return rmd.mdID, &rmds, nil } func (j *mdJournal) removeFlushedEntry( ctx context.Context, mdID MdID, rmds *RootMetadataSigned) error { rmd, err := j.getEarliest(true) if err != nil { return err } if rmd == (ImmutableBareRootMetadata{}) { return errors.New("mdJournal unexpectedly empty") } if mdID != rmd.mdID { return fmt.Errorf("Expected mdID %s, got %s", mdID, rmd.mdID) } eq, err := kbfscodec.Equal(j.codec, rmd.BareRootMetadata, rmds.MD) if err != nil { return err } if !eq { return errors.New( "Given RootMetadataSigned doesn't match earliest") } empty, err := j.j.removeEarliest() if err != nil { return err } // Since the journal is now empty, set lastMdID. if empty { j.log.CDebugf(ctx, "Journal is now empty; saving last MdID=%s", mdID) j.lastMdID = mdID } // Garbage-collect the old entry. TODO: we'll eventually need a // sweeper to clean up entries left behind if we crash here. return j.removeMD(mdID) } func getMdID(ctx context.Context, mdserver MDServer, crypto cryptoPure, tlfID TlfID, bid BranchID, mStatus MergeStatus, revision MetadataRevision) (MdID, error) { rmdses, err := mdserver.GetRange( ctx, tlfID, bid, mStatus, revision, revision) if err != nil { return MdID{}, err } else if len(rmdses) == 0 { return MdID{}, nil } else if len(rmdses) > 1 { return MdID{}, fmt.Errorf( "Got more than one object when trying to get rev=%d for branch %s of TLF %s", revision, bid, tlfID) } return crypto.MakeMdID(rmdses[0].MD) } // All functions below are public functions. 
func (j mdJournal) readEarliestRevision() (MetadataRevision, error) { return j.j.readEarliestRevision() } func (j mdJournal) readLatestRevision() (MetadataRevision, error) { return j.j.readLatestRevision() } func (j mdJournal) length() (uint64, error) { return j.j.length() } func (j mdJournal) end() (MetadataRevision, error) { return j.j.end() } func (j mdJournal) getBranchID() BranchID { return j.branchID } func (j mdJournal) getHead() ( ImmutableBareRootMetadata, error) { return j.checkGetParams() } func (j mdJournal) getRange(start, stop MetadataRevision) ( []ImmutableBareRootMetadata, error) { _, err := j.checkGetParams() if err != nil { return nil, err } realStart, entries, err := j.j.getEntryRange(start, stop) if err != nil { return nil, err } var rmds []ImmutableBareRootMetadata for i, entry := range entries { expectedRevision := realStart + MetadataRevision(i) rmd, ts, err := j.getMD(entry.ID, true) if err != nil { return nil, err } if expectedRevision != rmd.RevisionNumber() { panic(fmt.Errorf("expected revision %v, got %v", expectedRevision, rmd.RevisionNumber())) } irmd := MakeImmutableBareRootMetadata(rmd, entry.ID, ts) rmds = append(rmds, irmd) } return rmds, nil } // MDJournalConflictError is an error that is returned when a put // detects a rewritten journal. type MDJournalConflictError struct{} func (e MDJournalConflictError) Error() string { return "MD journal conflict error" } // put verifies and stores the given RootMetadata in the journal, // modifying it as needed. In particular, there are four cases: // // Merged // ------ // rmd is merged. If the journal is empty, then rmd becomes the // initial entry. Otherwise, if the journal has been converted to a // branch, then an MDJournalConflictError error is returned, and the // caller is expected to set the unmerged bit and retry (see case // Unmerged-1). Otherwise, either rmd must be the successor to the // journal's head, in which case it is appended, or it must have the // same revision number as the journal's head, in which case it // replaces the journal's head. (This is necessary since if a journal // put is cancelled and an error is returned, it still happens, and so // we want the retried put (if any) to not conflict with it.) // // Unmerged-1 // ---------- // rmd is unmerged and has a null branch ID. This happens when case // Merged returns with MDJournalConflictError. In this case, the rmd's // branch ID is set to the journal's branch ID and its prevRoot is set // to the last known journal root. It doesn't matter if the journal is // completely drained, since the branch ID and last known root is // remembered in memory. However, since this cache isn't persisted to // disk, we need case Unmerged-3. Similarly to case Merged, this case // then also does append-or-replace. // // Unmerged-2 // ---------- // rmd is unmerged and has a non-null branch ID, and the journal was // non-empty at some time during this process's lifetime. Similarly to // case Merged, if the journal is empty, then rmd becomes the initial // entry, and otherwise, this case does append-or-replace. // // Unmerged-3 // ---------- // rmd is unmerged and has a non-null branch ID, and the journal has // always been empty during this process's lifetime. The branch ID is // assumed to be correct, i.e. retrieved from the remote MDServer, and // rmd becomes the initial entry. 
func (j *mdJournal) put( ctx context.Context, signer cryptoSigner, ekg encryptionKeyGetter, bsplit BlockSplitter, rmd *RootMetadata) ( mdID MdID, err error) { j.log.CDebugf(ctx, "Putting MD for TLF=%s with rev=%s bid=%s", rmd.TlfID(), rmd.Revision(), rmd.BID()) defer func() { if err != nil { j.deferLog.CDebugf(ctx, "Put MD for TLF=%s with rev=%s bid=%s failed with %v", rmd.TlfID(), rmd.Revision(), rmd.BID(), err) } }() head, err := j.getLatest(true) if err != nil { return MdID{}, err } mStatus := rmd.MergedStatus() // Make modifications for the Unmerged cases. if mStatus == Unmerged { var lastMdID MdID if head == (ImmutableBareRootMetadata{}) { lastMdID = j.lastMdID } else { lastMdID = head.mdID } if rmd.BID() == NullBranchID && j.branchID == NullBranchID { return MdID{}, errors.New( "Unmerged put with rmd.BID() == j.branchID == NullBranchID") } if head == (ImmutableBareRootMetadata{}) && j.branchID == NullBranchID { // Case Unmerged-3. j.branchID = rmd.BID() // Revert branch ID if we encounter an error. defer func() { if err != nil { j.branchID = NullBranchID } }() } else if rmd.BID() == NullBranchID { // Case Unmerged-1. j.log.CDebugf( ctx, "Changing branch ID to %s and prev root to %s for MD for TLF=%s with rev=%s", j.branchID, lastMdID, rmd.TlfID(), rmd.Revision()) rmd.SetBranchID(j.branchID) rmd.SetPrevRoot(lastMdID) } else { // Using de Morgan's laws, this branch is // taken when both rmd.BID() is non-null, and // either head is non-empty or j.branchID is // non-empty. So this is most of case // Unmerged-2, and there's nothing to do. // // The remaining part of case Unmerged-2, // where rmd.BID() is non-null, head is empty, // and j.branchID is empty, is an error case, // handled below. } } // The below is code common to all the cases. if (mStatus == Merged) != (rmd.BID() == NullBranchID) { return MdID{}, fmt.Errorf( "mStatus=%s doesn't match bid=%s", mStatus, rmd.BID()) } // If we're trying to push a merged MD onto a branch, return a // conflict error so the caller can retry with an unmerged MD. if mStatus == Merged && j.branchID != NullBranchID { return MdID{}, MDJournalConflictError{} } if rmd.BID() != j.branchID { return MdID{}, fmt.Errorf( "Branch ID mismatch: expected %s, got %s", j.branchID, rmd.BID()) } // Check permissions and consistency with head, if it exists. if head != (ImmutableBareRootMetadata{}) { prevExtra, err := j.getExtraMD( head.GetTLFWriterKeyBundleID(), head.GetTLFReaderKeyBundleID()) ok, err := isWriterOrValidRekey( j.codec, j.uid, head.BareRootMetadata, rmd.bareMd, prevExtra, rmd.extra) if err != nil { return MdID{}, err } if !ok { // TODO: Use a non-server error. return MdID{}, MDServerErrorUnauthorized{} } // Consistency checks if rmd.Revision() != head.RevisionNumber() { err = head.CheckValidSuccessorForServer(head.mdID, rmd.bareMd) if err != nil { return MdID{}, err } } } // Ensure that the block changes are properly unembedded. 
if rmd.data.Changes.Info.BlockPointer == zeroPtr && !bsplit.ShouldEmbedBlockChanges(&rmd.data.Changes) { return MdID{}, errors.New("MD has embedded block changes, but shouldn't") } brmd, err := encryptMDPrivateData( ctx, j.codec, j.crypto, signer, ekg, j.uid, rmd.ReadOnly()) if err != nil { return MdID{}, err } id, err := j.putMD(brmd) if err != nil { return MdID{}, err } if head != (ImmutableBareRootMetadata{}) && rmd.Revision() == head.RevisionNumber() { j.log.CDebugf( ctx, "Replacing head MD for TLF=%s with rev=%s bid=%s", rmd.TlfID(), rmd.Revision(), rmd.BID()) // Don't try and preserve unknown fields from the old // head here -- the new head is in general a different // MD, so the unknown fields from the old head won't // make sense. err = j.j.replaceHead(mdIDJournalEntry{ID: id}) if err != nil { return MdID{}, err } } else { err = j.j.append( brmd.RevisionNumber(), mdIDJournalEntry{ID: id}) if err != nil { return MdID{}, err } } // Since the journal is now non-empty, clear lastMdID. j.lastMdID = MdID{} return id, nil } func (j *mdJournal) clear( ctx context.Context, bid BranchID) (err error) { j.log.CDebugf(ctx, "Clearing journal for branch %s", bid) defer func() { if err != nil { j.deferLog.CDebugf(ctx, "Clearing journal for branch %s failed with %v", bid, err) } }() if bid == NullBranchID { return errors.New("Cannot clear master branch") } if j.branchID != bid { // Nothing to do. j.log.CDebugf(ctx, "Ignoring clear for branch %s while on branch %s", bid, j.branchID) return nil } head, err := j.getHead() if err != nil { return err } if head == (ImmutableBareRootMetadata{}) { // The journal has been flushed but not cleared yet. j.branchID = NullBranchID return nil } if head.BID() != j.branchID { return fmt.Errorf("Head branch ID %s doesn't match journal "+ "branch ID %s while clearing", head.BID(), j.branchID) } earliestRevision, err := j.j.readEarliestRevision() if err != nil { return err } latestRevision, err := j.j.readLatestRevision() if err != nil { return err } _, allEntries, err := j.j.getEntryRange( earliestRevision, latestRevision) if err != nil { return err } j.branchID = NullBranchID // No need to set lastMdID in this case. err = j.j.clear() if err != nil { return nil } // Garbage-collect the old branch entries. TODO: we'll eventually // need a sweeper to clean up entries left behind if we crash // here. for _, entry := range allEntries { err := j.removeMD(entry.ID) if err != nil { return err } } return nil } func (j mdJournal) getExtraMD(wkbID TLFWriterKeyBundleID, rkbID TLFReaderKeyBundleID) ( ExtraMetadata, error) { // MDv3 TODO: implement this if (wkbID != TLFWriterKeyBundleID{}) || (rkbID != TLFReaderKeyBundleID{}) { panic("Bundle IDs are unexpectedly set") } return nil, nil }
1
13,631
Actually, I think it might be easier to do it here. Rough outline:

1. Save a list of the md cache keys and new brmd while looping.
2. After the loop, for each entry:
   2a. Look up the old cache entry. If it doesn't exist, continue.
   2b. Otherwise, make a deep copy of it, and replace its brmd with the newly-generated one.
   2c. Delete the old cache entry, put the new cache entry.

What do you think?

The problem with the current approach is that the gap between dropping the old version and putting the new version is large enough that when we put the new version, we may be kicking out hotter cache entries. In this case, we'd be doing so to a lesser extent. Ideally, we'd be able to replace the entry in the MDCache LRU without changing the LRU info.
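A minimal Go sketch of the outline above. The real KBFS MDCache and metadata types are not reproduced here, so the cacheKey, cachedMD, and mdCache names and method signatures below are assumptions made only so the sketch is self-contained; it illustrates the delete-then-immediately-put pattern the comment asks for, not the actual KBFS API.

package main

// Hypothetical stand-ins for the real KBFS types; names and shapes are
// assumptions made only so the sketch compiles on its own.
type cacheKey struct {
	revision int64 // stand-in for the (TLF, revision, branch) cache key
}

type cachedMD struct {
	brmd interface{} // the (old or rewritten) bare root metadata
}

type mdCache interface {
	Lookup(k cacheKey) (cachedMD, bool)
	Delete(k cacheKey)
	Put(k cacheKey, md cachedMD)
}

// replaceRewrittenEntries applies steps 2a-2c of the outline above: for each
// (key, rewritten brmd) pair saved during the conversion loop, swap the cached
// copy's metadata, skipping entries that were never cached.
func replaceRewrittenEntries(cache mdCache, keys []cacheKey, newBRMDs []interface{}) {
	for i, k := range keys {
		old, ok := cache.Lookup(k) // 2a: look up the old cache entry
		if !ok {
			continue
		}
		// 2b: copy the entry and replace its brmd with the newly generated one
		// (a shallow struct copy here; the outline asks for a deep copy of the
		// real cached type).
		replacement := old
		replacement.brmd = newBRMDs[i]
		// 2c: delete the old entry and put the new one back right away, keeping
		// the delete->put window small.
		cache.Delete(k)
		cache.Put(k, replacement)
	}
}

func main() {}

The point of doing the put immediately after the delete is exactly the concern raised above: it narrows the gap during which hotter entries could be evicted before the rewritten version is cached again.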
keybase-kbfs
go
@@ -253,6 +253,12 @@ namespace Microsoft.VisualStudio.TestPlatform.CommandLine } source = source.Trim(); + + if(!FileHelper.IsRootedPath(source)) + { + source = FileHelper.CombinePath(source); + } + if (!FileHelper.Exists(source)) { throw new CommandLineException(
1
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. namespace Microsoft.VisualStudio.TestPlatform.CommandLine { using System; using System.Collections.Generic; using System.Globalization; using System.Linq; using Microsoft.VisualStudio.TestPlatform.ObjectModel; using Utilities.Helpers; using Utilities.Helpers.Interfaces; using CommandLineResources = Microsoft.VisualStudio.TestPlatform.CommandLine.Resources.Resources; /// <summary> /// Provides access to the command-line options. /// </summary> internal class CommandLineOptions { #region Constants/Readonly /// <summary> /// The default batch size. /// </summary> public const long DefaultBatchSize = 10; /// <summary> /// The use vsix extensions key. /// </summary> public const string UseVsixExtensionsKey = "UseVsixExtensions"; /// <summary> /// The default use vsix extensions value. /// </summary> public const bool DefaultUseVsixExtensionsValue = false; /// <summary> /// The default retrieval timeout for fetching of test results or test cases /// </summary> private readonly TimeSpan DefaultRetrievalTimeout = new TimeSpan(0, 0, 0, 1, 500); #endregion #region PrivateMembers private static CommandLineOptions instance; private List<string> sources = new List<string>(); private Architecture architecture; private Framework frameworkVersion; #endregion /// <summary> /// Gets the instance. /// </summary> internal static CommandLineOptions Instance { get { if (instance == null) { instance = new CommandLineOptions(); } return instance; } } #region Constructor /// <summary> /// Default constructor. /// </summary> private CommandLineOptions() { this.BatchSize = DefaultBatchSize; this.TestRunStatsEventTimeout = this.DefaultRetrievalTimeout; this.FileHelper = new FileHelper(); #if TODO UseVsixExtensions = Utilities.GetAppSettingValue(UseVsixExtensionsKey, false); #endif } #endregion #region Properties /// <summary> /// Specifies whether parallel execution is on or off. /// </summary> public bool Parallel { get; set; } /// <summary> /// Readonly collection of all available test sources /// </summary> public IEnumerable<string> Sources { get { return this.sources.AsReadOnly(); } } /// <summary> /// Specifies whether dynamic code coverage diagnostic data adapter needs to be configured. /// </summary> public bool EnableCodeCoverage { get; set; } /// <summary> /// Specifies whether the Fakes automatic configuration should be disabled. /// </summary> public bool DisableAutoFakes { get; set; } /// <summary> /// Specifies whether vsixExtensions is enabled or not. /// </summary> public bool UseVsixExtensions { get; set; } /// <summary> /// Path to the custom test adapters. /// </summary> public string TestAdapterPath { get; set; } /// <summary> /// Process Id of the process which launched vstest runner /// </summary> public int ParentProcessId { get; set; } /// <summary> /// Port IDE process is listening to /// </summary> public int Port { get; set; } /// <summary> /// Configuration the project is built for e.g. 
Debug/Release /// </summary> public string Configuration { get; set; } /// <summary> /// Directory containing the temporary outputs /// </summary> public string BuildBasePath { get; set; } /// <summary> /// Directory containing the binaries to run /// </summary> public string Output { get; set; } /// <summary> /// Specifies the frequency of the runStats/discoveredTests event /// </summary> public long BatchSize { get; set; } /// <summary> /// Specifies the timeout of the runStats event /// </summary> public TimeSpan TestRunStatsEventTimeout { get; set; } /// <summary> /// Test case filter value for run with sources. /// </summary> public string TestCaseFilterValue { get; set; } /// <summary> /// Specifies the Target Device /// </summary> public string TargetDevice { get; set; } /// <summary> /// Specifies whether the target device has a Windows Phone context or not /// </summary> public bool HasPhoneContext { get { return !string.IsNullOrEmpty(TargetDevice); } } /// <summary> /// Specifies the target platform type for test run. /// </summary> public Architecture TargetArchitecture { get { return this.architecture; } set { this.architecture = value; this.ArchitectureSpecified = true; } } /// <summary> /// Specifies if /Platform has been specified on command line or not. /// </summary> internal bool ArchitectureSpecified { get; private set; } internal IFileHelper FileHelper { get; set; } /// <summary> /// Gets or sets the target Framework version for test run. /// </summary> internal Framework TargetFrameworkVersion { get { return this.frameworkVersion; } set { this.frameworkVersion = value; this.FrameworkVersionSpecified = true; } } /// <summary> /// Gets a value indicating whether /Framework has been specified on command line or not. /// </summary> internal bool FrameworkVersionSpecified { get; private set; } /// <summary> /// Gets or sets the results directory for test run. /// </summary> internal string ResultsDirectory { get; set; } /// <summary> /// Gets or sets the /setting switch value. i.e path to settings file. /// </summary> internal string SettingsFile { get; set; } #endregion #region Public Methods /// <summary> /// Adds a source file to look for tests in. /// </summary> /// <param name="source">Path to source file to look for tests in.</param> public void AddSource(string source) { if (String.IsNullOrWhiteSpace(source)) { throw new CommandLineException(CommandLineResources.CannotBeNullOrEmpty); } source = source.Trim(); if (!FileHelper.Exists(source)) { throw new CommandLineException( string.Format(CultureInfo.CurrentUICulture, CommandLineResources.TestSourceFileNotFound, source)); } if (this.sources.Contains(source, StringComparer.OrdinalIgnoreCase)) { throw new CommandLineException( string.Format(CultureInfo.CurrentCulture, CommandLineResources.DuplicateSource, source)); } this.sources.Add(source); } #endregion #region Internal Methods /// <summary> /// Resets the options. Clears the sources. /// </summary> internal void Reset() { instance = null; } #endregion } }
1
11,663
We need tests for this
microsoft-vstest
.cs
@@ -47,11 +47,11 @@ func initProvider() func() { // `localhost:30080` address. Otherwise, replace `localhost` with the // address of your cluster. If you run the app inside k8s, then you can // probably connect directly to the service through dns - exp, err := otlp.NewExporter( - otlp.WithInsecure(), + config := otlp.NewConnectionConfig(otlp.WithInsecure(), otlp.WithAddress("localhost:30080"), otlp.WithGRPCDialOption(grpc.WithBlock()), // useful for testing ) + exp, err := otlp.NewExporter(config, config) handleErr(err, "failed to create exporter") bsp := sdktrace.NewBatchSpanProcessor(exp)
1
// Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Example using the OTLP exporter + collector + third-party backends. For // information about using the exporter, see: // https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp?tab=doc#example-package-Insecure package main import ( "context" "fmt" "log" "time" "google.golang.org/grpc" "go.opentelemetry.io/otel/api/global" "go.opentelemetry.io/otel/api/metric" apitrace "go.opentelemetry.io/otel/api/trace" "go.opentelemetry.io/otel/exporters/otlp" "go.opentelemetry.io/otel/label" "go.opentelemetry.io/otel/sdk/metric/controller/push" "go.opentelemetry.io/otel/sdk/metric/processor/basic" "go.opentelemetry.io/otel/sdk/metric/selector/simple" "go.opentelemetry.io/otel/sdk/resource" sdktrace "go.opentelemetry.io/otel/sdk/trace" "go.opentelemetry.io/otel/semconv" ) // Initializes an OTLP exporter, and configures the corresponding trace and // metric providers. func initProvider() func() { // If the OpenTelemetry Collector is running on a local cluster (minikube or // microk8s), it should be accessible through the NodePort service at the // `localhost:30080` address. Otherwise, replace `localhost` with the // address of your cluster. If you run the app inside k8s, then you can // probably connect directly to the service through dns exp, err := otlp.NewExporter( otlp.WithInsecure(), otlp.WithAddress("localhost:30080"), otlp.WithGRPCDialOption(grpc.WithBlock()), // useful for testing ) handleErr(err, "failed to create exporter") bsp := sdktrace.NewBatchSpanProcessor(exp) tracerProvider := sdktrace.NewProvider( sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}), sdktrace.WithResource(resource.New( // the service name used to display traces in backends semconv.ServiceNameKey.String("test-service"), )), sdktrace.WithSpanProcessor(bsp), ) pusher := push.New( basic.New( simple.NewWithExactDistribution(), exp, ), exp, push.WithPeriod(2*time.Second), ) global.SetTracerProvider(tracerProvider) global.SetMeterProvider(pusher.Provider()) pusher.Start() return func() { bsp.Shutdown() // shutdown the processor handleErr(exp.Shutdown(context.Background()), "failed to stop exporter") pusher.Stop() // pushes any last exports to the receiver } } func main() { log.Printf("Waiting for connection...") shutdown := initProvider() defer shutdown() tracer := global.Tracer("test-tracer") meter := global.Meter("test-meter") // labels represent additional key-value descriptors that can be bound to a // metric observer or recorder. commonLabels := []label.KeyValue{ label.String("labelA", "chocolate"), label.String("labelB", "raspberry"), label.String("labelC", "vanilla"), } // Recorder metric example valuerecorder := metric.Must(meter). NewFloat64Counter( "an_important_metric", metric.WithDescription("Measures the cumulative epicness of the app"), ).Bind(commonLabels...) 
defer valuerecorder.Unbind() // work begins ctx, span := tracer.Start( context.Background(), "CollectorExporter-Example", apitrace.WithAttributes(commonLabels...)) defer span.End() for i := 0; i < 10; i++ { _, iSpan := tracer.Start(ctx, fmt.Sprintf("Sample-%d", i)) log.Printf("Doing really hard work (%d / 10)\n", i+1) valuerecorder.Add(ctx, 1.0) <-time.After(time.Second) iSpan.End() } log.Printf("Done!") } func handleErr(err error, message string) { if err != nil { log.Fatalf("%s: %v", message, err) } }
1
13,368
NewExporter takes in two arguments: a configuration for the metrics connection and one for the traces
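For illustration only, a hedged sketch of the corresponding call site with two distinct configs, one for the metrics connection and one for the traces connection. This assumes the otlp.NewConnectionConfig API introduced by the patch above; the option names are taken from that diff, and the second collector address is hypothetical.

// Inside initProvider(): build one connection config per signal. The first
// argument to NewExporter configures the metrics connection, the second the
// traces connection (here they point at different, hypothetical endpoints).
metricsConfig := otlp.NewConnectionConfig(
	otlp.WithInsecure(),
	otlp.WithAddress("localhost:30080"),
)
tracesConfig := otlp.NewConnectionConfig(
	otlp.WithInsecure(),
	otlp.WithAddress("localhost:30081"), // hypothetical second collector endpoint
	otlp.WithGRPCDialOption(grpc.WithBlock()), // useful for testing
)
exp, err := otlp.NewExporter(metricsConfig, tracesConfig)
handleErr(err, "failed to create exporter")

Passing the same config twice, as the patch does, keeps the old single-endpoint behaviour while satisfying the new two-argument signature.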
open-telemetry-opentelemetry-go
go
@@ -241,6 +241,15 @@ import connectionManager from 'connectionManager'; navigator.mediaSession.setActionHandler('seekforward', function () { execute('fastForward'); }); + + /* eslint-disable-next-line compat/compat */ + navigator.mediaSession.setActionHandler('seekto', function (object) { + let item = playbackManager.getPlayerState(playbackManager.getCurrentPlayer()).NowPlayingItem; + // Convert to ms + let duration = parseInt(item.RunTimeTicks ? (item.RunTimeTicks / 10000) : 0); + let wantedTime = object.seekTime * 1000; + playbackManager.seekPercent(wantedTime / duration * 100, currentPlayer); + }); } events.on(playbackManager, 'playerchange', function () {
1
import playbackManager from 'playbackManager'; import nowPlayingHelper from 'nowPlayingHelper'; import events from 'events'; import connectionManager from 'connectionManager'; /* eslint-disable indent */ // Reports media playback to the device for lock screen control let currentPlayer; function seriesImageUrl(item, options = {}) { options.type = options.type || 'Primary'; if (item.Type !== 'Episode') { return null; } else if (options.type === 'Primary' && item.SeriesPrimaryImageTag) { options.tag = item.SeriesPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } else if (options.type === 'Thumb') { if (item.SeriesThumbImageTag) { options.tag = item.SeriesThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options); } else if (item.ParentThumbImageTag) { options.tag = item.ParentThumbImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.ParentThumbItemId, options); } } return null; } function imageUrl(item, options = {}) { options.type = options.type || 'Primary'; if (item.ImageTags && item.ImageTags[options.type]) { options.tag = item.ImageTags[options.type]; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.Id, options); } else if (item.AlbumId && item.AlbumPrimaryImageTag) { options.tag = item.AlbumPrimaryImageTag; return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.AlbumId, options); } return null; } function getImageUrl(item, imageOptions = {}) { const url = seriesImageUrl(item, imageOptions) || imageUrl(item, imageOptions); if (url) { const height = imageOptions.height || imageOptions.maxHeight; return { src: url, sizes: height + 'x' + height }; } else { return null; } } function getImageUrls(item, imageSizes = [96, 128, 192, 256, 384, 512]) { const list = []; imageSizes.forEach((size) => { const url = getImageUrl(item, {height: size}); if (url !== null) { list.push(url); } }); return list; } function updatePlayerState(player, state, eventName) { // Don't go crazy reporting position changes if (eventName === 'timeupdate') { // Only report if this item hasn't been reported yet, or if there's an actual playback change. // Don't report on simple time updates return; } const item = state.NowPlayingItem; if (!item) { hideMediaControls(); return; } if (eventName === 'init') { // transform "init" event into "timeupdate" to restraint update rate eventName = 'timeupdate'; } const isVideo = item.MediaType === 'Video'; const isLocalPlayer = player.isLocalPlayer || false; // Local players do their own notifications if (isLocalPlayer && isVideo) { return; } const playState = state.PlayState || {}; const parts = nowPlayingHelper.getNowPlayingNames(item); const artist = parts[parts.length - 1].text; const title = parts.length === 1 ? '' : parts[0].text; const album = item.Album || ''; const itemId = item.Id; // Convert to ms const duration = parseInt(item.RunTimeTicks ? (item.RunTimeTicks / 10000) : 0); const currentTime = parseInt(playState.PositionTicks ? 
(playState.PositionTicks / 10000) : 0); const isPaused = playState.IsPaused || false; const canSeek = playState.CanSeek || false; if ('mediaSession' in navigator) { /* eslint-disable-next-line compat/compat */ navigator.mediaSession.metadata = new MediaMetadata({ title: title, artist: artist, album: album, artwork: getImageUrls(item) }); } else { let itemImageUrl = seriesImageUrl(item, { maxHeight: 3000 }) || imageUrl(item, { maxHeight: 3000 }); window.NativeShell.updateMediaSession({ action: eventName, isLocalPlayer: isLocalPlayer, itemId: itemId, title: title, artist: artist, album: album, duration: duration, position: currentTime, imageUrl: itemImageUrl, canSeek: canSeek, isPaused: isPaused }); } } function onGeneralEvent(e) { const state = playbackManager.getPlayerState(this); updatePlayerState(this, state, e.type); } function onStateChanged(e, state) { updatePlayerState(this, state, 'statechange'); } function onPlaybackStart(e, state) { updatePlayerState(this, state, e.type); } function onPlaybackStopped() { hideMediaControls(); } function releaseCurrentPlayer() { if (currentPlayer) { events.off(currentPlayer, 'playbackstart', onPlaybackStart); events.off(currentPlayer, 'playbackstop', onPlaybackStopped); events.off(currentPlayer, 'unpause', onGeneralEvent); events.off(currentPlayer, 'pause', onGeneralEvent); events.off(currentPlayer, 'statechange', onStateChanged); events.off(currentPlayer, 'timeupdate', onGeneralEvent); currentPlayer = null; hideMediaControls(); } } function hideMediaControls() { if ('mediaSession' in navigator) { /* eslint-disable-next-line compat/compat */ navigator.mediaSession.metadata = null; } else { window.NativeShell.hideMediaSession(); } } function bindToPlayer(player) { releaseCurrentPlayer(); if (!player) { return; } currentPlayer = player; const state = playbackManager.getPlayerState(player); updatePlayerState(player, state, 'init'); events.on(currentPlayer, 'playbackstart', onPlaybackStart); events.on(currentPlayer, 'playbackstop', onPlaybackStopped); events.on(currentPlayer, 'unpause', onGeneralEvent); events.on(currentPlayer, 'pause', onGeneralEvent); events.on(currentPlayer, 'statechange', onStateChanged); events.on(currentPlayer, 'timeupdate', onGeneralEvent); } function execute(name) { playbackManager[name](currentPlayer); } if ('mediaSession' in navigator) { /* eslint-disable-next-line compat/compat */ navigator.mediaSession.setActionHandler('previoustrack', function () { execute('previousTrack'); }); /* eslint-disable-next-line compat/compat */ navigator.mediaSession.setActionHandler('nexttrack', function () { execute('nextTrack'); }); /* eslint-disable-next-line compat/compat */ navigator.mediaSession.setActionHandler('play', function () { execute('unpause'); }); /* eslint-disable-next-line compat/compat */ navigator.mediaSession.setActionHandler('pause', function () { execute('pause'); }); /* eslint-disable-next-line compat/compat */ navigator.mediaSession.setActionHandler('seekbackward', function () { execute('rewind'); }); /* eslint-disable-next-line compat/compat */ navigator.mediaSession.setActionHandler('seekforward', function () { execute('fastForward'); }); } events.on(playbackManager, 'playerchange', function () { bindToPlayer(playbackManager.getCurrentPlayer()); }); bindToPlayer(playbackManager.getCurrentPlayer()); /* eslint-enable indent */
1
16,283
Why not use currentPlayer instead of playbackManager.getCurrentPlayer()?
jellyfin-jellyfin-web
js
@@ -15,7 +15,8 @@ class UsersController < Clearance::UsersController def create_user_from_params params.require(:user).permit( :email, :password, :name, :github_username, :bio, :organization, - :address1, :address2, :city, :state, :zip_code, :country + :address1, :address2, :city, :state, :zip_code, :country, + :unsubscribed_from_emails ) end
1
class UsersController < Clearance::UsersController before_filter :require_login, only: [:edit, :update] def edit end def update if current_user.update_attributes(create_user_from_params) redirect_to my_account_path, notice: I18n.t("users.flashes.update.success") else render action: :edit end end def create_user_from_params params.require(:user).permit( :email, :password, :name, :github_username, :bio, :organization, :address1, :address2, :city, :state, :zip_code, :country ) end def return_to session[:return_to] || params[:return_to] end end
1
17,531
Avoid comma after the last parameter of a method call, unless each item is on its own line.
thoughtbot-upcase
rb
@@ -52,7 +52,12 @@ type Config struct { // DataDir is the directory data is saved to in order to preserve state // across agent restarts. It is only used if "Checkpoint" is true as well. + // It is also used to keep the metadata of containers managed by the agent + // regardless of whether "Checkpoint" is set. DataDir string + // DataDirOnHost is the directory in the instance from which we mount + // DataDir to the ecs-agent container and to agent managed containers + DataDirOnHost string // Checkpoint configures whether data should be periodically to a checkpoint // file, in DataDir, such that on instance or agent restarts it will resume // as the same ContainerInstance. It defaults to false.
1
// Copyright 2014-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package config import ( "encoding/json" "time" "github.com/aws/amazon-ecs-agent/agent/engine/dockerclient" ) type Config struct { // DEPRECATED // ClusterArn is the Name or full ARN of a Cluster to register into. It has // been deprecated (and will eventually be removed) in favor of Cluster ClusterArn string `deprecated:"Please use Cluster instead"` // Cluster can either be the Name or full ARN of a Cluster. This is the // cluster the agent should register this ContainerInstance into. If this // value is not set, it will default to "default" Cluster string `trim:"true"` // APIEndpoint is the endpoint, such as "ecs.us-east-1.amazonaws.com", to // make calls against. If this value is not set, it will default to the // endpoint for your current AWSRegion APIEndpoint string `trim:"true"` // DockerEndpoint is the address the agent will attempt to connect to the // Docker daemon at. This should have the same value as "DOCKER_HOST" // normally would to interact with the daemon. It defaults to // unix:///var/run/docker.sock DockerEndpoint string // AWSRegion is the region to run in (such as "us-east-1"). This value will // be inferred from the EC2 metadata service, but if it cannot be found this // will be fatal. AWSRegion string `missing:"fatal" trim:"true"` // ReservedPorts is an array of ports which should be registerd as // unavailable. If not set, they default to [22,2375,2376,51678]. ReservedPorts []uint16 // ReservedPortsUDP is an array of UDP ports which should be registered as // unavailable. If not set, it defaults to []. ReservedPortsUDP []uint16 // DataDir is the directory data is saved to in order to preserve state // across agent restarts. It is only used if "Checkpoint" is true as well. DataDir string // Checkpoint configures whether data should be periodically to a checkpoint // file, in DataDir, such that on instance or agent restarts it will resume // as the same ContainerInstance. It defaults to false. Checkpoint bool // EngineAuthType configures what type of data is in EngineAuthData. // Supported types, right now, can be found in the dockerauth package: https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth EngineAuthType string `trim:"true"` // EngineAuthData contains authentication data. Please see the documentation // for EngineAuthType for more information. EngineAuthData *SensitiveRawMessage // UpdatesEnabled specifies whether updates should be applied to this agent. // Default true UpdatesEnabled bool // UpdateDownloadDir specifies where new agent versions should be placed // within the container in order for the external updating process to // correctly handle them. 
UpdateDownloadDir string // DisableMetrics configures whether task utilization metrics should be // sent to the ECS telemetry endpoint DisableMetrics bool // ReservedMemory specifies the amount of memory (in MB) to reserve for things // other than containers managed by ECS ReservedMemory uint16 // DockerStopTimeout specifies the amount time before a SIGKILL is issued to // containers managed by ECS DockerStopTimeout time.Duration // AvailableLoggingDrivers specifies the logging drivers available for use // with Docker. If not set, it defaults to ["json-file"]. AvailableLoggingDrivers []dockerclient.LoggingDriver // PrivilegedDisabled specified whether the Agent is capable of launching // tasks with privileged containers PrivilegedDisabled bool // SELinxuCapable specifies whether the Agent is capable of using SELinux // security options SELinuxCapable bool // AppArmorCapable specifies whether the Agent is capable of using AppArmor // security options AppArmorCapable bool // TaskCleanupWaitDuration specifies the time to wait after a task is stopped // until cleanup of task resources is started. TaskCleanupWaitDuration time.Duration // TaskIAMRoleEnabled specifies if the Agent is capable of launching // tasks with IAM Roles. TaskIAMRoleEnabled bool // CredentialsAuditLogFile specifies the path/filename of the audit log. CredentialsAuditLogFile string // CredentialsAuditLogEnabled specifies whether audit logging is disabled. CredentialsAuditLogDisabled bool // TaskIAMRoleEnabledForNetworkHost specifies if the Agent is capable of launching // tasks with IAM Roles when networkMode is set to 'host' TaskIAMRoleEnabledForNetworkHost bool // ImageCleanupDisabled specifies whether the Agent will periodically perform // automated image cleanup ImageCleanupDisabled bool // MinimumImageDeletionAge specifies the minimum time since it was pulled // before it can be deleted MinimumImageDeletionAge time.Duration // ImageCleanupInterval specifies the time to wait before performing the image // cleanup since last time it was executed ImageCleanupInterval time.Duration // NumImagesToDeletePerCycle specifies the num of image to delete every time // when Agent performs cleanup NumImagesToDeletePerCycle int // InstanceAttributes contains key/value pairs representing // attributes to be associated with this instance within the // ECS service and used to influence behavior such as launch // placement. InstanceAttributes map[string]string // Set if clients validate ssl certificates. Used mainly for testing AcceptInsecureCert bool `json:"-"` } // SensitiveRawMessage is a struct to store some data that should not be logged // or printed. // This struct is a Stringer which will not print its contents with 'String'. // It is a json.Marshaler and json.Unmarshaler and will present its actual // contents in plaintext when read/written from/to json. type SensitiveRawMessage struct { contents json.RawMessage } // NewSensitiveRawMessage returns a new encapsulated json.RawMessage or nil if // the data is empty. 
It cannot be accidentally logged via .String/.GoString/%v/%#v func NewSensitiveRawMessage(data json.RawMessage) *SensitiveRawMessage { if len(data) == 0 { return nil } return &SensitiveRawMessage{contents: data} } func (data SensitiveRawMessage) String() string { return "[redacted]" } func (data SensitiveRawMessage) GoString() string { return "[redacted]" } func (data SensitiveRawMessage) Contents() json.RawMessage { return data.contents } func (data SensitiveRawMessage) MarshalJSON() ([]byte, error) { return data.contents, nil } func (data *SensitiveRawMessage) UnmarshalJSON(jsonData []byte) error { data.contents = json.RawMessage(jsonData) return nil }
1
15,951
You should remove the outdated sentence.
aws-amazon-ecs-agent
go
@@ -158,7 +158,7 @@ public class MessageCompose extends K9Activity implements OnClickListener, "com.fsck.k9.activity.MessageCompose.quotedTextFormat"; private static final String STATE_KEY_NUM_ATTACHMENTS_LOADING = "numAttachmentsLoading"; private static final String STATE_KEY_WAITING_FOR_ATTACHMENTS = "waitingForAttachments"; - + private static final String STATE_FIRST_TIME_EMPTY_SUBJECT = "firstTimeEmptySubject"; private static final String LOADER_ARG_ATTACHMENT = "attachment"; private static final String FRAGMENT_WAITING_FOR_ATTACHMENT = "waitingForAttachment";
1
package com.fsck.k9.activity; import java.text.DateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; import android.annotation.SuppressLint; import android.annotation.TargetApi; import android.app.AlertDialog; import android.app.AlertDialog.Builder; import android.app.Dialog; import android.app.LoaderManager; import android.app.PendingIntent; import android.content.ClipData; import android.content.Context; import android.content.DialogInterface; import android.content.Intent; import android.content.IntentSender.SendIntentException; import android.content.Loader; import android.content.pm.ActivityInfo; import android.net.Uri; import android.os.AsyncTask; import android.os.Build; import android.os.Bundle; import android.os.Handler; import android.os.Parcelable; import android.support.annotation.Nullable; import android.text.TextUtils; import android.text.TextWatcher; import android.util.Log; import android.util.TypedValue; import android.view.ContextThemeWrapper; import android.view.LayoutInflater; import android.view.Menu; import android.view.MenuItem; import android.view.View; import android.view.View.OnClickListener; import android.view.View.OnFocusChangeListener; import android.view.ViewGroup; import android.view.Window; import android.webkit.WebView; import android.webkit.WebViewClient; import android.widget.BaseAdapter; import android.widget.Button; import android.widget.EditText; import android.widget.ImageButton; import android.widget.LinearLayout; import android.widget.TextView; import android.widget.Toast; import com.fsck.k9.Account; import com.fsck.k9.Account.MessageFormat; import com.fsck.k9.Account.QuoteStyle; import com.fsck.k9.FontSizes; import com.fsck.k9.Identity; import com.fsck.k9.K9; import com.fsck.k9.Preferences; import com.fsck.k9.R; import com.fsck.k9.activity.compose.ComposeCryptoStatus; import com.fsck.k9.activity.compose.CryptoSettingsDialog.OnCryptoModeChangedListener; import com.fsck.k9.activity.compose.RecipientMvpView; import com.fsck.k9.activity.compose.RecipientPresenter; import com.fsck.k9.activity.compose.RecipientPresenter.CryptoMode; import com.fsck.k9.activity.loader.AttachmentContentLoader; import com.fsck.k9.activity.loader.AttachmentInfoLoader; import com.fsck.k9.activity.misc.Attachment; import com.fsck.k9.controller.MessagingController; import com.fsck.k9.controller.MessagingListener; import com.fsck.k9.fragment.ProgressDialogFragment; import com.fsck.k9.fragment.ProgressDialogFragment.CancelListener; import com.fsck.k9.helper.Contacts; import com.fsck.k9.helper.HtmlConverter; import com.fsck.k9.helper.IdentityHelper; import com.fsck.k9.helper.MailTo; import com.fsck.k9.helper.SimpleTextWatcher; import com.fsck.k9.helper.Utility; import com.fsck.k9.mail.Address; import com.fsck.k9.mail.Flag; import com.fsck.k9.mail.Message; import com.fsck.k9.mail.Message.RecipientType; import com.fsck.k9.mail.MessagingException; import com.fsck.k9.mail.Multipart; import com.fsck.k9.mail.Part; import com.fsck.k9.mail.internet.MessageExtractor; import com.fsck.k9.mail.internet.MimeMessage; import com.fsck.k9.mail.internet.MimeUtility; import com.fsck.k9.mailstore.LocalBodyPart; import com.fsck.k9.mailstore.LocalMessage; import com.fsck.k9.message.IdentityField; import com.fsck.k9.message.IdentityHeaderParser; import com.fsck.k9.message.InsertableHtmlContent; import 
com.fsck.k9.message.MessageBuilder; import com.fsck.k9.message.PgpMessageBuilder; import com.fsck.k9.message.QuotedTextMode; import com.fsck.k9.message.SimpleMessageBuilder; import com.fsck.k9.message.SimpleMessageFormat; import com.fsck.k9.provider.AttachmentProvider; import com.fsck.k9.ui.EolConvertingEditText; import com.fsck.k9.view.MessageWebView; import org.htmlcleaner.CleanerProperties; import org.htmlcleaner.HtmlCleaner; import org.htmlcleaner.SimpleHtmlSerializer; import org.htmlcleaner.TagNode; import org.openintents.openpgp.IOpenPgpService2; import org.openintents.openpgp.util.OpenPgpApi; import org.openintents.openpgp.util.OpenPgpServiceConnection; import org.openintents.openpgp.util.OpenPgpServiceConnection.OnBound; @SuppressWarnings("deprecation") public class MessageCompose extends K9Activity implements OnClickListener, CancelListener, OnFocusChangeListener, OnCryptoModeChangedListener, MessageBuilder.Callback { private static final int DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE = 1; private static final int DIALOG_CONFIRM_DISCARD_ON_BACK = 2; private static final int DIALOG_CHOOSE_IDENTITY = 3; private static final int DIALOG_CONFIRM_DISCARD = 4; private static final long INVALID_DRAFT_ID = MessagingController.INVALID_MESSAGE_ID; private static final String ACTION_COMPOSE = "com.fsck.k9.intent.action.COMPOSE"; private static final String ACTION_REPLY = "com.fsck.k9.intent.action.REPLY"; private static final String ACTION_REPLY_ALL = "com.fsck.k9.intent.action.REPLY_ALL"; private static final String ACTION_FORWARD = "com.fsck.k9.intent.action.FORWARD"; private static final String ACTION_EDIT_DRAFT = "com.fsck.k9.intent.action.EDIT_DRAFT"; private static final String EXTRA_ACCOUNT = "account"; private static final String EXTRA_MESSAGE_BODY = "messageBody"; private static final String EXTRA_MESSAGE_REFERENCE = "message_reference"; private static final String STATE_KEY_ATTACHMENTS = "com.fsck.k9.activity.MessageCompose.attachments"; private static final String STATE_KEY_QUOTED_TEXT_MODE = "com.fsck.k9.activity.MessageCompose.QuotedTextShown"; private static final String STATE_KEY_SOURCE_MESSAGE_PROCED = "com.fsck.k9.activity.MessageCompose.stateKeySourceMessageProced"; private static final String STATE_KEY_DRAFT_ID = "com.fsck.k9.activity.MessageCompose.draftId"; private static final String STATE_KEY_HTML_QUOTE = "com.fsck.k9.activity.MessageCompose.HTMLQuote"; private static final String STATE_IDENTITY_CHANGED = "com.fsck.k9.activity.MessageCompose.identityChanged"; private static final String STATE_IDENTITY = "com.fsck.k9.activity.MessageCompose.identity"; private static final String STATE_IN_REPLY_TO = "com.fsck.k9.activity.MessageCompose.inReplyTo"; private static final String STATE_REFERENCES = "com.fsck.k9.activity.MessageCompose.references"; private static final String STATE_KEY_READ_RECEIPT = "com.fsck.k9.activity.MessageCompose.messageReadReceipt"; private static final String STATE_KEY_DRAFT_NEEDS_SAVING = "com.fsck.k9.activity.MessageCompose.draftNeedsSaving"; private static final String STATE_KEY_FORCE_PLAIN_TEXT = "com.fsck.k9.activity.MessageCompose.forcePlainText"; private static final String STATE_KEY_QUOTED_TEXT_FORMAT = "com.fsck.k9.activity.MessageCompose.quotedTextFormat"; private static final String STATE_KEY_NUM_ATTACHMENTS_LOADING = "numAttachmentsLoading"; private static final String STATE_KEY_WAITING_FOR_ATTACHMENTS = "waitingForAttachments"; private static final String LOADER_ARG_ATTACHMENT = "attachment"; private static final String 
FRAGMENT_WAITING_FOR_ATTACHMENT = "waitingForAttachment"; private static final int MSG_PROGRESS_ON = 1; private static final int MSG_PROGRESS_OFF = 2; private static final int MSG_SKIPPED_ATTACHMENTS = 3; private static final int MSG_SAVED_DRAFT = 4; private static final int MSG_DISCARDED_DRAFT = 5; private static final int MSG_PERFORM_STALLED_ACTION = 6; private static final int ACTIVITY_REQUEST_PICK_ATTACHMENT = 1; private static final int REQUEST_MASK_RECIPIENT_PRESENTER = (1<<8); private static final int REQUEST_MASK_MESSAGE_BUILDER = (2<<8); /** * Regular expression to remove the first localized "Re:" prefix in subjects. * * Currently: * - "Aw:" (german: abbreviation for "Antwort") */ private static final Pattern PREFIX = Pattern.compile("^AW[:\\s]\\s*", Pattern.CASE_INSENSITIVE); /** * The account used for message composition. */ private Account mAccount; private Contacts mContacts; /** * This identity's settings are used for message composition. * Note: This has to be an identity of the account {@link #mAccount}. */ private Identity mIdentity; private boolean mIdentityChanged = false; private boolean mSignatureChanged = false; /** * Reference to the source message (in case of reply, forward, or edit * draft actions). */ private MessageReference mMessageReference; private Message mSourceMessage; /** * "Original" message body * * <p> * The contents of this string will be used instead of the body of a referenced message when * replying to or forwarding a message.<br> * Right now this is only used when replying to a signed or encrypted message. It then contains * the stripped/decrypted body of that message. * </p> * <p><strong>Note:</strong> * When this field is not {@code null} we assume that the message we are composing right now * should be encrypted. * </p> */ private String mSourceMessageBody; /** * Indicates that the source message has been processed at least once and should not * be processed on any subsequent loads. This protects us from adding attachments that * have already been added from the restore of the view state. */ private boolean mSourceMessageProcessed = false; private int mMaxLoaderId = 0; private RecipientPresenter recipientPresenter; private MessageBuilder currentMessageBuilder; private boolean mFinishAfterDraftSaved; @Override public void onFocusChange(View v, boolean hasFocus) { switch(v.getId()) { case R.id.message_content: case R.id.subject: if (hasFocus) { recipientPresenter.onNonRecipientFieldFocused(); } break; } } @Override public void onCryptoModeChanged(CryptoMode cryptoMode) { recipientPresenter.onCryptoModeChanged(cryptoMode); } enum Action { COMPOSE, REPLY, REPLY_ALL, FORWARD, EDIT_DRAFT } /** * Contains the action we're currently performing (e.g. replying to a message) */ private Action mAction; private boolean mReadReceipt = false; private QuotedTextMode mQuotedTextMode = QuotedTextMode.NONE; /** * Contains the format of the quoted text (text vs. HTML). */ private SimpleMessageFormat mQuotedTextFormat; /** * When this it {@code true} the message format setting is ignored and we're always sending * a text/plain message. 
*/ private boolean mForcePlainText = false; private TextView mChooseIdentityButton; private EditText mSubjectView; private EolConvertingEditText mSignatureView; private EolConvertingEditText mMessageContentView; private LinearLayout mAttachments; private Button mQuotedTextShow; private View mQuotedTextBar; private ImageButton mQuotedTextEdit; private EolConvertingEditText mQuotedText; private MessageWebView mQuotedHTML; private InsertableHtmlContent mQuotedHtmlContent; // Container for HTML reply as it's being built. private String mOpenPgpProvider; private OpenPgpServiceConnection mOpenPgpServiceConnection; private String mReferences; private String mInReplyTo; private boolean mSourceProcessed = false; /** * The currently used message format. * * <p> * <strong>Note:</strong> * Don't modify this field directly. Use {@link #updateMessageFormat()}. * </p> */ private SimpleMessageFormat mMessageFormat; private QuoteStyle mQuoteStyle; private boolean draftNeedsSaving = false; private boolean isInSubActivity = false; /** * The database ID of this message's draft. This is used when saving drafts so the message in * the database is updated instead of being created anew. This property is INVALID_DRAFT_ID * until the first save. */ private long mDraftId = INVALID_DRAFT_ID; /** * Number of attachments currently being fetched. */ private int mNumAttachmentsLoading = 0; private enum WaitingAction { NONE, SEND, SAVE } /** * Specifies what action to perform once attachments have been fetched. */ private WaitingAction mWaitingForAttachments = WaitingAction.NONE; private Handler mHandler = new Handler() { @Override public void handleMessage(android.os.Message msg) { switch (msg.what) { case MSG_PROGRESS_ON: setProgressBarIndeterminateVisibility(true); break; case MSG_PROGRESS_OFF: setProgressBarIndeterminateVisibility(false); break; case MSG_SKIPPED_ATTACHMENTS: Toast.makeText( MessageCompose.this, getString(R.string.message_compose_attachments_skipped_toast), Toast.LENGTH_LONG).show(); break; case MSG_SAVED_DRAFT: mDraftId = (Long) msg.obj; Toast.makeText( MessageCompose.this, getString(R.string.message_saved_toast), Toast.LENGTH_LONG).show(); break; case MSG_DISCARDED_DRAFT: Toast.makeText( MessageCompose.this, getString(R.string.message_discarded_toast), Toast.LENGTH_LONG).show(); break; case MSG_PERFORM_STALLED_ACTION: performStalledAction(); break; default: super.handleMessage(msg); break; } } }; private Listener mListener = new Listener(); private FontSizes mFontSizes = K9.getFontSizes(); /** * Compose a new message using the given account. If account is null the default account * will be used. */ public static void actionCompose(Context context, Account account) { String accountUuid = (account == null) ? Preferences.getPreferences(context).getDefaultAccount().getUuid() : account.getUuid(); Intent i = new Intent(context, MessageCompose.class); i.putExtra(EXTRA_ACCOUNT, accountUuid); i.setAction(ACTION_COMPOSE); context.startActivity(i); } /** * Get intent for composing a new message as a reply to the given message. If replyAll is true * the function is reply all instead of simply reply. 
* @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message */ public static Intent getActionReplyIntent( Context context, LocalMessage message, boolean replyAll, String messageBody) { Intent i = new Intent(context, MessageCompose.class); i.putExtra(EXTRA_MESSAGE_BODY, messageBody); i.putExtra(EXTRA_MESSAGE_REFERENCE, message.makeMessageReference()); if (replyAll) { i.setAction(ACTION_REPLY_ALL); } else { i.setAction(ACTION_REPLY); } return i; } public static Intent getActionReplyIntent(Context context, MessageReference messageReference) { Intent intent = new Intent(context, MessageCompose.class); intent.setAction(ACTION_REPLY); intent.putExtra(EXTRA_MESSAGE_REFERENCE, messageReference); intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK); return intent; } /** * Compose a new message as a reply to the given message. If replyAll is true the function * is reply all instead of simply reply. * @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message */ public static void actionReply( Context context, LocalMessage message, boolean replyAll, String messageBody) { context.startActivity(getActionReplyIntent(context, message, replyAll, messageBody)); } /** * Compose a new message as a forward of the given message. * @param messageBody optional, for decrypted messages, null if it should be grabbed from the given message */ public static void actionForward( Context context, LocalMessage message, String messageBody) { Intent i = new Intent(context, MessageCompose.class); i.putExtra(EXTRA_MESSAGE_BODY, messageBody); i.putExtra(EXTRA_MESSAGE_REFERENCE, message.makeMessageReference()); i.setAction(ACTION_FORWARD); context.startActivity(i); } /** * Continue composition of the given message. This action modifies the way this Activity * handles certain actions. * Save will attempt to replace the message in the given folder with the updated version. * Discard will delete the message from the given folder. 
*/ public static void actionEditDraft(Context context, MessageReference messageReference) { Intent i = new Intent(context, MessageCompose.class); i.putExtra(EXTRA_MESSAGE_REFERENCE, messageReference); i.setAction(ACTION_EDIT_DRAFT); context.startActivity(i); } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); if (UpgradeDatabases.actionUpgradeDatabases(this, getIntent())) { finish(); return; } requestWindowFeature(Window.FEATURE_INDETERMINATE_PROGRESS); if (K9.getK9ComposerThemeSetting() != K9.Theme.USE_GLOBAL) { // theme the whole content according to the theme (except the action bar) ContextThemeWrapper themeContext = new ContextThemeWrapper(this, K9.getK9ThemeResourceId(K9.getK9ComposerTheme())); @SuppressLint("InflateParams") // this is the top level activity element, it has no root View v = LayoutInflater.from(themeContext).inflate(R.layout.message_compose, null); TypedValue outValue = new TypedValue(); // background color needs to be forced themeContext.getTheme().resolveAttribute(R.attr.messageViewBackgroundColor, outValue, true); v.setBackgroundColor(outValue.data); setContentView(v); } else { setContentView(R.layout.message_compose); } final Intent intent = getIntent(); mMessageReference = intent.getParcelableExtra(EXTRA_MESSAGE_REFERENCE); mSourceMessageBody = intent.getStringExtra(EXTRA_MESSAGE_BODY); if (K9.DEBUG && mSourceMessageBody != null) { Log.d(K9.LOG_TAG, "Composing message with explicitly specified message body."); } final String accountUuid = (mMessageReference != null) ? mMessageReference.getAccountUuid() : intent.getStringExtra(EXTRA_ACCOUNT); mAccount = Preferences.getPreferences(this).getAccount(accountUuid); if (mAccount == null) { mAccount = Preferences.getPreferences(this).getDefaultAccount(); } if (mAccount == null) { /* * There are no accounts set up. This should not have happened. Prompt the * user to set up an account as an acceptable bailout. */ startActivity(new Intent(this, Accounts.class)); draftNeedsSaving = false; finish(); return; } mContacts = Contacts.getInstance(MessageCompose.this); mChooseIdentityButton = (TextView) findViewById(R.id.identity); mChooseIdentityButton.setOnClickListener(this); RecipientMvpView recipientMvpView = new RecipientMvpView(this); recipientPresenter = new RecipientPresenter(this, recipientMvpView, mAccount); mSubjectView = (EditText) findViewById(R.id.subject); mSubjectView.getInputExtras(true).putBoolean("allowEmoji", true); EolConvertingEditText upperSignature = (EolConvertingEditText)findViewById(R.id.upper_signature); EolConvertingEditText lowerSignature = (EolConvertingEditText)findViewById(R.id.lower_signature); mMessageContentView = (EolConvertingEditText)findViewById(R.id.message_content); mMessageContentView.getInputExtras(true).putBoolean("allowEmoji", true); mAttachments = (LinearLayout)findViewById(R.id.attachments); mQuotedTextShow = (Button)findViewById(R.id.quoted_text_show); mQuotedTextBar = findViewById(R.id.quoted_text_bar); mQuotedTextEdit = (ImageButton)findViewById(R.id.quoted_text_edit); ImageButton mQuotedTextDelete = (ImageButton) findViewById(R.id.quoted_text_delete); mQuotedText = (EolConvertingEditText)findViewById(R.id.quoted_text); mQuotedText.getInputExtras(true).putBoolean("allowEmoji", true); mQuotedHTML = (MessageWebView) findViewById(R.id.quoted_html); mQuotedHTML.configure(); // Disable the ability to click links in the quoted HTML page. 
I think this is a nice feature, but if someone // feels this should be a preference (or should go away all together), I'm ok with that too. -achen 20101130 mQuotedHTML.setWebViewClient(new WebViewClient() { @Override public boolean shouldOverrideUrlLoading(WebView view, String url) { return true; } }); TextWatcher draftNeedsChangingTextWatcher = new SimpleTextWatcher() { @Override public void onTextChanged(CharSequence s, int start, int before, int count) { draftNeedsSaving = true; } }; TextWatcher signTextWatcher = new SimpleTextWatcher() { @Override public void onTextChanged(CharSequence s, int start, int before, int count) { draftNeedsSaving = true; mSignatureChanged = true; } }; recipientMvpView.addTextChangedListener(draftNeedsChangingTextWatcher); mSubjectView.addTextChangedListener(draftNeedsChangingTextWatcher); mMessageContentView.addTextChangedListener(draftNeedsChangingTextWatcher); mQuotedText.addTextChangedListener(draftNeedsChangingTextWatcher); /* * We set this to invisible by default. Other methods will turn it back on if it's * needed. */ showOrHideQuotedText(QuotedTextMode.NONE); mSubjectView.setOnFocusChangeListener(this); mMessageContentView.setOnFocusChangeListener(this); mQuotedTextShow.setOnClickListener(this); mQuotedTextEdit.setOnClickListener(this); mQuotedTextDelete.setOnClickListener(this); if (savedInstanceState != null) { /* * This data gets used in onCreate, so grab it here instead of onRestoreInstanceState */ mSourceMessageProcessed = savedInstanceState.getBoolean(STATE_KEY_SOURCE_MESSAGE_PROCED, false); } if (initFromIntent(intent)) { mAction = Action.COMPOSE; draftNeedsSaving = true; } else { String action = intent.getAction(); if (ACTION_COMPOSE.equals(action)) { mAction = Action.COMPOSE; } else if (ACTION_REPLY.equals(action)) { mAction = Action.REPLY; } else if (ACTION_REPLY_ALL.equals(action)) { mAction = Action.REPLY_ALL; } else if (ACTION_FORWARD.equals(action)) { mAction = Action.FORWARD; } else if (ACTION_EDIT_DRAFT.equals(action)) { mAction = Action.EDIT_DRAFT; } else { // This shouldn't happen Log.w(K9.LOG_TAG, "MessageCompose was started with an unsupported action"); mAction = Action.COMPOSE; } } if (mIdentity == null) { mIdentity = mAccount.getIdentity(0); } if (mAccount.isSignatureBeforeQuotedText()) { mSignatureView = upperSignature; lowerSignature.setVisibility(View.GONE); } else { mSignatureView = lowerSignature; upperSignature.setVisibility(View.GONE); } updateSignature(); mSignatureView.addTextChangedListener(signTextWatcher); if (!mIdentity.getSignatureUse()) { mSignatureView.setVisibility(View.GONE); } mReadReceipt = mAccount.isMessageReadReceiptAlways(); mQuoteStyle = mAccount.getQuoteStyle(); updateFrom(); if (!mSourceMessageProcessed) { if (mAction == Action.REPLY || mAction == Action.REPLY_ALL || mAction == Action.FORWARD || mAction == Action.EDIT_DRAFT) { /* * If we need to load the message we add ourself as a message listener here * so we can kick it off. Normally we add in onResume but we don't * want to reload the message every time the activity is resumed. * There is no harm in adding twice. 
*/ MessagingController.getInstance(getApplication()).addListener(mListener); final Account account = Preferences.getPreferences(this).getAccount(mMessageReference.getAccountUuid()); final String folderName = mMessageReference.getFolderName(); final String sourceMessageUid = mMessageReference.getUid(); MessagingController.getInstance(getApplication()).loadMessageForView(account, folderName, sourceMessageUid, null); } if (mAction != Action.EDIT_DRAFT) { String alwaysBccString = mAccount.getAlwaysBcc(); if (!TextUtils.isEmpty(alwaysBccString)) { recipientPresenter.addBccAddresses(Address.parse(alwaysBccString)); } } } if (mAction == Action.REPLY || mAction == Action.REPLY_ALL) { mMessageReference = mMessageReference.withModifiedFlag(Flag.ANSWERED); } if (mAction == Action.REPLY || mAction == Action.REPLY_ALL || mAction == Action.EDIT_DRAFT) { //change focus to message body. mMessageContentView.requestFocus(); } else { // Explicitly set focus to "To:" input field (see issue 2998) recipientMvpView.requestFocusOnToField(); } if (mAction == Action.FORWARD) { mMessageReference = mMessageReference.withModifiedFlag(Flag.FORWARDED); } mOpenPgpProvider = mAccount.getOpenPgpProvider(); if (isCryptoProviderEnabled()) { // attachKeyCheckBox = (CheckBox) findViewById(R.id.cb_attach_key); // attachKeyCheckBox.setEnabled(mAccount.getCryptoKey() != 0); mOpenPgpServiceConnection = new OpenPgpServiceConnection(this, mOpenPgpProvider, new OnBound() { @Override public void onBound(IOpenPgpService2 service) { recipientPresenter.onCryptoProviderBound(); } @Override public void onError(Exception e) { recipientPresenter.onCryptoProviderError(e); } }); mOpenPgpServiceConnection.bindToService(); updateMessageFormat(); } // Set font size of input controls int fontSize = mFontSizes.getMessageComposeInput(); recipientMvpView.setFontSizes(mFontSizes, fontSize); mFontSizes.setViewTextSize(mSubjectView, fontSize); mFontSizes.setViewTextSize(mMessageContentView, fontSize); mFontSizes.setViewTextSize(mQuotedText, fontSize); mFontSizes.setViewTextSize(mSignatureView, fontSize); updateMessageFormat(); setTitle(); currentMessageBuilder = (MessageBuilder) getLastNonConfigurationInstance(); if (currentMessageBuilder != null) { setProgressBarIndeterminateVisibility(true); currentMessageBuilder.reattachCallback(this); } } @Override public void onDestroy() { super.onDestroy(); if (mOpenPgpServiceConnection != null) { mOpenPgpServiceConnection.unbindFromService(); } } /** * Handle external intents that trigger the message compose activity. * * <p> * Supported external intents: * <ul> * <li>{@link Intent#ACTION_VIEW}</li> * <li>{@link Intent#ACTION_SENDTO}</li> * <li>{@link Intent#ACTION_SEND}</li> * <li>{@link Intent#ACTION_SEND_MULTIPLE}</li> * </ul> * </p> * * @param intent * The (external) intent that started the activity. * * @return {@code true}, if this activity was started by an external intent. {@code false}, * otherwise. */ private boolean initFromIntent(final Intent intent) { boolean startedByExternalIntent = false; final String action = intent.getAction(); if (Intent.ACTION_VIEW.equals(action) || Intent.ACTION_SENDTO.equals(action)) { /* * Someone has clicked a mailto: link. The address is in the URI. */ if (intent.getData() != null) { Uri uri = intent.getData(); if (MailTo.isMailTo(uri)) { MailTo mailTo = MailTo.parse(uri); initializeFromMailto(mailTo); } } /* * Note: According to the documentation ACTION_VIEW and ACTION_SENDTO don't accept * EXTRA_* parameters. * And previously we didn't process these EXTRAs. 
But it looks like nobody bothers to * read the official documentation and just copies wrong sample code that happens to * work with the AOSP Email application. And because even big players get this wrong, * we're now finally giving in and read the EXTRAs for those actions (below). */ } if (Intent.ACTION_SEND.equals(action) || Intent.ACTION_SEND_MULTIPLE.equals(action) || Intent.ACTION_SENDTO.equals(action) || Intent.ACTION_VIEW.equals(action)) { startedByExternalIntent = true; /* * Note: Here we allow a slight deviation from the documented behavior. * EXTRA_TEXT is used as message body (if available) regardless of the MIME * type of the intent. In addition one or multiple attachments can be added * using EXTRA_STREAM. */ CharSequence text = intent.getCharSequenceExtra(Intent.EXTRA_TEXT); // Only use EXTRA_TEXT if the body hasn't already been set by the mailto URI if (text != null && mMessageContentView.getText().length() == 0) { mMessageContentView.setCharacters(text); } String type = intent.getType(); if (Intent.ACTION_SEND.equals(action)) { Uri stream = intent.getParcelableExtra(Intent.EXTRA_STREAM); if (stream != null) { addAttachment(stream, type); } } else { List<Parcelable> list = intent.getParcelableArrayListExtra(Intent.EXTRA_STREAM); if (list != null) { for (Parcelable parcelable : list) { Uri stream = (Uri) parcelable; if (stream != null) { addAttachment(stream, type); } } } } String subject = intent.getStringExtra(Intent.EXTRA_SUBJECT); // Only use EXTRA_SUBJECT if the subject hasn't already been set by the mailto URI if (subject != null && mSubjectView.getText().length() == 0) { mSubjectView.setText(subject); } recipientPresenter.initFromSendOrViewIntent(intent); } return startedByExternalIntent; } @Override public void onResume() { super.onResume(); MessagingController.getInstance(getApplication()).addListener(mListener); } @Override public void onPause() { super.onPause(); MessagingController.getInstance(getApplication()).removeListener(mListener); boolean isPausingOnConfigurationChange = (getChangingConfigurations() & ActivityInfo.CONFIG_ORIENTATION) == ActivityInfo.CONFIG_ORIENTATION; boolean isCurrentlyBuildingMessage = currentMessageBuilder != null; if (isPausingOnConfigurationChange || isCurrentlyBuildingMessage || isInSubActivity) { return; } checkToSaveDraftImplicitly(); } /** * The framework handles most of the fields, but we need to handle stuff that we * dynamically show and hide: * Attachment list, * Cc field, * Bcc field, * Quoted text, */ @Override protected void onSaveInstanceState(Bundle outState) { super.onSaveInstanceState(outState); outState.putInt(STATE_KEY_NUM_ATTACHMENTS_LOADING, mNumAttachmentsLoading); outState.putString(STATE_KEY_WAITING_FOR_ATTACHMENTS, mWaitingForAttachments.name()); outState.putParcelableArrayList(STATE_KEY_ATTACHMENTS, createAttachmentList()); outState.putSerializable(STATE_KEY_QUOTED_TEXT_MODE, mQuotedTextMode); outState.putBoolean(STATE_KEY_SOURCE_MESSAGE_PROCED, mSourceMessageProcessed); outState.putLong(STATE_KEY_DRAFT_ID, mDraftId); outState.putSerializable(STATE_IDENTITY, mIdentity); outState.putBoolean(STATE_IDENTITY_CHANGED, mIdentityChanged); outState.putString(STATE_IN_REPLY_TO, mInReplyTo); outState.putString(STATE_REFERENCES, mReferences); outState.putSerializable(STATE_KEY_HTML_QUOTE, mQuotedHtmlContent); outState.putBoolean(STATE_KEY_READ_RECEIPT, mReadReceipt); outState.putBoolean(STATE_KEY_DRAFT_NEEDS_SAVING, draftNeedsSaving); outState.putBoolean(STATE_KEY_FORCE_PLAIN_TEXT, mForcePlainText); 
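        // The quoted text format is saved here so onRestoreInstanceState() can bring back the matching quote editor (plain text field vs. HTML WebView).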
        outState.putSerializable(STATE_KEY_QUOTED_TEXT_FORMAT, mQuotedTextFormat);
        recipientPresenter.onSaveInstanceState(outState);
    }

    @Override
    public Object onRetainNonConfigurationInstance() {
        if (currentMessageBuilder != null) {
            currentMessageBuilder.detachCallback();
        }
        return currentMessageBuilder;
    }

    @Override
    protected void onRestoreInstanceState(Bundle savedInstanceState) {
        super.onRestoreInstanceState(savedInstanceState);
        mAttachments.removeAllViews();
        mMaxLoaderId = 0;

        mNumAttachmentsLoading = savedInstanceState.getInt(STATE_KEY_NUM_ATTACHMENTS_LOADING);
        mWaitingForAttachments = WaitingAction.NONE;
        try {
            String waitingFor = savedInstanceState.getString(STATE_KEY_WAITING_FOR_ATTACHMENTS);
            mWaitingForAttachments = WaitingAction.valueOf(waitingFor);
        } catch (Exception e) {
            Log.w(K9.LOG_TAG, "Couldn't read value \"" + STATE_KEY_WAITING_FOR_ATTACHMENTS +
                    "\" from saved instance state", e);
        }

        List<Attachment> attachments = savedInstanceState.getParcelableArrayList(STATE_KEY_ATTACHMENTS);
        // noinspection ConstantConditions, we know this is set in onSaveInstanceState
        for (Attachment attachment : attachments) {
            addAttachmentView(attachment);
            if (attachment.loaderId > mMaxLoaderId) {
                mMaxLoaderId = attachment.loaderId;
            }

            if (attachment.state == Attachment.LoadingState.URI_ONLY) {
                initAttachmentInfoLoader(attachment);
            } else if (attachment.state == Attachment.LoadingState.METADATA) {
                initAttachmentContentLoader(attachment);
            }
        }

        mReadReceipt = savedInstanceState.getBoolean(STATE_KEY_READ_RECEIPT);
        recipientPresenter.onRestoreInstanceState(savedInstanceState);

        mQuotedHtmlContent =
                (InsertableHtmlContent) savedInstanceState.getSerializable(STATE_KEY_HTML_QUOTE);
        if (mQuotedHtmlContent != null && mQuotedHtmlContent.getQuotedContent() != null) {
            mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent());
        }

        mDraftId = savedInstanceState.getLong(STATE_KEY_DRAFT_ID);
        mIdentity = (Identity) savedInstanceState.getSerializable(STATE_IDENTITY);
        mIdentityChanged = savedInstanceState.getBoolean(STATE_IDENTITY_CHANGED);
        mInReplyTo = savedInstanceState.getString(STATE_IN_REPLY_TO);
        mReferences = savedInstanceState.getString(STATE_REFERENCES);
        draftNeedsSaving = savedInstanceState.getBoolean(STATE_KEY_DRAFT_NEEDS_SAVING);
        mForcePlainText = savedInstanceState.getBoolean(STATE_KEY_FORCE_PLAIN_TEXT);
        mQuotedTextFormat = (SimpleMessageFormat) savedInstanceState.getSerializable(
                STATE_KEY_QUOTED_TEXT_FORMAT);
        showOrHideQuotedText(
                (QuotedTextMode) savedInstanceState.getSerializable(STATE_KEY_QUOTED_TEXT_MODE));

        updateFrom();
        updateMessageFormat();
    }

    private void setTitle() {
        switch (mAction) {
            case REPLY: {
                setTitle(R.string.compose_title_reply);
                break;
            }
            case REPLY_ALL: {
                setTitle(R.string.compose_title_reply_all);
                break;
            }
            case FORWARD: {
                setTitle(R.string.compose_title_forward);
                break;
            }
            case COMPOSE:
            default: {
                setTitle(R.string.compose_title_compose);
                break;
            }
        }
    }

    @Nullable
    private MessageBuilder createMessageBuilder(boolean isDraft) {
        MessageBuilder builder;
        if (!recipientPresenter.canSendOrError(isDraft)) {
            return null;
        }
        ComposeCryptoStatus cryptoStatus = recipientPresenter.getCurrentCryptoStatus();
        // TODO encrypt drafts for storage
        if (!isDraft && cryptoStatus.shouldUsePgpMessageBuilder()) {
            PgpMessageBuilder pgpBuilder = new PgpMessageBuilder(getApplicationContext(), getOpenPgpApi());
            pgpBuilder.setCryptoStatus(cryptoStatus);
            builder = pgpBuilder;
        } else {
            builder = new SimpleMessageBuilder(getApplicationContext());
        }

        builder.setSubject(mSubjectView.getText().toString())
.setTo(recipientPresenter.getToAddresses()) .setCc(recipientPresenter.getCcAddresses()) .setBcc(recipientPresenter.getBccAddresses()) .setInReplyTo(mInReplyTo) .setReferences(mReferences) .setRequestReadReceipt(mReadReceipt) .setIdentity(mIdentity) .setMessageFormat(mMessageFormat) .setText(mMessageContentView.getCharacters()) .setAttachments(createAttachmentList()) .setSignature(mSignatureView.getCharacters()) .setQuoteStyle(mQuoteStyle) .setQuotedTextMode(mQuotedTextMode) .setQuotedText(mQuotedText.getCharacters()) .setQuotedHtmlContent(mQuotedHtmlContent) .setReplyAfterQuote(mAccount.isReplyAfterQuote()) .setSignatureBeforeQuotedText(mAccount.isSignatureBeforeQuotedText()) .setIdentityChanged(mIdentityChanged) .setSignatureChanged(mSignatureChanged) .setCursorPosition(mMessageContentView.getSelectionStart()) .setMessageReference(mMessageReference) .setDraft(isDraft); return builder; } private void checkToSendMessage() { if (recipientPresenter.checkRecipientsOkForSending()) { return; } if (mWaitingForAttachments != WaitingAction.NONE) { return; } if (mNumAttachmentsLoading > 0) { mWaitingForAttachments = WaitingAction.SEND; showWaitingForAttachmentDialog(); return; } performSendAfterChecks(); } private void checkToSaveDraftAndSave() { if (!mAccount.hasDraftsFolder()) { Toast.makeText(this, R.string.compose_error_no_draft_folder, Toast.LENGTH_SHORT).show(); return; } if (mWaitingForAttachments != WaitingAction.NONE) { return; } if (mNumAttachmentsLoading > 0) { mWaitingForAttachments = WaitingAction.SAVE; showWaitingForAttachmentDialog(); return; } mFinishAfterDraftSaved = true; performSaveAfterChecks(); } private void checkToSaveDraftImplicitly() { if (!mAccount.hasDraftsFolder()) { return; } if (!draftNeedsSaving) { return; } mFinishAfterDraftSaved = false; performSaveAfterChecks(); } private void performSaveAfterChecks() { currentMessageBuilder = createMessageBuilder(true); if (currentMessageBuilder != null) { setProgressBarIndeterminateVisibility(true); currentMessageBuilder.buildAsync(this); } } public void performSendAfterChecks() { currentMessageBuilder = createMessageBuilder(false); if (currentMessageBuilder != null) { draftNeedsSaving = false; setProgressBarIndeterminateVisibility(true); currentMessageBuilder.buildAsync(this); } } private void onDiscard() { if (mDraftId != INVALID_DRAFT_ID) { MessagingController.getInstance(getApplication()).deleteDraft(mAccount, mDraftId); mDraftId = INVALID_DRAFT_ID; } mHandler.sendEmptyMessage(MSG_DISCARDED_DRAFT); draftNeedsSaving = false; finish(); } private void onReadReceipt() { CharSequence txt; if (!mReadReceipt) { txt = getString(R.string.read_receipt_enabled); mReadReceipt = true; } else { txt = getString(R.string.read_receipt_disabled); mReadReceipt = false; } Context context = getApplicationContext(); Toast toast = Toast.makeText(context, txt, Toast.LENGTH_SHORT); toast.show(); } private ArrayList<Attachment> createAttachmentList() { ArrayList<Attachment> attachments = new ArrayList<>(); for (int i = 0, count = mAttachments.getChildCount(); i < count; i++) { View view = mAttachments.getChildAt(i); Attachment attachment = (Attachment) view.getTag(); attachments.add(attachment); } return attachments; } /** * Kick off a picker for the specified MIME type and let Android take over. 
*/ @SuppressLint("InlinedApi") private void onAddAttachment() { Intent i = new Intent(Intent.ACTION_GET_CONTENT); i.putExtra(Intent.EXTRA_ALLOW_MULTIPLE, true); i.addCategory(Intent.CATEGORY_OPENABLE); i.setType("*/*"); isInSubActivity = true; startActivityForResult(Intent.createChooser(i, null), ACTIVITY_REQUEST_PICK_ATTACHMENT); } private void addAttachment(Uri uri) { addAttachment(uri, null); } private void addAttachment(Uri uri, String contentType) { Attachment attachment = new Attachment(); attachment.state = Attachment.LoadingState.URI_ONLY; attachment.uri = uri; attachment.contentType = contentType; attachment.loaderId = ++mMaxLoaderId; addAttachmentView(attachment); initAttachmentInfoLoader(attachment); } private void initAttachmentInfoLoader(Attachment attachment) { LoaderManager loaderManager = getLoaderManager(); Bundle bundle = new Bundle(); bundle.putParcelable(LOADER_ARG_ATTACHMENT, attachment); loaderManager.initLoader(attachment.loaderId, bundle, mAttachmentInfoLoaderCallback); } private void initAttachmentContentLoader(Attachment attachment) { LoaderManager loaderManager = getLoaderManager(); Bundle bundle = new Bundle(); bundle.putParcelable(LOADER_ARG_ATTACHMENT, attachment); loaderManager.initLoader(attachment.loaderId, bundle, mAttachmentContentLoaderCallback); } private void addAttachmentView(Attachment attachment) { boolean hasMetadata = (attachment.state != Attachment.LoadingState.URI_ONLY); boolean isLoadingComplete = (attachment.state == Attachment.LoadingState.COMPLETE); View view = getLayoutInflater().inflate(R.layout.message_compose_attachment, mAttachments, false); TextView nameView = (TextView) view.findViewById(R.id.attachment_name); View progressBar = view.findViewById(R.id.progressBar); if (hasMetadata) { nameView.setText(attachment.name); } else { nameView.setText(R.string.loading_attachment); } progressBar.setVisibility(isLoadingComplete ? 
View.GONE : View.VISIBLE); ImageButton delete = (ImageButton) view.findViewById(R.id.attachment_delete); delete.setOnClickListener(MessageCompose.this); delete.setTag(view); view.setTag(attachment); mAttachments.addView(view); } private View getAttachmentView(int loaderId) { for (int i = 0, childCount = mAttachments.getChildCount(); i < childCount; i++) { View view = mAttachments.getChildAt(i); Attachment tag = (Attachment) view.getTag(); if (tag != null && tag.loaderId == loaderId) { return view; } } return null; } private LoaderManager.LoaderCallbacks<Attachment> mAttachmentInfoLoaderCallback = new LoaderManager.LoaderCallbacks<Attachment>() { @Override public Loader<Attachment> onCreateLoader(int id, Bundle args) { onFetchAttachmentStarted(); Attachment attachment = args.getParcelable(LOADER_ARG_ATTACHMENT); return new AttachmentInfoLoader(MessageCompose.this, attachment); } @Override public void onLoadFinished(Loader<Attachment> loader, Attachment attachment) { int loaderId = loader.getId(); View view = getAttachmentView(loaderId); if (view != null) { view.setTag(attachment); TextView nameView = (TextView) view.findViewById(R.id.attachment_name); nameView.setText(attachment.name); attachment.loaderId = ++mMaxLoaderId; initAttachmentContentLoader(attachment); } else { onFetchAttachmentFinished(); } getLoaderManager().destroyLoader(loaderId); } @Override public void onLoaderReset(Loader<Attachment> loader) { onFetchAttachmentFinished(); } }; private LoaderManager.LoaderCallbacks<Attachment> mAttachmentContentLoaderCallback = new LoaderManager.LoaderCallbacks<Attachment>() { @Override public Loader<Attachment> onCreateLoader(int id, Bundle args) { Attachment attachment = args.getParcelable(LOADER_ARG_ATTACHMENT); return new AttachmentContentLoader(MessageCompose.this, attachment); } @Override public void onLoadFinished(Loader<Attachment> loader, Attachment attachment) { int loaderId = loader.getId(); View view = getAttachmentView(loaderId); if (view != null) { if (attachment.state == Attachment.LoadingState.COMPLETE) { view.setTag(attachment); View progressBar = view.findViewById(R.id.progressBar); progressBar.setVisibility(View.GONE); } else { mAttachments.removeView(view); } } onFetchAttachmentFinished(); getLoaderManager().destroyLoader(loaderId); } @Override public void onLoaderReset(Loader<Attachment> loader) { onFetchAttachmentFinished(); } }; public OpenPgpApi getOpenPgpApi() { return new OpenPgpApi(this, mOpenPgpServiceConnection.getService()); } private void onFetchAttachmentStarted() { mNumAttachmentsLoading += 1; } private void onFetchAttachmentFinished() { // We're not allowed to perform fragment transactions when called from onLoadFinished(). // So we use the Handler to call performStalledAction(). 
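// performStalledAction() (below) decrements the loading counter and resumes a deferred send or save once we are safely outside the loader callback.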
mHandler.sendEmptyMessage(MSG_PERFORM_STALLED_ACTION); } private void performStalledAction() { mNumAttachmentsLoading -= 1; WaitingAction waitingFor = mWaitingForAttachments; mWaitingForAttachments = WaitingAction.NONE; if (waitingFor != WaitingAction.NONE) { dismissWaitingForAttachmentDialog(); } switch (waitingFor) { case SEND: { performSendAfterChecks(); break; } case SAVE: { performSaveAfterChecks(); break; } case NONE: break; } } public void showContactPicker(int requestCode) { requestCode |= REQUEST_MASK_RECIPIENT_PRESENTER; isInSubActivity = true; startActivityForResult(mContacts.contactPickerIntent(), requestCode); } @Override protected void onActivityResult(int requestCode, int resultCode, Intent data) { isInSubActivity = false; if ((requestCode & REQUEST_MASK_MESSAGE_BUILDER) == REQUEST_MASK_MESSAGE_BUILDER) { requestCode ^= REQUEST_MASK_MESSAGE_BUILDER; if (currentMessageBuilder == null) { Log.e(K9.LOG_TAG, "Got a message builder activity result for no message builder, " + "this is an illegal state!"); return; } currentMessageBuilder.onActivityResult(this, requestCode, resultCode, data); return; } if ((requestCode & REQUEST_MASK_RECIPIENT_PRESENTER) == REQUEST_MASK_RECIPIENT_PRESENTER) { requestCode ^= REQUEST_MASK_RECIPIENT_PRESENTER; recipientPresenter.onActivityResult(resultCode, requestCode, data); return; } if (resultCode != RESULT_OK) { return; } if (data == null) { return; } switch (requestCode) { case ACTIVITY_REQUEST_PICK_ATTACHMENT: addAttachmentsFromResultIntent(data); draftNeedsSaving = true; break; } } @TargetApi(Build.VERSION_CODES.JELLY_BEAN) private void addAttachmentsFromResultIntent(Intent data) { if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) { ClipData clipData = data.getClipData(); if (clipData != null) { for (int i = 0, end = clipData.getItemCount(); i < end; i++) { Uri uri = clipData.getItemAt(i).getUri(); if (uri != null) { addAttachment(uri); } } return; } } Uri uri = data.getData(); if (uri != null) { addAttachment(uri); } } private void onAccountChosen(Account account, Identity identity) { if (!mAccount.equals(account)) { if (K9.DEBUG) { Log.v(K9.LOG_TAG, "Switching account from " + mAccount + " to " + account); } // on draft edit, make sure we don't keep previous message UID if (mAction == Action.EDIT_DRAFT) { mMessageReference = null; } // test whether there is something to save if (draftNeedsSaving || (mDraftId != INVALID_DRAFT_ID)) { final long previousDraftId = mDraftId; final Account previousAccount = mAccount; // make current message appear as new mDraftId = INVALID_DRAFT_ID; // actual account switch mAccount = account; if (K9.DEBUG) { Log.v(K9.LOG_TAG, "Account switch, saving new draft in new account"); } checkToSaveDraftImplicitly(); if (previousDraftId != INVALID_DRAFT_ID) { if (K9.DEBUG) { Log.v(K9.LOG_TAG, "Account switch, deleting draft from previous account: " + previousDraftId); } MessagingController.getInstance(getApplication()).deleteDraft(previousAccount, previousDraftId); } } else { mAccount = account; } // Show CC/BCC text input field when switching to an account that always wants them // displayed. // Please note that we're not hiding the fields if the user switches back to an account // that doesn't have this setting checked. recipientPresenter.onSwitchAccount(mAccount); // not sure how to handle mFolder, mSourceMessage? 
} switchToIdentity(identity); } private void switchToIdentity(Identity identity) { mIdentity = identity; mIdentityChanged = true; draftNeedsSaving = true; updateFrom(); updateSignature(); updateMessageFormat(); recipientPresenter.onSwitchIdentity(identity); } private void updateFrom() { mChooseIdentityButton.setText(mIdentity.getEmail()); } private void updateSignature() { if (mIdentity.getSignatureUse()) { mSignatureView.setCharacters(mIdentity.getSignature()); mSignatureView.setVisibility(View.VISIBLE); } else { mSignatureView.setVisibility(View.GONE); } } @Override public void onClick(View view) { switch (view.getId()) { case R.id.attachment_delete: /* * The view is the delete button, and we have previously set the tag of * the delete button to the view that owns it. We don't use parent because the * view is very complex and could change in the future. */ mAttachments.removeView((View) view.getTag()); draftNeedsSaving = true; break; case R.id.quoted_text_show: showOrHideQuotedText(QuotedTextMode.SHOW); updateMessageFormat(); draftNeedsSaving = true; break; case R.id.quoted_text_delete: showOrHideQuotedText(QuotedTextMode.HIDE); updateMessageFormat(); draftNeedsSaving = true; break; case R.id.quoted_text_edit: mForcePlainText = true; if (mMessageReference != null) { // shouldn't happen... // TODO - Should we check if mSourceMessageBody is already present and bypass the MessagingController call? MessagingController.getInstance(getApplication()).addListener(mListener); final Account account = Preferences.getPreferences(this).getAccount(mMessageReference.getAccountUuid()); final String folderName = mMessageReference.getFolderName(); final String sourceMessageUid = mMessageReference.getUid(); MessagingController.getInstance(getApplication()).loadMessageForView(account, folderName, sourceMessageUid, null); } break; case R.id.identity: showDialog(DIALOG_CHOOSE_IDENTITY); break; } } /** * Show or hide the quoted text. * * @param mode * The value to set {@link #mQuotedTextMode} to. 
*/ private void showOrHideQuotedText(QuotedTextMode mode) { mQuotedTextMode = mode; switch (mode) { case NONE: case HIDE: { if (mode == QuotedTextMode.NONE) { mQuotedTextShow.setVisibility(View.GONE); } else { mQuotedTextShow.setVisibility(View.VISIBLE); } mQuotedTextBar.setVisibility(View.GONE); mQuotedText.setVisibility(View.GONE); mQuotedHTML.setVisibility(View.GONE); mQuotedTextEdit.setVisibility(View.GONE); break; } case SHOW: { mQuotedTextShow.setVisibility(View.GONE); mQuotedTextBar.setVisibility(View.VISIBLE); if (mQuotedTextFormat == SimpleMessageFormat.HTML) { mQuotedText.setVisibility(View.GONE); mQuotedHTML.setVisibility(View.VISIBLE); mQuotedTextEdit.setVisibility(View.VISIBLE); } else { mQuotedText.setVisibility(View.VISIBLE); mQuotedHTML.setVisibility(View.GONE); mQuotedTextEdit.setVisibility(View.GONE); } break; } } } private void askBeforeDiscard(){ if (K9.confirmDiscardMessage()) { showDialog(DIALOG_CONFIRM_DISCARD); } else { onDiscard(); } } @Override public boolean onOptionsItemSelected(MenuItem item) { switch (item.getItemId()) { case R.id.send: checkToSendMessage(); break; case R.id.save: checkToSaveDraftAndSave(); break; case R.id.discard: askBeforeDiscard(); break; case R.id.add_from_contacts: recipientPresenter.onMenuAddFromContacts(); break; case R.id.add_attachment: onAddAttachment(); break; case R.id.read_receipt: onReadReceipt(); break; default: return super.onOptionsItemSelected(item); } return true; } @Override public boolean onCreateOptionsMenu(Menu menu) { super.onCreateOptionsMenu(menu); getMenuInflater().inflate(R.menu.message_compose_option, menu); // Disable the 'Save' menu option if Drafts folder is set to -NONE- if (!mAccount.hasDraftsFolder()) { menu.findItem(R.id.save).setEnabled(false); } return true; } @Override public boolean onPrepareOptionsMenu(Menu menu) { super.onPrepareOptionsMenu(menu); recipientPresenter.onPrepareOptionsMenu(menu); return true; } @Override public void onBackPressed() { if (draftNeedsSaving) { if (!mAccount.hasDraftsFolder()) { showDialog(DIALOG_CONFIRM_DISCARD_ON_BACK); } else { showDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE); } } else { // Check if editing an existing draft. 
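// Nothing needs saving here: a message that was never saved as a draft is discarded, an existing draft is simply left untouched.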
if (mDraftId == INVALID_DRAFT_ID) { onDiscard(); } else { super.onBackPressed(); } } } private void showWaitingForAttachmentDialog() { String title; switch (mWaitingForAttachments) { case SEND: { title = getString(R.string.fetching_attachment_dialog_title_send); break; } case SAVE: { title = getString(R.string.fetching_attachment_dialog_title_save); break; } default: { return; } } ProgressDialogFragment fragment = ProgressDialogFragment.newInstance(title, getString(R.string.fetching_attachment_dialog_message)); fragment.show(getFragmentManager(), FRAGMENT_WAITING_FOR_ATTACHMENT); } public void onCancel(ProgressDialogFragment fragment) { attachmentProgressDialogCancelled(); } void attachmentProgressDialogCancelled() { mWaitingForAttachments = WaitingAction.NONE; } private void dismissWaitingForAttachmentDialog() { ProgressDialogFragment fragment = (ProgressDialogFragment) getFragmentManager().findFragmentByTag(FRAGMENT_WAITING_FOR_ATTACHMENT); if (fragment != null) { fragment.dismiss(); } } @Override public Dialog onCreateDialog(int id) { switch (id) { case DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE: return new AlertDialog.Builder(this) .setTitle(R.string.save_or_discard_draft_message_dlg_title) .setMessage(R.string.save_or_discard_draft_message_instructions_fmt) .setPositiveButton(R.string.save_draft_action, new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int whichButton) { dismissDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE); checkToSaveDraftAndSave(); } }) .setNegativeButton(R.string.discard_action, new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int whichButton) { dismissDialog(DIALOG_SAVE_OR_DISCARD_DRAFT_MESSAGE); onDiscard(); } }) .create(); case DIALOG_CONFIRM_DISCARD_ON_BACK: return new AlertDialog.Builder(this) .setTitle(R.string.confirm_discard_draft_message_title) .setMessage(R.string.confirm_discard_draft_message) .setPositiveButton(R.string.cancel_action, new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int whichButton) { dismissDialog(DIALOG_CONFIRM_DISCARD_ON_BACK); } }) .setNegativeButton(R.string.discard_action, new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int whichButton) { dismissDialog(DIALOG_CONFIRM_DISCARD_ON_BACK); Toast.makeText(MessageCompose.this, getString(R.string.message_discarded_toast), Toast.LENGTH_LONG).show(); onDiscard(); } }) .create(); case DIALOG_CHOOSE_IDENTITY: Context context = new ContextThemeWrapper(this, (K9.getK9Theme() == K9.Theme.LIGHT) ? 
R.style.Theme_K9_Dialog_Light : R.style.Theme_K9_Dialog_Dark); Builder builder = new AlertDialog.Builder(context); builder.setTitle(R.string.send_as); final IdentityAdapter adapter = new IdentityAdapter(context); builder.setAdapter(adapter, new DialogInterface.OnClickListener() { @Override public void onClick(DialogInterface dialog, int which) { IdentityContainer container = (IdentityContainer) adapter.getItem(which); onAccountChosen(container.account, container.identity); } }); return builder.create(); case DIALOG_CONFIRM_DISCARD: { return new AlertDialog.Builder(this) .setTitle(R.string.dialog_confirm_delete_title) .setMessage(R.string.dialog_confirm_delete_message) .setPositiveButton(R.string.dialog_confirm_delete_confirm_button, new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { onDiscard(); } }) .setNegativeButton(R.string.dialog_confirm_delete_cancel_button, new DialogInterface.OnClickListener() { public void onClick(DialogInterface dialog, int which) { } }) .create(); } } return super.onCreateDialog(id); } /** * Add all attachments of an existing message as if they were added by hand. * * @param part * The message part to check for being an attachment. This method will recurse if it's * a multipart part. * @param depth * The recursion depth. Currently unused. * * @return {@code true} if all attachments were able to be attached, {@code false} otherwise. * * @throws MessagingException * In case of an error */ private boolean loadAttachments(Part part, int depth) throws MessagingException { if (part.getBody() instanceof Multipart) { Multipart mp = (Multipart) part.getBody(); boolean ret = true; for (int i = 0, count = mp.getCount(); i < count; i++) { if (!loadAttachments(mp.getBodyPart(i), depth + 1)) { ret = false; } } return ret; } String contentType = MimeUtility.unfoldAndDecode(part.getContentType()); String name = MimeUtility.getHeaderParameter(contentType, "name"); if (name != null) { if (part instanceof LocalBodyPart) { LocalBodyPart localBodyPart = (LocalBodyPart) part; String accountUuid = localBodyPart.getAccountUuid(); long attachmentId = localBodyPart.getId(); Uri uri = AttachmentProvider.getAttachmentUri(accountUuid, attachmentId); addAttachment(uri); return true; } return false; } return true; } /** * Pull out the parts of the now loaded source message and apply them to the new message * depending on the type of message being composed. * * @param message * The source message used to populate the various text fields. */ private void processSourceMessage(LocalMessage message) { try { switch (mAction) { case REPLY: case REPLY_ALL: { processMessageToReplyTo(message); break; } case FORWARD: { processMessageToForward(message); break; } case EDIT_DRAFT: { processDraftMessage(message); break; } default: { Log.w(K9.LOG_TAG, "processSourceMessage() called with unsupported action"); break; } } } catch (MessagingException me) { /** * Let the user continue composing their message even if we have a problem processing * the source message. Log it as an error, though. 
*/ Log.e(K9.LOG_TAG, "Error while processing source message: ", me); } finally { mSourceMessageProcessed = true; draftNeedsSaving = false; } updateMessageFormat(); } private void processMessageToReplyTo(Message message) throws MessagingException { if (message.getSubject() != null) { final String subject = PREFIX.matcher(message.getSubject()).replaceFirst(""); if (!subject.toLowerCase(Locale.US).startsWith("re:")) { mSubjectView.setText("Re: " + subject); } else { mSubjectView.setText(subject); } } else { mSubjectView.setText(""); } /* * If a reply-to was included with the message use that, otherwise use the from * or sender address. */ recipientPresenter.initFromReplyToMessage(message); if (message.getMessageId() != null && message.getMessageId().length() > 0) { mInReplyTo = message.getMessageId(); String[] refs = message.getReferences(); if (refs != null && refs.length > 0) { mReferences = TextUtils.join("", refs) + " " + mInReplyTo; } else { mReferences = mInReplyTo; } } else { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "could not get Message-ID."); } } // Quote the message and setup the UI. populateUIWithQuotedMessage(mAccount.isDefaultQuotedTextShown()); if (mAction == Action.REPLY || mAction == Action.REPLY_ALL) { Identity useIdentity = IdentityHelper.getRecipientIdentityFromMessage(mAccount, message); Identity defaultIdentity = mAccount.getIdentity(0); if (useIdentity != defaultIdentity) { switchToIdentity(useIdentity); } } } private void processMessageToForward(Message message) throws MessagingException { String subject = message.getSubject(); if (subject != null && !subject.toLowerCase(Locale.US).startsWith("fwd:")) { mSubjectView.setText("Fwd: " + subject); } else { mSubjectView.setText(subject); } mQuoteStyle = QuoteStyle.HEADER; // "Be Like Thunderbird" - on forwarded messages, set the message ID // of the forwarded message in the references and the reply to. TB // only includes ID of the message being forwarded in the reference, // even if there are multiple references. if (!TextUtils.isEmpty(message.getMessageId())) { mInReplyTo = message.getMessageId(); mReferences = mInReplyTo; } else { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "could not get Message-ID."); } } // Quote the message and setup the UI. populateUIWithQuotedMessage(true); if (!mSourceMessageProcessed) { if (message.isSet(Flag.X_DOWNLOADED_PARTIAL) || !loadAttachments(message, 0)) { mHandler.sendEmptyMessage(MSG_SKIPPED_ATTACHMENTS); } } } private void processDraftMessage(LocalMessage message) throws MessagingException { String showQuotedTextMode = "NONE"; mDraftId = MessagingController.getInstance(getApplication()).getId(message); mSubjectView.setText(message.getSubject()); recipientPresenter.initFromDraftMessage(message); // Read In-Reply-To header from draft final String[] inReplyTo = message.getHeader("In-Reply-To"); if (inReplyTo.length >= 1) { mInReplyTo = inReplyTo[0]; } // Read References header from draft final String[] references = message.getHeader("References"); if (references.length >= 1) { mReferences = references[0]; } if (!mSourceMessageProcessed) { loadAttachments(message, 0); } // Decode the identity header when loading a draft. // See buildIdentityHeader(TextBody) for a detailed description of the composition of this blob. 
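// The parsed identity map below restores signature, name, email, original message reference, cursor position, quoted text mode, quote style and body offsets from the draft.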
Map<IdentityField, String> k9identity = new HashMap<>(); String[] identityHeaders = message.getHeader(K9.IDENTITY_HEADER); if (identityHeaders.length > 0 && identityHeaders[0] != null) { k9identity = IdentityHeaderParser.parse(identityHeaders[0]); } Identity newIdentity = new Identity(); if (k9identity.containsKey(IdentityField.SIGNATURE)) { newIdentity.setSignatureUse(true); newIdentity.setSignature(k9identity.get(IdentityField.SIGNATURE)); mSignatureChanged = true; } else { newIdentity.setSignatureUse(message.getFolder().getSignatureUse()); newIdentity.setSignature(mIdentity.getSignature()); } if (k9identity.containsKey(IdentityField.NAME)) { newIdentity.setName(k9identity.get(IdentityField.NAME)); mIdentityChanged = true; } else { newIdentity.setName(mIdentity.getName()); } if (k9identity.containsKey(IdentityField.EMAIL)) { newIdentity.setEmail(k9identity.get(IdentityField.EMAIL)); mIdentityChanged = true; } else { newIdentity.setEmail(mIdentity.getEmail()); } if (k9identity.containsKey(IdentityField.ORIGINAL_MESSAGE)) { mMessageReference = null; try { String originalMessage = k9identity.get(IdentityField.ORIGINAL_MESSAGE); MessageReference messageReference = new MessageReference(originalMessage); // Check if this is a valid account in our database Preferences prefs = Preferences.getPreferences(getApplicationContext()); Account account = prefs.getAccount(messageReference.getAccountUuid()); if (account != null) { mMessageReference = messageReference; } } catch (MessagingException e) { Log.e(K9.LOG_TAG, "Could not decode message reference in identity.", e); } } int cursorPosition = 0; if (k9identity.containsKey(IdentityField.CURSOR_POSITION)) { try { cursorPosition = Integer.parseInt(k9identity.get(IdentityField.CURSOR_POSITION)); } catch (Exception e) { Log.e(K9.LOG_TAG, "Could not parse cursor position for MessageCompose; continuing.", e); } } if (k9identity.containsKey(IdentityField.QUOTED_TEXT_MODE)) { showQuotedTextMode = k9identity.get(IdentityField.QUOTED_TEXT_MODE); } mIdentity = newIdentity; updateSignature(); updateFrom(); Integer bodyLength = k9identity.get(IdentityField.LENGTH) != null ? Integer.valueOf(k9identity.get(IdentityField.LENGTH)) : 0; Integer bodyOffset = k9identity.get(IdentityField.OFFSET) != null ? Integer.valueOf(k9identity.get(IdentityField.OFFSET)) : 0; Integer bodyFooterOffset = k9identity.get(IdentityField.FOOTER_OFFSET) != null ? Integer.valueOf(k9identity.get(IdentityField.FOOTER_OFFSET)) : null; Integer bodyPlainLength = k9identity.get(IdentityField.PLAIN_LENGTH) != null ? Integer.valueOf(k9identity.get(IdentityField.PLAIN_LENGTH)) : null; Integer bodyPlainOffset = k9identity.get(IdentityField.PLAIN_OFFSET) != null ? Integer.valueOf(k9identity.get(IdentityField.PLAIN_OFFSET)) : null; mQuoteStyle = k9identity.get(IdentityField.QUOTE_STYLE) != null ? QuoteStyle.valueOf(k9identity.get(IdentityField.QUOTE_STYLE)) : mAccount.getQuoteStyle(); QuotedTextMode quotedMode; try { quotedMode = QuotedTextMode.valueOf(showQuotedTextMode); } catch (Exception e) { quotedMode = QuotedTextMode.NONE; } // Always respect the user's current composition format preference, even if the // draft was saved in a different format. // TODO - The current implementation doesn't allow a user in HTML mode to edit a draft that wasn't saved with K9mail. 
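// Drafts without a readable MESSAGE_FORMAT identity field are shown as plain text below and mForcePlainText is set.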
String messageFormatString = k9identity.get(IdentityField.MESSAGE_FORMAT); MessageFormat messageFormat = null; if (messageFormatString != null) { try { messageFormat = MessageFormat.valueOf(messageFormatString); } catch (Exception e) { /* do nothing */ } } if (messageFormat == null) { // This message probably wasn't created by us. The exception is legacy // drafts created before the advent of HTML composition. In those cases, // we'll display the whole message (including the quoted part) in the // composition window. If that's the case, try and convert it to text to // match the behavior in text mode. mMessageContentView.setCharacters(getBodyTextFromMessage(message, SimpleMessageFormat.TEXT)); mForcePlainText = true; showOrHideQuotedText(quotedMode); return; } if (messageFormat == MessageFormat.HTML) { Part part = MimeUtility.findFirstPartByMimeType(message, "text/html"); if (part != null) { // Shouldn't happen if we were the one who saved it. mQuotedTextFormat = SimpleMessageFormat.HTML; String text = MessageExtractor.getTextFromPart(part); if (K9.DEBUG) { Log.d(K9.LOG_TAG, "Loading message with offset " + bodyOffset + ", length " + bodyLength + ". Text length is " + text.length() + "."); } if (bodyOffset + bodyLength > text.length()) { // The draft was edited outside of K-9 Mail? Log.d(K9.LOG_TAG, "The identity field from the draft contains an invalid LENGTH/OFFSET"); bodyOffset = 0; bodyLength = 0; } // Grab our reply text. String bodyText = text.substring(bodyOffset, bodyOffset + bodyLength); mMessageContentView.setCharacters(HtmlConverter.htmlToText(bodyText)); // Regenerate the quoted html without our user content in it. StringBuilder quotedHTML = new StringBuilder(); quotedHTML.append(text.substring(0, bodyOffset)); // stuff before the reply quotedHTML.append(text.substring(bodyOffset + bodyLength)); if (quotedHTML.length() > 0) { mQuotedHtmlContent = new InsertableHtmlContent(); mQuotedHtmlContent.setQuotedContent(quotedHTML); // We don't know if bodyOffset refers to the header or to the footer mQuotedHtmlContent.setHeaderInsertionPoint(bodyOffset); if (bodyFooterOffset != null) { mQuotedHtmlContent.setFooterInsertionPoint(bodyFooterOffset); } else { mQuotedHtmlContent.setFooterInsertionPoint(bodyOffset); } mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent()); } } if (bodyPlainOffset != null && bodyPlainLength != null) { processSourceMessageText(message, bodyPlainOffset, bodyPlainLength, false); } } else if (messageFormat == MessageFormat.TEXT) { mQuotedTextFormat = SimpleMessageFormat.TEXT; processSourceMessageText(message, bodyOffset, bodyLength, true); } else { Log.e(K9.LOG_TAG, "Unhandled message format."); } // Set the cursor position if we have it. try { mMessageContentView.setSelection(cursorPosition); } catch (Exception e) { Log.e(K9.LOG_TAG, "Could not set cursor position in MessageCompose; ignoring.", e); } showOrHideQuotedText(quotedMode); } /** * Pull out the parts of the now loaded source message and apply them to the new message * depending on the type of message being composed. * @param message Source message * @param bodyOffset Insertion point for reply. * @param bodyLength Length of reply. * @param viewMessageContent Update mMessageContentView or not. 
* @throws MessagingException */ private void processSourceMessageText(Message message, Integer bodyOffset, Integer bodyLength, boolean viewMessageContent) throws MessagingException { Part textPart = MimeUtility.findFirstPartByMimeType(message, "text/plain"); if (textPart != null) { String text = MessageExtractor.getTextFromPart(textPart); if (K9.DEBUG) { Log.d(K9.LOG_TAG, "Loading message with offset " + bodyOffset + ", length " + bodyLength + ". Text length is " + text.length() + "."); } // If we had a body length (and it was valid), separate the composition from the quoted text // and put them in their respective places in the UI. if (bodyLength > 0) { try { String bodyText = text.substring(bodyOffset, bodyOffset + bodyLength); // Regenerate the quoted text without our user content in it nor added newlines. StringBuilder quotedText = new StringBuilder(); if (bodyOffset == 0 && text.substring(bodyLength, bodyLength + 4).equals("\r\n\r\n")) { // top-posting: ignore two newlines at start of quote quotedText.append(text.substring(bodyLength + 4)); } else if (bodyOffset + bodyLength == text.length() && text.substring(bodyOffset - 2, bodyOffset).equals("\r\n")) { // bottom-posting: ignore newline at end of quote quotedText.append(text.substring(0, bodyOffset - 2)); } else { quotedText.append(text.substring(0, bodyOffset)); // stuff before the reply quotedText.append(text.substring(bodyOffset + bodyLength)); } if (viewMessageContent) { mMessageContentView.setCharacters(bodyText); } mQuotedText.setCharacters(quotedText); } catch (IndexOutOfBoundsException e) { // Invalid bodyOffset or bodyLength. The draft was edited outside of K-9 Mail? Log.d(K9.LOG_TAG, "The identity field from the draft contains an invalid bodyOffset/bodyLength"); if (viewMessageContent) { mMessageContentView.setCharacters(text); } } } else { if (viewMessageContent) { mMessageContentView.setCharacters(text); } } } } // Regexes to check for signature. private static final Pattern DASH_SIGNATURE_PLAIN = Pattern.compile("\r\n-- \r\n.*", Pattern.DOTALL); private static final Pattern DASH_SIGNATURE_HTML = Pattern.compile("(<br( /)?>|\r?\n)-- <br( /)?>", Pattern.CASE_INSENSITIVE); private static final Pattern BLOCKQUOTE_START = Pattern.compile("<blockquote", Pattern.CASE_INSENSITIVE); private static final Pattern BLOCKQUOTE_END = Pattern.compile("</blockquote>", Pattern.CASE_INSENSITIVE); /** * Build and populate the UI with the quoted message. * * @param showQuotedText * {@code true} if the quoted text should be shown, {@code false} otherwise. * * @throws MessagingException */ private void populateUIWithQuotedMessage(boolean showQuotedText) throws MessagingException { MessageFormat origMessageFormat = mAccount.getMessageFormat(); if (mForcePlainText || origMessageFormat == MessageFormat.TEXT) { // Use plain text for the quoted message mQuotedTextFormat = SimpleMessageFormat.TEXT; } else if (origMessageFormat == MessageFormat.AUTO) { // Figure out which message format to use for the quoted text by looking if the source // message contains a text/html part. If it does, we use that. mQuotedTextFormat = (MimeUtility.findFirstPartByMimeType(mSourceMessage, "text/html") == null) ? SimpleMessageFormat.TEXT : SimpleMessageFormat.HTML; } else { mQuotedTextFormat = SimpleMessageFormat.HTML; } // TODO -- I am assuming that mSourceMessageBody will always be a text part. Is this a safe assumption? // Handle the original message in the reply // If we already have mSourceMessageBody, use that. It's pre-populated if we've got crypto going on. 
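// Otherwise the body is extracted from the source message in the quote format chosen above.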
String content = (mSourceMessageBody != null) ? mSourceMessageBody : getBodyTextFromMessage(mSourceMessage, mQuotedTextFormat); if (mQuotedTextFormat == SimpleMessageFormat.HTML) { // Strip signature. // closing tags such as </div>, </span>, </table>, </pre> will be cut off. if (mAccount.isStripSignature() && (mAction == Action.REPLY || mAction == Action.REPLY_ALL)) { Matcher dashSignatureHtml = DASH_SIGNATURE_HTML.matcher(content); if (dashSignatureHtml.find()) { Matcher blockquoteStart = BLOCKQUOTE_START.matcher(content); Matcher blockquoteEnd = BLOCKQUOTE_END.matcher(content); List<Integer> start = new ArrayList<>(); List<Integer> end = new ArrayList<>(); while (blockquoteStart.find()) { start.add(blockquoteStart.start()); } while (blockquoteEnd.find()) { end.add(blockquoteEnd.start()); } if (start.size() != end.size()) { Log.d(K9.LOG_TAG, "There are " + start.size() + " <blockquote> tags, but " + end.size() + " </blockquote> tags. Refusing to strip."); } else if (start.size() > 0) { // Ignore quoted signatures in blockquotes. dashSignatureHtml.region(0, start.get(0)); if (dashSignatureHtml.find()) { // before first <blockquote>. content = content.substring(0, dashSignatureHtml.start()); } else { for (int i = 0; i < start.size() - 1; i++) { // within blockquotes. if (end.get(i) < start.get(i + 1)) { dashSignatureHtml.region(end.get(i), start.get(i + 1)); if (dashSignatureHtml.find()) { content = content.substring(0, dashSignatureHtml.start()); break; } } } if (end.get(end.size() - 1) < content.length()) { // after last </blockquote>. dashSignatureHtml.region(end.get(end.size() - 1), content.length()); if (dashSignatureHtml.find()) { content = content.substring(0, dashSignatureHtml.start()); } } } } else { // No blockquotes found. content = content.substring(0, dashSignatureHtml.start()); } } // Fix the stripping off of closing tags if a signature was stripped, // as well as clean up the HTML of the quoted message. HtmlCleaner cleaner = new HtmlCleaner(); CleanerProperties properties = cleaner.getProperties(); // see http://htmlcleaner.sourceforge.net/parameters.php for descriptions properties.setNamespacesAware(false); properties.setAdvancedXmlEscape(false); properties.setOmitXmlDeclaration(true); properties.setOmitDoctypeDeclaration(false); properties.setTranslateSpecialEntities(false); properties.setRecognizeUnicodeChars(false); TagNode node = cleaner.clean(content); SimpleHtmlSerializer htmlSerialized = new SimpleHtmlSerializer(properties); content = htmlSerialized.getAsString(node, "UTF8"); } // Add the HTML reply header to the top of the content. mQuotedHtmlContent = quoteOriginalHtmlMessage(mSourceMessage, content, mQuoteStyle); // Load the message with the reply header. 
mQuotedHTML.setText(mQuotedHtmlContent.getQuotedContent()); // TODO: Also strip the signature from the text/plain part mQuotedText.setCharacters(quoteOriginalTextMessage(mSourceMessage, getBodyTextFromMessage(mSourceMessage, SimpleMessageFormat.TEXT), mQuoteStyle)); } else if (mQuotedTextFormat == SimpleMessageFormat.TEXT) { if (mAccount.isStripSignature() && (mAction == Action.REPLY || mAction == Action.REPLY_ALL)) { if (DASH_SIGNATURE_PLAIN.matcher(content).find()) { content = DASH_SIGNATURE_PLAIN.matcher(content).replaceFirst("\r\n"); } } mQuotedText.setCharacters(quoteOriginalTextMessage(mSourceMessage, content, mQuoteStyle)); } if (showQuotedText) { showOrHideQuotedText(QuotedTextMode.SHOW); } else { showOrHideQuotedText(QuotedTextMode.HIDE); } } /** * Fetch the body text from a message in the desired message format. This method handles * conversions between formats (html to text and vice versa) if necessary. * @param message Message to analyze for body part. * @param format Desired format. * @return Text in desired format. * @throws MessagingException */ private String getBodyTextFromMessage(final Message message, final SimpleMessageFormat format) throws MessagingException { Part part; if (format == SimpleMessageFormat.HTML) { // HTML takes precedence, then text. part = MimeUtility.findFirstPartByMimeType(message, "text/html"); if (part != null) { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "getBodyTextFromMessage: HTML requested, HTML found."); } return MessageExtractor.getTextFromPart(part); } part = MimeUtility.findFirstPartByMimeType(message, "text/plain"); if (part != null) { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "getBodyTextFromMessage: HTML requested, text found."); } String text = MessageExtractor.getTextFromPart(part); return HtmlConverter.textToHtml(text); } } else if (format == SimpleMessageFormat.TEXT) { // Text takes precedence, then html. part = MimeUtility.findFirstPartByMimeType(message, "text/plain"); if (part != null) { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "getBodyTextFromMessage: Text requested, text found."); } return MessageExtractor.getTextFromPart(part); } part = MimeUtility.findFirstPartByMimeType(message, "text/html"); if (part != null) { if (K9.DEBUG) { Log.d(K9.LOG_TAG, "getBodyTextFromMessage: Text requested, HTML found."); } String text = MessageExtractor.getTextFromPart(part); return HtmlConverter.htmlToText(text); } } // If we had nothing interesting, return an empty string. return ""; } // Regular expressions to look for various HTML tags. This is no HTML::Parser, but hopefully it's good enough for // our purposes. private static final Pattern FIND_INSERTION_POINT_HTML = Pattern.compile("(?si:.*?(<html(?:>|\\s+[^>]*>)).*)"); private static final Pattern FIND_INSERTION_POINT_HEAD = Pattern.compile("(?si:.*?(<head(?:>|\\s+[^>]*>)).*)"); private static final Pattern FIND_INSERTION_POINT_BODY = Pattern.compile("(?si:.*?(<body(?:>|\\s+[^>]*>)).*)"); private static final Pattern FIND_INSERTION_POINT_HTML_END = Pattern.compile("(?si:.*(</html>).*?)"); private static final Pattern FIND_INSERTION_POINT_BODY_END = Pattern.compile("(?si:.*(</body>).*?)"); // The first group in a Matcher contains the first capture group. We capture the tag found in the above REs so that // we can locate the *end* of that tag. private static final int FIND_INSERTION_POINT_FIRST_GROUP = 1; // HTML bits to insert as appropriate // TODO is it safe to assume utf-8 here? 
private static final String FIND_INSERTION_POINT_HTML_CONTENT = "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.0 Transitional//EN\">\r\n<html>"; private static final String FIND_INSERTION_POINT_HTML_END_CONTENT = "</html>"; private static final String FIND_INSERTION_POINT_HEAD_CONTENT = "<head><meta content=\"text/html; charset=utf-8\" http-equiv=\"Content-Type\"></head>"; // Index of the start of the beginning of a String. private static final int FIND_INSERTION_POINT_START_OF_STRING = 0; /** * <p>Find the start and end positions of the HTML in the string. This should be the very top * and bottom of the displayable message. It returns a {@link InsertableHtmlContent}, which * contains both the insertion points and potentially modified HTML. The modified HTML should be * used in place of the HTML in the original message.</p> * * <p>This method loosely mimics the HTML forward/reply behavior of BlackBerry OS 4.5/BIS 2.5, which in turn mimics * Outlook 2003 (as best I can tell).</p> * * @param content Content to examine for HTML insertion points * @return Insertion points and HTML to use for insertion. */ private InsertableHtmlContent findInsertionPoints(final String content) { InsertableHtmlContent insertable = new InsertableHtmlContent(); // If there is no content, don't bother doing any of the regex dancing. if (content == null || content.equals("")) { return insertable; } // Search for opening tags. boolean hasHtmlTag = false; boolean hasHeadTag = false; boolean hasBodyTag = false; // First see if we have an opening HTML tag. If we don't find one, we'll add one later. Matcher htmlMatcher = FIND_INSERTION_POINT_HTML.matcher(content); if (htmlMatcher.matches()) { hasHtmlTag = true; } // Look for a HEAD tag. If we're missing a BODY tag, we'll use the close of the HEAD to start our content. Matcher headMatcher = FIND_INSERTION_POINT_HEAD.matcher(content); if (headMatcher.matches()) { hasHeadTag = true; } // Look for a BODY tag. This is the ideal place for us to start our content. Matcher bodyMatcher = FIND_INSERTION_POINT_BODY.matcher(content); if (bodyMatcher.matches()) { hasBodyTag = true; } if (K9.DEBUG) { Log.d(K9.LOG_TAG, "Open: hasHtmlTag:" + hasHtmlTag + " hasHeadTag:" + hasHeadTag + " hasBodyTag:" + hasBodyTag); } // Given our inspections, let's figure out where to start our content. // This is the ideal case -- there's a BODY tag and we insert ourselves just after it. if (hasBodyTag) { insertable.setQuotedContent(new StringBuilder(content)); insertable.setHeaderInsertionPoint(bodyMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP)); } else if (hasHeadTag) { // Now search for a HEAD tag. We can insert after there. // If BlackBerry sees a HEAD tag, it inserts right after that, so long as there is no BODY tag. It doesn't // try to add BODY, either. Right or wrong, it seems to work fine. insertable.setQuotedContent(new StringBuilder(content)); insertable.setHeaderInsertionPoint(headMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP)); } else if (hasHtmlTag) { // Lastly, check for an HTML tag. // In this case, it will add a HEAD, but no BODY. StringBuilder newContent = new StringBuilder(content); // Insert the HEAD content just after the HTML tag. newContent.insert(htmlMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP), FIND_INSERTION_POINT_HEAD_CONTENT); insertable.setQuotedContent(newContent); // The new insertion point is the end of the HTML tag, plus the length of the HEAD content. 
insertable.setHeaderInsertionPoint(htmlMatcher.end(FIND_INSERTION_POINT_FIRST_GROUP) + FIND_INSERTION_POINT_HEAD_CONTENT.length()); } else { // If we have none of the above, we probably have a fragment of HTML. Yahoo! and Gmail both do this. // Again, we add a HEAD, but not BODY. StringBuilder newContent = new StringBuilder(content); // Add the HTML and HEAD tags. newContent.insert(FIND_INSERTION_POINT_START_OF_STRING, FIND_INSERTION_POINT_HEAD_CONTENT); newContent.insert(FIND_INSERTION_POINT_START_OF_STRING, FIND_INSERTION_POINT_HTML_CONTENT); // Append the </HTML> tag. newContent.append(FIND_INSERTION_POINT_HTML_END_CONTENT); insertable.setQuotedContent(newContent); insertable.setHeaderInsertionPoint(FIND_INSERTION_POINT_HTML_CONTENT.length() + FIND_INSERTION_POINT_HEAD_CONTENT.length()); } // Search for closing tags. We have to do this after we deal with opening tags since it may // have modified the message. boolean hasHtmlEndTag = false; boolean hasBodyEndTag = false; // First see if we have an opening HTML tag. If we don't find one, we'll add one later. Matcher htmlEndMatcher = FIND_INSERTION_POINT_HTML_END.matcher(insertable.getQuotedContent()); if (htmlEndMatcher.matches()) { hasHtmlEndTag = true; } // Look for a BODY tag. This is the ideal place for us to place our footer. Matcher bodyEndMatcher = FIND_INSERTION_POINT_BODY_END.matcher(insertable.getQuotedContent()); if (bodyEndMatcher.matches()) { hasBodyEndTag = true; } if (K9.DEBUG) { Log.d(K9.LOG_TAG, "Close: hasHtmlEndTag:" + hasHtmlEndTag + " hasBodyEndTag:" + hasBodyEndTag); } // Now figure out where to put our footer. // This is the ideal case -- there's a BODY tag and we insert ourselves just before it. if (hasBodyEndTag) { insertable.setFooterInsertionPoint(bodyEndMatcher.start(FIND_INSERTION_POINT_FIRST_GROUP)); } else if (hasHtmlEndTag) { // Check for an HTML tag. Add ourselves just before it. insertable.setFooterInsertionPoint(htmlEndMatcher.start(FIND_INSERTION_POINT_FIRST_GROUP)); } else { // If we have none of the above, we probably have a fragment of HTML. // Set our footer insertion point as the end of the string. insertable.setFooterInsertionPoint(insertable.getQuotedContent().length()); } return insertable; } static class SendMessageTask extends AsyncTask<Void, Void, Void> { Context context; Account account; Contacts contacts; Message message; Long draftId; SendMessageTask(Context context, Account account, Contacts contacts, Message message, Long draftId) { this.context = context; this.account = account; this.contacts = contacts; this.message = message; this.draftId = draftId; } @Override protected Void doInBackground(Void... params) { try { contacts.markAsContacted(message.getRecipients(RecipientType.TO)); contacts.markAsContacted(message.getRecipients(RecipientType.CC)); contacts.markAsContacted(message.getRecipients(RecipientType.BCC)); } catch (Exception e) { Log.e(K9.LOG_TAG, "Failed to mark contact as contacted.", e); } MessagingController.getInstance(context).sendMessage(account, message, null); if (draftId != null) { // TODO set draft id to invalid in MessageCompose! 
MessagingController.getInstance(context).deleteDraft(account, draftId); } return null; } } class Listener extends MessagingListener { @Override public void loadMessageForViewStarted(Account account, String folder, String uid) { if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) { return; } mHandler.sendEmptyMessage(MSG_PROGRESS_ON); } @Override public void loadMessageForViewFinished(Account account, String folder, String uid, LocalMessage message) { if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) { return; } mHandler.sendEmptyMessage(MSG_PROGRESS_OFF); } @Override public void loadMessageForViewBodyAvailable(Account account, String folder, String uid, final Message message) { if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) { return; } mSourceMessage = message; runOnUiThread(new Runnable() { @Override public void run() { loadLocalMessageForDisplay((LocalMessage) message); } }); } @Override public void loadMessageForViewFailed(Account account, String folder, String uid, Throwable t) { if ((mMessageReference == null) || !mMessageReference.getUid().equals(uid)) { return; } mHandler.sendEmptyMessage(MSG_PROGRESS_OFF); // TODO show network error } @Override public void messageUidChanged(Account account, String folder, String oldUid, String newUid) { // Track UID changes of the source message if (mMessageReference != null) { final Account sourceAccount = Preferences.getPreferences(MessageCompose.this).getAccount(mMessageReference.getAccountUuid()); final String sourceFolder = mMessageReference.getFolderName(); final String sourceMessageUid = mMessageReference.getUid(); if (account.equals(sourceAccount) && (folder.equals(sourceFolder))) { if (oldUid.equals(sourceMessageUid)) { mMessageReference = mMessageReference.withModifiedUid(newUid); } if ((mSourceMessage != null) && (oldUid.equals(mSourceMessage.getUid()))) { mSourceMessage.setUid(newUid); } } } } } private void loadLocalMessageForDisplay(LocalMessage message) { // We check to see if we've previously processed the source message since this // could be called when switching from HTML to text replies. If that happens, we // only want to update the UI with quoted text (which picks the appropriate // part). if (mSourceProcessed) { try { populateUIWithQuotedMessage(true); } catch (MessagingException e) { // Hm, if we couldn't populate the UI after source reprocessing, let's just delete it? showOrHideQuotedText(QuotedTextMode.HIDE); Log.e(K9.LOG_TAG, "Could not re-process source message; deleting quoted text to be safe.", e); } updateMessageFormat(); } else { processSourceMessage(message); mSourceProcessed = true; } } /** * When we are launched with an intent that includes a mailto: URI, we can actually * gather quite a few of our message fields from it. 
* * @param mailTo * The MailTo object we use to initialize message field */ private void initializeFromMailto(MailTo mailTo) { recipientPresenter.initFromMailto(mailTo); String subject = mailTo.getSubject(); if (subject != null && !subject.isEmpty()) { mSubjectView.setText(subject); } String body = mailTo.getBody(); if (body != null && !subject.isEmpty()) { mMessageContentView.setCharacters(body); } } private static class SaveMessageTask extends AsyncTask<Void, Void, Void> { Context context; Account account; Contacts contacts; Handler handler; Message message; long draftId; boolean saveRemotely; SaveMessageTask(Context context, Account account, Contacts contacts, Handler handler, Message message, long draftId, boolean saveRemotely) { this.context = context; this.account = account; this.contacts = contacts; this.handler = handler; this.message = message; this.draftId = draftId; this.saveRemotely = saveRemotely; } @Override protected Void doInBackground(Void... params) { final MessagingController messagingController = MessagingController.getInstance(context); Message draftMessage = messagingController.saveDraft(account, message, draftId, saveRemotely); draftId = messagingController.getId(draftMessage); android.os.Message msg = android.os.Message.obtain(handler, MSG_SAVED_DRAFT, draftId); handler.sendMessage(msg); return null; } } private static final int REPLY_WRAP_LINE_WIDTH = 72; private static final int QUOTE_BUFFER_LENGTH = 512; // amount of extra buffer to allocate to accommodate quoting headers or prefixes /** * Add quoting markup to a text message. * @param originalMessage Metadata for message being quoted. * @param messageBody Text of the message to be quoted. * @param quoteStyle Style of quoting. * @return Quoted text. * @throws MessagingException */ private String quoteOriginalTextMessage(final Message originalMessage, final String messageBody, final QuoteStyle quoteStyle) throws MessagingException { String body = messageBody == null ? "" : messageBody; String sentDate = getSentDateText(originalMessage); if (quoteStyle == QuoteStyle.PREFIX) { StringBuilder quotedText = new StringBuilder(body.length() + QUOTE_BUFFER_LENGTH); if (sentDate.length() != 0) { quotedText.append(String.format( getString(R.string.message_compose_reply_header_fmt_with_date) + "\r\n", sentDate, Address.toString(originalMessage.getFrom()))); } else { quotedText.append(String.format( getString(R.string.message_compose_reply_header_fmt) + "\r\n", Address.toString(originalMessage.getFrom())) ); } final String prefix = mAccount.getQuotePrefix(); final String wrappedText = Utility.wrap(body, REPLY_WRAP_LINE_WIDTH - prefix.length()); // "$" and "\" in the quote prefix have to be escaped for // the replaceAll() invocation. 
final String escapedPrefix = prefix.replaceAll("(\\\\|\\$)", "\\\\$1"); quotedText.append(wrappedText.replaceAll("(?m)^", escapedPrefix)); return quotedText.toString().replaceAll("\\\r", ""); } else if (quoteStyle == QuoteStyle.HEADER) { StringBuilder quotedText = new StringBuilder(body.length() + QUOTE_BUFFER_LENGTH); quotedText.append("\r\n"); quotedText.append(getString(R.string.message_compose_quote_header_separator)).append("\r\n"); if (originalMessage.getFrom() != null && Address.toString(originalMessage.getFrom()).length() != 0) { quotedText.append(getString(R.string.message_compose_quote_header_from)).append(" ").append(Address.toString(originalMessage.getFrom())).append("\r\n"); } if (sentDate.length() != 0) { quotedText.append(getString(R.string.message_compose_quote_header_send_date)).append(" ").append(sentDate).append("\r\n"); } if (originalMessage.getRecipients(RecipientType.TO) != null && originalMessage.getRecipients(RecipientType.TO).length != 0) { quotedText.append(getString(R.string.message_compose_quote_header_to)).append(" ").append(Address.toString(originalMessage.getRecipients(RecipientType.TO))).append("\r\n"); } if (originalMessage.getRecipients(RecipientType.CC) != null && originalMessage.getRecipients(RecipientType.CC).length != 0) { quotedText.append(getString(R.string.message_compose_quote_header_cc)).append(" ").append(Address.toString(originalMessage.getRecipients(RecipientType.CC))).append("\r\n"); } if (originalMessage.getSubject() != null) { quotedText.append(getString(R.string.message_compose_quote_header_subject)).append(" ").append(originalMessage.getSubject()).append("\r\n"); } quotedText.append("\r\n"); quotedText.append(body); return quotedText.toString(); } else { // Shouldn't ever happen. return body; } } /** * Add quoting markup to a HTML message. * @param originalMessage Metadata for message being quoted. * @param messageBody Text of the message to be quoted. * @param quoteStyle Style of quoting. * @return Modified insertable message. * @throws MessagingException */ private InsertableHtmlContent quoteOriginalHtmlMessage(final Message originalMessage, final String messageBody, final QuoteStyle quoteStyle) throws MessagingException { InsertableHtmlContent insertable = findInsertionPoints(messageBody); String sentDate = getSentDateText(originalMessage); if (quoteStyle == QuoteStyle.PREFIX) { StringBuilder header = new StringBuilder(QUOTE_BUFFER_LENGTH); header.append("<div class=\"gmail_quote\">"); if (sentDate.length() != 0) { header.append(HtmlConverter.textToHtmlFragment(String.format( getString(R.string.message_compose_reply_header_fmt_with_date), sentDate, Address.toString(originalMessage.getFrom())) )); } else { header.append(HtmlConverter.textToHtmlFragment(String.format( getString(R.string.message_compose_reply_header_fmt), Address.toString(originalMessage.getFrom())) )); } header.append("<blockquote class=\"gmail_quote\" " + "style=\"margin: 0pt 0pt 0pt 0.8ex; border-left: 1px solid rgb(204, 204, 204); padding-left: 1ex;\">\r\n"); String footer = "</blockquote></div>"; insertable.insertIntoQuotedHeader(header.toString()); insertable.insertIntoQuotedFooter(footer); } else if (quoteStyle == QuoteStyle.HEADER) { StringBuilder header = new StringBuilder(); header.append("<div style='font-size:10.0pt;font-family:\"Tahoma\",\"sans-serif\";padding:3.0pt 0in 0in 0in'>\r\n"); header.append("<hr style='border:none;border-top:solid #E1E1E1 1.0pt'>\r\n"); // This gets converted into a horizontal line during html to text conversion. 
if (originalMessage.getFrom() != null && Address.toString(originalMessage.getFrom()).length() != 0) { header.append("<b>").append(getString(R.string.message_compose_quote_header_from)).append("</b> ") .append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getFrom()))) .append("<br>\r\n"); } if (sentDate.length() != 0) { header.append("<b>").append(getString(R.string.message_compose_quote_header_send_date)).append("</b> ") .append(sentDate) .append("<br>\r\n"); } if (originalMessage.getRecipients(RecipientType.TO) != null && originalMessage.getRecipients(RecipientType.TO).length != 0) { header.append("<b>").append(getString(R.string.message_compose_quote_header_to)).append("</b> ") .append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getRecipients(RecipientType.TO)))) .append("<br>\r\n"); } if (originalMessage.getRecipients(RecipientType.CC) != null && originalMessage.getRecipients(RecipientType.CC).length != 0) { header.append("<b>").append(getString(R.string.message_compose_quote_header_cc)).append("</b> ") .append(HtmlConverter.textToHtmlFragment(Address.toString(originalMessage.getRecipients(RecipientType.CC)))) .append("<br>\r\n"); } if (originalMessage.getSubject() != null) { header.append("<b>").append(getString(R.string.message_compose_quote_header_subject)).append("</b> ") .append(HtmlConverter.textToHtmlFragment(originalMessage.getSubject())) .append("<br>\r\n"); } header.append("</div>\r\n"); header.append("<br>\r\n"); insertable.insertIntoQuotedHeader(header.toString()); } return insertable; } /** * Used to store an {@link Identity} instance together with the {@link Account} it belongs to. * * @see IdentityAdapter */ static class IdentityContainer { public final Identity identity; public final Account account; IdentityContainer(Identity identity, Account account) { this.identity = identity; this.account = account; } } /** * Adapter for the <em>Choose identity</em> list view. * * <p> * Account names are displayed as section headers, identities as selectable list items. * </p> */ static class IdentityAdapter extends BaseAdapter { private LayoutInflater mLayoutInflater; private List<Object> mItems; public IdentityAdapter(Context context) { mLayoutInflater = (LayoutInflater) context.getSystemService( Context.LAYOUT_INFLATER_SERVICE); List<Object> items = new ArrayList<>(); Preferences prefs = Preferences.getPreferences(context.getApplicationContext()); Collection<Account> accounts = prefs.getAvailableAccounts(); for (Account account : accounts) { items.add(account); List<Identity> identities = account.getIdentities(); for (Identity identity : identities) { items.add(new IdentityContainer(identity, account)); } } mItems = items; } @Override public int getCount() { return mItems.size(); } @Override public int getViewTypeCount() { return 2; } @Override public int getItemViewType(int position) { return (mItems.get(position) instanceof Account) ? 
0 : 1; } @Override public boolean isEnabled(int position) { return (mItems.get(position) instanceof IdentityContainer); } @Override public Object getItem(int position) { return mItems.get(position); } @Override public long getItemId(int position) { return position; } @Override public boolean hasStableIds() { return false; } @Override public View getView(int position, View convertView, ViewGroup parent) { Object item = mItems.get(position); View view = null; if (item instanceof Account) { if (convertView != null && convertView.getTag() instanceof AccountHolder) { view = convertView; } else { view = mLayoutInflater.inflate(R.layout.choose_account_item, parent, false); AccountHolder holder = new AccountHolder(); holder.name = (TextView) view.findViewById(R.id.name); holder.chip = view.findViewById(R.id.chip); view.setTag(holder); } Account account = (Account) item; AccountHolder holder = (AccountHolder) view.getTag(); holder.name.setText(account.getDescription()); holder.chip.setBackgroundColor(account.getChipColor()); } else if (item instanceof IdentityContainer) { if (convertView != null && convertView.getTag() instanceof IdentityHolder) { view = convertView; } else { view = mLayoutInflater.inflate(R.layout.choose_identity_item, parent, false); IdentityHolder holder = new IdentityHolder(); holder.name = (TextView) view.findViewById(R.id.name); holder.description = (TextView) view.findViewById(R.id.description); view.setTag(holder); } IdentityContainer identityContainer = (IdentityContainer) item; Identity identity = identityContainer.identity; IdentityHolder holder = (IdentityHolder) view.getTag(); holder.name.setText(identity.getDescription()); holder.description.setText(getIdentityDescription(identity)); } return view; } static class AccountHolder { public TextView name; public View chip; } static class IdentityHolder { public TextView name; public TextView description; } } private static String getIdentityDescription(Identity identity) { return String.format("%s <%s>", identity.getName(), identity.getEmail()); } private void setMessageFormat(SimpleMessageFormat format) { // This method will later be used to enable/disable the rich text editing mode. mMessageFormat = format; } private void updateMessageFormat() { MessageFormat origMessageFormat = mAccount.getMessageFormat(); SimpleMessageFormat messageFormat; if (origMessageFormat == MessageFormat.TEXT) { // The user wants to send text/plain messages. We don't override that choice under // any circumstances. messageFormat = SimpleMessageFormat.TEXT; } else if (mForcePlainText && includeQuotedText()) { // Right now we send a text/plain-only message when the quoted text was edited, no // matter what the user selected for the message format. messageFormat = SimpleMessageFormat.TEXT; } else if (recipientPresenter.isForceTextMessageFormat()) { // Right now we only support PGP inline which doesn't play well with HTML. So force // plain text in those cases. messageFormat = SimpleMessageFormat.TEXT; } else if (origMessageFormat == MessageFormat.AUTO) { if (mAction == Action.COMPOSE || mQuotedTextFormat == SimpleMessageFormat.TEXT || !includeQuotedText()) { // If the message format is set to "AUTO" we use text/plain whenever possible. That // is, when composing new messages and replying to or forwarding text/plain // messages. 
messageFormat = SimpleMessageFormat.TEXT; } else { messageFormat = SimpleMessageFormat.HTML; } } else { // In all other cases use HTML messageFormat = SimpleMessageFormat.HTML; } setMessageFormat(messageFormat); } private boolean includeQuotedText() { return (mQuotedTextMode == QuotedTextMode.SHOW); } /** * Extract the date from a message and convert it into a locale-specific * date string suitable for use in a header for a quoted message. * * @return A string with the formatted date/time */ private String getSentDateText(Message message) { try { final int dateStyle = DateFormat.LONG; final int timeStyle = DateFormat.LONG; Date date = message.getSentDate(); Locale locale = getResources().getConfiguration().locale; return DateFormat.getDateTimeInstance(dateStyle, timeStyle, locale) .format(date); } catch (Exception e) { return ""; } } private boolean isCryptoProviderEnabled() { return mOpenPgpProvider != null; } @Override public void onMessageBuildSuccess(MimeMessage message, boolean isDraft) { if (isDraft) { draftNeedsSaving = false; currentMessageBuilder = null; if (mAction == Action.EDIT_DRAFT && mMessageReference != null) { message.setUid(mMessageReference.getUid()); } boolean saveRemotely = recipientPresenter.isAllowSavingDraftRemotely(); new SaveMessageTask(getApplicationContext(), mAccount, mContacts, mHandler, message, mDraftId, saveRemotely).execute(); if (mFinishAfterDraftSaved) { finish(); } else { setProgressBarIndeterminateVisibility(false); } } else { currentMessageBuilder = null; new SendMessageTask(getApplicationContext(), mAccount, mContacts, message, mDraftId != INVALID_DRAFT_ID ? mDraftId : null).execute(); finish(); } } @Override public void onMessageBuildCancel() { currentMessageBuilder = null; setProgressBarIndeterminateVisibility(false); } @Override public void onMessageBuildException(MessagingException me) { Log.e(K9.LOG_TAG, "Error sending message", me); Toast.makeText(MessageCompose.this, getString(R.string.send_aborted, me.getLocalizedMessage()), Toast.LENGTH_LONG).show(); currentMessageBuilder = null; setProgressBarIndeterminateVisibility(false); } @Override public void onMessageBuildReturnPendingIntent(PendingIntent pendingIntent, int requestCode) { requestCode |= REQUEST_MASK_MESSAGE_BUILDER; try { startIntentSenderForResult(pendingIntent.getIntentSender(), requestCode, null, 0, 0, 0); } catch (SendIntentException e) { Log.e(K9.LOG_TAG, "Error starting pending intent from builder!", e); } } public void launchUserInteractionPendingIntent(PendingIntent pendingIntent, int requestCode) { requestCode |= REQUEST_MASK_RECIPIENT_PRESENTER; try { startIntentSenderForResult(pendingIntent.getIntentSender(), requestCode, null, 0, 0, 0); } catch (SendIntentException e) { e.printStackTrace(); } } }
1
13,429
Please restore the empty line after the group of `STATE_*` constants. This visual separation makes the code easier to read.
k9mail-k-9
java
@@ -213,9 +213,10 @@ module.exports = class GoldenRetriever extends Plugin { this.IndexedDBStore.delete(fileID) }) - this.core.on('core:success', (fileIDs) => { + this.core.on('core:complete', ({ successful }) => { + const fileIDs = successful.map((file) => file.id) this.deleteBlobs(fileIDs).then(() => { - this.core.log(`[GoldenRetriever] removed ${fileIDs.length} files that finished uploading`) + this.core.log(`RestoreFiles: removed ${successful.length} files that finished uploading`) }) })
1
const Plugin = require('../Plugin') const ServiceWorkerStore = require('./ServiceWorkerStore') const IndexedDBStore = require('./IndexedDBStore') const MetaDataStore = require('./MetaDataStore') /** * The Golden Retriever plugin — restores selected files and resumes uploads * after a closed tab or a browser crash! * * Uses localStorage, IndexedDB and ServiceWorker to do its magic, read more: * https://uppy.io/blog/2017/07/golden-retriever/ */ module.exports = class GoldenRetriever extends Plugin { constructor (core, opts) { super(core, opts) this.type = 'debugger' this.id = 'GoldenRetriever' this.title = 'Restore Files' const defaultOptions = { expires: 24 * 60 * 60 * 1000, // 24 hours serviceWorker: false } this.opts = Object.assign({}, defaultOptions, opts) this.MetaDataStore = new MetaDataStore({ expires: this.opts.expires, storeName: core.getID() }) this.ServiceWorkerStore = null if (this.opts.serviceWorker) { this.ServiceWorkerStore = new ServiceWorkerStore({ storeName: core.getID() }) } this.IndexedDBStore = new IndexedDBStore(Object.assign( { expires: this.opts.expires }, opts.indexedDB || {}, { storeName: core.getID() })) this.saveFilesStateToLocalStorage = this.saveFilesStateToLocalStorage.bind(this) this.loadFilesStateFromLocalStorage = this.loadFilesStateFromLocalStorage.bind(this) this.loadFileBlobsFromServiceWorker = this.loadFileBlobsFromServiceWorker.bind(this) this.loadFileBlobsFromIndexedDB = this.loadFileBlobsFromIndexedDB.bind(this) this.onBlobsLoaded = this.onBlobsLoaded.bind(this) } loadFilesStateFromLocalStorage () { const savedState = this.MetaDataStore.load() if (savedState) { this.core.log('Recovered some state from Local Storage') this.core.setState(savedState) } } /** * Get file objects that are currently waiting: they've been selected, * but aren't yet being uploaded. */ getWaitingFiles () { const waitingFiles = {} const allFiles = this.core.state.files Object.keys(allFiles).forEach((fileID) => { const file = this.core.getFile(fileID) if (!file.progress || !file.progress.uploadStarted) { waitingFiles[fileID] = file } }) return waitingFiles } /** * Get file objects that are currently being uploaded. If a file has finished * uploading, but the other files in the same batch have not, the finished * file is also returned. 
*/ getUploadingFiles () { const uploadingFiles = {} const { currentUploads } = this.core.state if (currentUploads) { const uploadIDs = Object.keys(currentUploads) uploadIDs.forEach((uploadID) => { const filesInUpload = currentUploads[uploadID].fileIDs filesInUpload.forEach((fileID) => { uploadingFiles[fileID] = this.core.getFile(fileID) }) }) } return uploadingFiles } saveFilesStateToLocalStorage () { const filesToSave = Object.assign( this.getWaitingFiles(), this.getUploadingFiles() ) this.MetaDataStore.save({ currentUploads: this.core.state.currentUploads, files: filesToSave }) } loadFileBlobsFromServiceWorker () { this.ServiceWorkerStore.list().then((blobs) => { const numberOfFilesRecovered = Object.keys(blobs).length const numberOfFilesTryingToRecover = Object.keys(this.core.state.files).length if (numberOfFilesRecovered === numberOfFilesTryingToRecover) { this.core.log(`Successfully recovered ${numberOfFilesRecovered} blobs from Service Worker!`) this.core.info(`Successfully recovered ${numberOfFilesRecovered} files`, 'success', 3000) this.onBlobsLoaded(blobs) } else { this.core.log('Failed to recover blobs from Service Worker, trying IndexedDB now...') this.loadFileBlobsFromIndexedDB() } }) } loadFileBlobsFromIndexedDB () { this.IndexedDBStore.list().then((blobs) => { const numberOfFilesRecovered = Object.keys(blobs).length if (numberOfFilesRecovered > 0) { this.core.log(`Successfully recovered ${numberOfFilesRecovered} blobs from Indexed DB!`) this.core.info(`Successfully recovered ${numberOfFilesRecovered} files`, 'success', 3000) return this.onBlobsLoaded(blobs) } this.core.log('Couldn’t recover anything from IndexedDB :(') }) } onBlobsLoaded (blobs) { const obsoleteBlobs = [] const updatedFiles = Object.assign({}, this.core.state.files) Object.keys(blobs).forEach((fileID) => { const originalFile = this.core.getFile(fileID) if (!originalFile) { obsoleteBlobs.push(fileID) return } const cachedData = blobs[fileID] const updatedFileData = { data: cachedData, isRestored: true } const updatedFile = Object.assign({}, originalFile, updatedFileData) updatedFiles[fileID] = updatedFile this.core.generatePreview(updatedFile) }) this.core.setState({ files: updatedFiles }) this.core.emit('core:restored') if (obsoleteBlobs.length) { this.deleteBlobs(obsoleteBlobs).then(() => { this.core.log(`[GoldenRetriever] cleaned up ${obsoleteBlobs.length} old files`) }) } } deleteBlobs (fileIDs) { const promises = [] fileIDs.forEach((id) => { if (this.ServiceWorkerStore) { promises.push(this.ServiceWorkerStore.delete(id)) } if (this.IndexedDBStore) { promises.push(this.IndexedDBStore.delete(id)) } }) return Promise.all(promises) } install () { this.loadFilesStateFromLocalStorage() if (Object.keys(this.core.state.files).length > 0) { if (this.ServiceWorkerStore) { this.core.log('Attempting to load files from Service Worker...') this.loadFileBlobsFromServiceWorker() } else { this.core.log('Attempting to load files from Indexed DB...') this.loadFileBlobsFromIndexedDB() } } this.core.on('core:file-added', (file) => { if (file.isRemote) return if (this.ServiceWorkerStore) { this.ServiceWorkerStore.put(file).catch((err) => { this.core.log('Could not store file', 'error') this.core.log(err) }) } this.IndexedDBStore.put(file).catch((err) => { this.core.log('Could not store file', 'error') this.core.log(err) }) }) this.core.on('core:file-removed', (fileID) => { if (this.ServiceWorkerStore) this.ServiceWorkerStore.delete(fileID) this.IndexedDBStore.delete(fileID) }) this.core.on('core:success', (fileIDs) => { 
this.deleteBlobs(fileIDs).then(() => { this.core.log(`[GoldenRetriever] removed ${fileIDs.length} files that finished uploading`) }) }) this.core.on('core:state-update', this.saveFilesStateToLocalStorage) this.core.on('core:restored', () => { // start all uploads again when file blobs are restored const { currentUploads } = this.core.getState() if (currentUploads) { Object.keys(currentUploads).forEach((uploadId) => { this.core.restore(uploadId, currentUploads[uploadId]) }) } }) } }
1
10,113
Unsure why this is `RestoreFiles` now vs `GoldenRetriever`; I thought we deprecated the former?
transloadit-uppy
js
@@ -436,6 +436,10 @@ namespace pwiz.Skyline.Controls.Graphs { // Match each file status with a progress control. bool first = true; + var width = flowFileStatus.Width - 2 - + (flowFileStatus.VerticalScroll.Visible + ? SystemInformation.VerticalScrollBarWidth + : 0); foreach (var loadingStatus in status.ProgressList) { var filePath = loadingStatus.FilePath;
1
/* * Original author: Don Marsh <donmarsh .at. u.washington.edu>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2013 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Collections.Generic; using System.Diagnostics; using System.Drawing; using System.Text; using System.Windows.Forms; using pwiz.Skyline.Model; using pwiz.Skyline.Model.AuditLog; using pwiz.Skyline.Model.DocSettings; using pwiz.Skyline.Model.Results; using pwiz.Skyline.Properties; using pwiz.Skyline.Util; using pwiz.Skyline.Util.Extensions; namespace pwiz.Skyline.Controls.Graphs { /// <summary> /// A window that progressively displays chromatogram data during file import. /// </summary> public partial class AllChromatogramsGraph : FormEx { private readonly Stopwatch _stopwatch; private int _selected = -1; private bool _selectionIsSticky; private readonly int _multiFileWindowWidth; private readonly List<MsDataFileUri> _partialProgressList = new List<MsDataFileUri>(); private DateTime _retryTime; private int _nextRetry; private ImportResultsRetryCountdownDlg _retryDlg; private const int RETRY_INTERVAL = 10; private const int RETRY_COUNTDOWN = 30; //private static readonly Log LOG = new Log<AllChromatogramsGraph>(); public AllChromatogramsGraph() { InitializeComponent(); toolStrip1.Renderer = new CustomToolStripProfessionalRenderer(); _stopwatch = new Stopwatch(); _multiFileWindowWidth = Size.Width; } protected override void OnLoad(EventArgs e) { base.OnLoad(e); if (DesignMode) return; Icon = Resources.Skyline; // Restore window placement. if (Program.DemoMode) { var rectScreen = Screen.PrimaryScreen.WorkingArea; StartPosition = FormStartPosition.Manual; Location = new Point(rectScreen.Right - Size.Width, rectScreen.Bottom - Size.Height); } else { Point location = Settings.Default.AllChromatogramsLocation; if (!location.IsEmpty) { StartPosition = FormStartPosition.Manual; // Make sure the window is entirely on screen Location = location; ForceOnScreen(); } } Move += WindowMove; btnAutoCloseWindow.Image = imageListPushPin.Images[Settings.Default.ImportResultsAutoCloseWindow ? 1 : 0]; btnAutoScaleGraphs.Image = imageListLock.Images[Settings.Default.ImportResultsAutoScaleGraph ? 1 : 0]; _stopwatch.Start(); _retryTime = DateTime.MaxValue; elapsedTimer.Tick += ElapsedTimer_Tick; } protected override void OnClosed(EventArgs e) { graphChromatograms.Finish(); } private void ElapsedTimer_Tick(object sender, EventArgs e) { // Update timer and overall progress bar. // ReSharper disable LocalizableElement lblDuration.Text = _stopwatch.Elapsed.ToString(@"hh\:mm\:ss"); // ReSharper restore LocalizableElement // Determine if we should automatically retry any failed file. 
if (_retryTime <= DateTime.Now) { _retryTime = DateTime.MaxValue; _retryDlg = new ImportResultsRetryCountdownDlg(RETRY_COUNTDOWN, () => { for (int i = 0; i < flowFileStatus.Controls.Count; i++) { var control = (FileProgressControl) flowFileStatus.Controls[_nextRetry]; if (++_nextRetry == flowFileStatus.Controls.Count) _nextRetry = 0; if (control.Error != null) { ChromatogramManager.RemoveFile(control.FilePath); Retry(control.Status); break; } } _retryDlg.Dispose(); }, () => { _stopwatch.Stop(); elapsedTimer.Stop(); _retryDlg.Dispose(); }); _retryDlg.ShowDialog(this); } } public ChromatogramManager ChromatogramManager { get; set; } public bool IsUserCanceled { get; private set; } public string Error { get { return textBoxError.Text; } } public FileProgressControl SelectedControl { get { return _selected >= 0 && _selected < flowFileStatus.Controls.Count ? (FileProgressControl) flowFileStatus.Controls[_selected] : null; } } public int Selected { get { return _selected; } set { if (_selected != value) { SetSelectedControl(false); _selected = value; SetSelectedControl(true); RefreshSelectedControl(); } } } private void SetSelectedControl(bool selected) { if (SelectedControl != null) SelectedControl.Selected = selected; } private void RefreshSelectedControl() { if (SelectedControl == null) return; flowFileStatus.AutoScroll = true; flowFileStatus.ScrollControlIntoView(SelectedControl); SelectedControl.Invalidate(); graphChromatograms.IsCanceled = SelectedControl.IsCanceled; if (SelectedControl.Error != null || SelectedControl.Warning != null) { textBoxError.Text = SelectedControl.GetErrorLog(cbMoreInfo.Checked); ShowControl(panelError); } else if (SelectedControl.Progress == 0) { labelFileName.Text = SelectedControl.FilePath.GetFileNameWithoutExtension(); ShowControl(labelFileName); } else { graphChromatograms.Key = SelectedControl.FilePath.GetFilePath(); ShowControl(graphChromatograms); } } public IEnumerable<FileStatus> Files { get { foreach (FileProgressControl control in flowFileStatus.Controls) { yield return new FileStatus { FilePath = control.FilePath, Progress = control.Progress, Error = control.Error }; } } } public class FileStatus { public MsDataFileUri FilePath { get; set; } public int Progress { get; set; } public string Error { get; set; } } public void RetryImport(int index) { Retry(((FileProgressControl) flowFileStatus.Controls[index]).Status); } public bool IsItemComplete(int index) { return ((FileProgressControl) flowFileStatus.Controls[index]).Status.IsComplete; } public bool IsItemCanceled(int index) { return ((FileProgressControl)flowFileStatus.Controls[index]).Status.IsCanceled; } protected override bool ProcessCmdKey(ref Message msg, Keys keyData) { switch (keyData) { case Keys.Up: if (Selected > 0) Selected--; break; case Keys.Down: if (Selected < flowFileStatus.Controls.Count - 1) Selected++; break; default: return base.ProcessCmdKey(ref msg, keyData); } return true; } /// <summary> /// Show final results of import before closing window. /// </summary> public void Finish() { // During retry interval, don't change anything. if (_retryTime < DateTime.MaxValue) return; _partialProgressList.Clear(); // Finish all files and remove from status. 
bool hadErrors = false; if (Settings.Default.ImportResultsDoAutoRetry) { foreach (FileProgressControl control in flowFileStatus.Controls) { control.Finish(); if (control.Error != null) hadErrors = true; } } if (hadErrors) { _retryTime = DateTime.Now + TimeSpan.FromSeconds(RETRY_INTERVAL); } else { _stopwatch.Stop(); elapsedTimer.Stop(); } progressBarTotal.Visible = false; btnCancel.Visible = false; btnHide.Text = Resources.AllChromatogramsGraph_Finish_Close; } public bool HasErrors { get { foreach (FileProgressControl control in flowFileStatus.Controls) { if (control.Error != null || control.Warning != null) return true; } return false; } } private void WindowMove(object sender, EventArgs e) { if (WindowState == FormWindowState.Normal) Settings.Default.AllChromatogramsLocation = Location; } /// <summary> /// Display chromatogram data. /// </summary> /// <param name="status"></param> public void UpdateStatus(MultiProgressStatus status) { // Update overall progress bar. if (_partialProgressList.Count == 0) { if (status.PercentComplete >= 0) // -1 value means "unknown" (possible if we are mid-completion). Just leave things alone in that case. { progressBarTotal.Value = status.PercentComplete; } } else { int percentComplete = 0; foreach (var path in _partialProgressList) { var matchingStatus = FindStatus(status, path); if (matchingStatus != null) { percentComplete += matchingStatus.IsFinal ? 100 : matchingStatus.PercentComplete; } } progressBarTotal.Value = percentComplete/_partialProgressList.Count; } // Add any new files. AddProgressControls(status); // Cancel missing files. CancelMissingFiles(status); if (Selected < 0) Selected = 0; // Update progress control for each file. for (int i = 0; i < status.ProgressList.Count; i++) { var loadingStatus = status.ProgressList[i]; graphChromatograms.UpdateStatus(loadingStatus); var progressControl = FindProgressControl(loadingStatus.FilePath); bool wasError = progressControl.Error != null; progressControl.SetStatus(loadingStatus); if (loadingStatus.IsError && !wasError) { if (!_selectionIsSticky) { _selectionIsSticky = true; Selected = i; flowFileStatus.ScrollControlIntoView(progressControl); } RemoveFailedFile(loadingStatus); } } RefreshSelectedControl(); // Update status for a single file. if (flowFileStatus.Controls.Count == 1) { Width = _multiFileWindowWidth - panelFileList.Width; panelFileList.Visible = false; btnAutoScaleGraphs.Visible = false; return; } Width = _multiFileWindowWidth; panelFileList.Visible = true; btnAutoScaleGraphs.Visible = true; graphChromatograms.ScaleIsLocked = !Settings.Default.ImportResultsAutoScaleGraph; // If a file is successfully completed, automatically select another loading file. 
if (!_selectionIsSticky && (SelectedControl == null || SelectedControl.Progress == 100)) { for (int i = Selected + 1; i < flowFileStatus.Controls.Count; i++) { var control = (FileProgressControl) flowFileStatus.Controls[i]; if (!control.IsCanceled && control.Progress > 0 && control.Progress < 100) { Selected = i; flowFileStatus.ScrollControlIntoView(control); } } } if (!Finished) { btnCancel.Visible = true; btnHide.Text = Resources.AllChromatogramsGraph_UpdateStatus_Hide; progressBarTotal.Visible = true; _stopwatch.Start(); elapsedTimer.Start(); } } private void ShowControl(Control control) { panelError.Visible = ReferenceEquals(control, panelError); labelFileName.Visible = ReferenceEquals(control, labelFileName); graphChromatograms.Visible = ReferenceEquals(control, graphChromatograms); } private ChromatogramLoadingStatus FindStatus(MultiProgressStatus status, MsDataFileUri filePath) { foreach (ChromatogramLoadingStatus loadingStatus in status.ProgressList) { if (loadingStatus.FilePath.Equals(filePath)) { return loadingStatus; } } return null; } private void AddProgressControls(MultiProgressStatus status) { // Match each file status with a progress control. bool first = true; foreach (var loadingStatus in status.ProgressList) { var filePath = loadingStatus.FilePath; var progressControl = FindProgressControl(filePath); if (progressControl != null) continue; // Create a progress control for new file. progressControl = new FileProgressControl { Number = flowFileStatus.Controls.Count + 1, FilePath = filePath }; progressControl.SetToolTip(toolTip1, filePath.GetFilePath()); int index = progressControl.Number - 1; progressControl.ControlMouseDown += (sender, args) => { Selected = index; }; var thisLoadingStatus = loadingStatus; progressControl.Retry += (sender, args) => Retry(thisLoadingStatus); progressControl.Cancel += (sender, args) => Cancel(thisLoadingStatus); progressControl.ShowGraph += (sender, args) => ShowGraph(); progressControl.ShowLog += (sender, args) => ShowLog(); flowFileStatus.Controls.Add(progressControl); progressControl.BackColor = SystemColors.Control; if (first) { first = false; progressControl.Selected = true; } } foreach (FileProgressControl progressControl in flowFileStatus.Controls) { progressControl.Width = flowFileStatus.Width - 2 - (flowFileStatus.VerticalScroll.Visible ? 
SystemInformation.VerticalScrollBarWidth : 0); } } private void CancelMissingFiles(MultiProgressStatus status) { foreach (FileProgressControl progressControl in flowFileStatus.Controls) { if (!progressControl.IsComplete && !progressControl.IsCanceled) { bool found = false; foreach (var loadingStatus in status.ProgressList) { if (progressControl.FilePath.Equals(loadingStatus.FilePath)) { found = true; break; } } if (!found) progressControl.IsCanceled = true; } } } private FileProgressControl FindProgressControl(MsDataFileUri filePath) { foreach (FileProgressControl fileProgressControl in flowFileStatus.Controls) { if (fileProgressControl.FilePath.Equals(filePath)) return fileProgressControl; } return null; } private void Retry(ChromatogramLoadingStatus status) { ChromatogramManager.RemoveFile(status.FilePath); if (!_partialProgressList.Contains(status.FilePath)) _partialProgressList.Add(status.FilePath); graphChromatograms.ClearGraph(status.FilePath); for (int i = 0; i < flowFileStatus.Controls.Count; i++) { var control = (FileProgressControl) flowFileStatus.Controls[i]; if (control.FilePath.Equals(status.FilePath)) { control.Reset(); Selected = i; break; } } // Add this file back into the chromatogram set for each of its replicates. ModifyDocument(Resources.AllChromatogramsGraph_Retry_Retry_import_results, monitor => { Program.MainWindow.ModifyDocumentNoUndo(doc => { var oldResults = doc.Settings.MeasuredResults ?? new MeasuredResults(new ChromatogramSet[0]); var newResults = oldResults.AddDataFile(status.FilePath, status.ReplicateNames); return doc.ChangeMeasuredResults(newResults, monitor); }); }); } private void ModifyDocument(string message, Action<SrmSettingsChangeMonitor> modifyAction) { using (var longWaitDlg = new LongWaitDlg(Program.MainWindow) { Text = Text, // Same as dialog box Message = message, ProgressValue = 0 }) { try { longWaitDlg.PerformWork(this, 800, progressMonitor => { using (var settingsChangeMonitor = new SrmSettingsChangeMonitor(progressMonitor, message, Program.MainWindow)) { modifyAction(settingsChangeMonitor); } }); } catch (OperationCanceledException) { // SrmSettingsChangeMonitor can throw OperationCancelledException without LongWaitDlg knowing about it. } } } private void Cancel(ChromatogramLoadingStatus status) { // Remove this file from document. var canceledPath = status.FilePath; ModifyDocument(Resources.AllChromatogramsGraph_Cancel_Cancel_file_import, monitor => Program.MainWindow.ModifyDocumentNoUndo( doc => FilterFiles(doc, info => !info.FilePath.Equals(canceledPath)))); } private void RemoveFailedFile(ChromatogramLoadingStatus status) { // Remove this file from document. var canceledPath = status.FilePath; ModifyDocument(Resources.AllChromatogramsGraph_RemoveFailedFile_Remove_failed_file, monitor => Program.MainWindow.ModifyDocumentNoUndo( doc => FilterFiles(doc, info => !info.FilePath.Equals(canceledPath)))); } private void ShowGraph() { ShowControl(graphChromatograms); } private void ShowLog() { ShowControl(panelError); } // Close the window. private void btnClose_Click(object sender, EventArgs e) { ClickClose(); } public void ClickClose() { if (Finished) Program.MainWindow.DestroyAllChromatogramsGraph(); else Hide(); } public bool Finished { get { foreach (FileProgressControl control in flowFileStatus.Controls) { if (!control.IsCanceled && control.Error == null && control.Progress < 100) return false; } return true; } } // Cancel all uncached files. 
private void btnCancel_Click(object sender, EventArgs e) { ClickCancel(); } public void ClickCancel() { graphChromatograms.IsCanceled = IsUserCanceled = true; Program.MainWindow.ModifyDocument(Resources.AllChromatogramsGraph_btnCancel_Click_Cancel_import, doc => FilterFiles(doc, info => IsCachedFile(doc, info)), docPair => AuditLogEntry.CreateSimpleEntry(MessageType.canceled_import, docPair.OldDocumentType)); } private bool IsCachedFile(SrmDocument doc, ChromFileInfo info) { return doc.Settings.MeasuredResults.IsCachedFile(info.FilePath); } /// <summary> /// Filters document chromatograms for all but a selected set of files. /// </summary> private SrmDocument FilterFiles(SrmDocument doc, Func<ChromFileInfo, bool> selectFilesToKeepFunc) { if (doc.Settings.MeasuredResults == null) return doc; var measuredResultsNew = doc.Settings.MeasuredResults.FilterFiles(selectFilesToKeepFunc); // If nothing changed, don't create a new document instance if (measuredResultsNew != null && ArrayUtil.ReferencesEqual(measuredResultsNew.Chromatograms, doc.Settings.MeasuredResults.Chromatograms)) { return doc; } return doc.ChangeMeasuredResults(measuredResultsNew); } private void btnAutoCloseWindow_Click(object sender, EventArgs e) { ClickAutoCloseWindow(); } public void ClickAutoCloseWindow() { Settings.Default.ImportResultsAutoCloseWindow = !Settings.Default.ImportResultsAutoCloseWindow; btnAutoCloseWindow.Image = imageListPushPin.Images[Settings.Default.ImportResultsAutoCloseWindow ? 1 : 0]; } private void btnAutoScaleGraphs_Click(object sender, EventArgs e) { ClickAutoScaleGraphs(); } public void ClickAutoScaleGraphs() { Settings.Default.ImportResultsAutoScaleGraph = !Settings.Default.ImportResultsAutoScaleGraph; btnAutoScaleGraphs.Image = imageListLock.Images[Settings.Default.ImportResultsAutoScaleGraph ? 1 : 0]; graphChromatograms.ScaleIsLocked = !Settings.Default.ImportResultsAutoScaleGraph; } private void cbShowErrorDetails_CheckedChanged(object sender, EventArgs e) { textBoxError.Text = GetSelectedControlErrorLog(); } private string GetSelectedControlErrorLog() { return SelectedControl == null ? 
string.Empty : SelectedControl.GetErrorLog(cbMoreInfo.Checked); } private void btnCopyText_Click(object sender, EventArgs e) { ClipboardHelper.SetClipboardText(this, GetSelectedControlErrorLog()); } #region Testing Support public int ProgressTotalPercent { get { return (100*(progressBarTotal.Value - -progressBarTotal.Minimum))/(progressBarTotal.Maximum - progressBarTotal.Minimum); } } // Click the button for this named file - first click is cancel, which toggles to retry public void FileButtonClick(string name) { foreach (FileProgressControl control in flowFileStatus.Controls) { if (control.FilePath.GetFileName().Contains(name)) { control.ButtonClick(); break; } } } public IEnumerable<string> GetErrorMessages() { foreach (FileProgressControl control in flowFileStatus.Controls) { if (control.Error != null) yield return control.GetErrorLog(true); } } public override string DetailedMessage { get { var sb = new StringBuilder(); foreach (FileProgressControl control in flowFileStatus.Controls) { if (ReferenceEquals(SelectedControl, control)) sb.Append(@"-> "); if (control.Error != null) sb.AppendLine(string.Format(@"{0}: Error - {1}", control.FilePath, control.Error)); else if (control.IsCanceled) sb.AppendLine(string.Format(@"{0}: Canceled", control.FilePath)); else sb.AppendLine(string.Format(@"{0}: {1}%", control.FilePath, control.Progress)); } return TextUtil.LineSeparate(sb.ToString(), string.Format(@"Total complete: {0}%", ProgressTotalPercent)); } } #endregion } public class DisabledRichTextBox : RichTextBox { private const int WM_SETFOCUS = 0x07; private const int WM_ENABLE = 0x0A; private const int WM_SETCURSOR = 0x20; protected override void WndProc(ref Message m) { if (!(m.Msg == WM_SETFOCUS || m.Msg == WM_ENABLE || m.Msg == WM_SETCURSOR)) base.WndProc(ref m); } } class CustomToolStripProfessionalRenderer : ToolStripProfessionalRenderer { protected override void OnRenderToolStripBorder(ToolStripRenderEventArgs e) { // Don't draw a border } } }
1
12,839
Not sure what this is. Bad merge?
ProteoWizard-pwiz
.cs
@@ -171,7 +171,7 @@ func (v *veth) create(n *network, nspid int) (err error) { netlink.LinkDel(veth) } }() - if err := v.attach(&n.Network); err != nil { + if err = v.attach(&n.Network); err != nil { return err } child, err := netlink.LinkByName(n.TempVethPeerName)
1
// +build linux package libcontainer import ( "fmt" "io/ioutil" "net" "path/filepath" "strconv" "strings" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/utils" "github.com/vishvananda/netlink" ) var strategies = map[string]networkStrategy{ "veth": &veth{}, "loopback": &loopback{}, } // networkStrategy represents a specific network configuration for // a container's networking stack type networkStrategy interface { create(*network, int) error initialize(*network) error detach(*configs.Network) error attach(*configs.Network) error } // getStrategy returns the specific network strategy for the // provided type. func getStrategy(tpe string) (networkStrategy, error) { s, exists := strategies[tpe] if !exists { return nil, fmt.Errorf("unknown strategy type %q", tpe) } return s, nil } // Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo. func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) { out := &NetworkInterface{Name: interfaceName} // This can happen if the network runtime information is missing - possible if the // container was created by an old version of libcontainer. if interfaceName == "" { return out, nil } type netStatsPair struct { // Where to write the output. Out *uint64 // The network stats file to read. File string } // Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container. netStats := []netStatsPair{ {Out: &out.RxBytes, File: "tx_bytes"}, {Out: &out.RxPackets, File: "tx_packets"}, {Out: &out.RxErrors, File: "tx_errors"}, {Out: &out.RxDropped, File: "tx_dropped"}, {Out: &out.TxBytes, File: "rx_bytes"}, {Out: &out.TxPackets, File: "rx_packets"}, {Out: &out.TxErrors, File: "rx_errors"}, {Out: &out.TxDropped, File: "rx_dropped"}, } for _, netStat := range netStats { data, err := readSysfsNetworkStats(interfaceName, netStat.File) if err != nil { return nil, err } *(netStat.Out) = data } return out, nil } // Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) { data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile)) if err != nil { return 0, err } return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) } // loopback is a network strategy that provides a basic loopback device type loopback struct { } func (l *loopback) create(n *network, nspid int) error { return nil } func (l *loopback) initialize(config *network) error { return netlink.LinkSetUp(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "lo"}}) } func (l *loopback) attach(n *configs.Network) (err error) { return nil } func (l *loopback) detach(n *configs.Network) (err error) { return nil } // veth is a network strategy that uses a bridge and creates // a veth pair, one that is attached to the bridge on the host and the other // is placed inside the container's namespace type veth struct { } func (v *veth) detach(n *configs.Network) (err error) { return netlink.LinkSetMaster(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: n.HostInterfaceName}}, nil) } // attach a container network interface to an external network func (v *veth) attach(n *configs.Network) (err error) { brl, err := netlink.LinkByName(n.Bridge) if err != nil { return err } br, ok := brl.(*netlink.Bridge) if !ok { return fmt.Errorf("Wrong device type %T", brl) } host, err := 
netlink.LinkByName(n.HostInterfaceName) if err != nil { return err } if err := netlink.LinkSetMaster(host, br); err != nil { return err } if err := netlink.LinkSetMTU(host, n.Mtu); err != nil { return err } if n.HairpinMode { if err := netlink.LinkSetHairpin(host, true); err != nil { return err } } if err := netlink.LinkSetUp(host); err != nil { return err } return nil } func (v *veth) create(n *network, nspid int) (err error) { tmpName, err := v.generateTempPeerName() if err != nil { return err } n.TempVethPeerName = tmpName if n.Bridge == "" { return fmt.Errorf("bridge is not specified") } veth := &netlink.Veth{ LinkAttrs: netlink.LinkAttrs{ Name: n.HostInterfaceName, TxQLen: n.TxQueueLen, }, PeerName: n.TempVethPeerName, } if err := netlink.LinkAdd(veth); err != nil { return err } defer func() { if err != nil { netlink.LinkDel(veth) } }() if err := v.attach(&n.Network); err != nil { return err } child, err := netlink.LinkByName(n.TempVethPeerName) if err != nil { return err } return netlink.LinkSetNsPid(child, nspid) } func (v *veth) generateTempPeerName() (string, error) { return utils.GenerateRandomName("veth", 7) } func (v *veth) initialize(config *network) error { peer := config.TempVethPeerName if peer == "" { return fmt.Errorf("peer is not specified") } child, err := netlink.LinkByName(peer) if err != nil { return err } if err := netlink.LinkSetDown(child); err != nil { return err } if err := netlink.LinkSetName(child, config.Name); err != nil { return err } // get the interface again after we changed the name as the index also changes. if child, err = netlink.LinkByName(config.Name); err != nil { return err } if config.MacAddress != "" { mac, err := net.ParseMAC(config.MacAddress) if err != nil { return err } if err := netlink.LinkSetHardwareAddr(child, mac); err != nil { return err } } ip, err := netlink.ParseAddr(config.Address) if err != nil { return err } if err := netlink.AddrAdd(child, ip); err != nil { return err } if config.IPv6Address != "" { ip6, err := netlink.ParseAddr(config.IPv6Address) if err != nil { return err } if err := netlink.AddrAdd(child, ip6); err != nil { return err } } if err := netlink.LinkSetMTU(child, config.Mtu); err != nil { return err } if err := netlink.LinkSetUp(child); err != nil { return err } if config.Gateway != "" { gw := net.ParseIP(config.Gateway) if err := netlink.RouteAdd(&netlink.Route{ Scope: netlink.SCOPE_UNIVERSE, LinkIndex: child.Attrs().Index, Gw: gw, }); err != nil { return err } } if config.IPv6Gateway != "" { gw := net.ParseIP(config.IPv6Gateway) if err := netlink.RouteAdd(&netlink.Route{ Scope: netlink.SCOPE_UNIVERSE, LinkIndex: child.Attrs().Index, Gw: gw, }); err != nil { return err } } return nil }
1
14,423
I'm confused, what did this actually fix? Is there a reason we are setting `err` in the parent scope to `nil` in this round-about way?
opencontainers-runc
go
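The review question in this row concerns Go's named return values. In the original create, the deferred cleanup checks the named err, and the inner `if err := v.attach(...)` shadows it; but the explicit `return err` copies the shadowed value into the named return before the defer runs, so the cleanup fires on failure with either spelling, and the `:=` to `=` change is effectively a style cleanup in this function. A minimal, self-contained sketch (hypothetical names, not the runc code) showing that both forms trigger the deferred cleanup:

package main

import (
	"errors"
	"fmt"
)

// attach stands in (hypothetically) for v.attach in the row above.
func attach(fail bool) error {
	if fail {
		return errors.New("attach failed")
	}
	return nil
}

// createShadowed uses `err :=`, which shadows the named return inside the if.
// The explicit `return err` still copies the value into the named return
// before the deferred cleanup runs, so the cleanup fires on failure anyway.
func createShadowed(fail bool) (err error) {
	defer func() {
		if err != nil {
			fmt.Println("shadowed variant: cleanup ran") // stands in for netlink.LinkDel(veth)
		}
	}()
	if err := attach(fail); err != nil {
		return err
	}
	return nil
}

// createAssigned uses `err =`, writing straight to the named return, which is
// the form the patch switched to. The observable behaviour is the same here.
func createAssigned(fail bool) (err error) {
	defer func() {
		if err != nil {
			fmt.Println("assigned variant: cleanup ran")
		}
	}()
	if err = attach(fail); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(createShadowed(true))
	fmt.Println(createAssigned(true))
}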
@@ -1103,6 +1103,10 @@ func TestApplyEnv_Image_HealthCheck(t *testing.T) { } } +func TestApplyEnv_Platform(t *testing.T) { + // TODO: Add test for platform in the next PR +} + func TestApplyEnv_Entrypoint(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService)
1
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package manifest import ( "testing" "time" "github.com/aws/aws-sdk-go/aws" "github.com/stretchr/testify/require" ) /** How to add `ApplyEnv` unit test to a new manifest field: When writing tests for a field F (e.g. When writing `TestApplyEnv_Image`, where F would be the `image` field): For each subfield f in F: - If f has subfields || f is a composite type (e.g. `StringOrStringSlice`, `BuildStringOrArgs`) -> 1. Write a test case when f field is nil. 2. Write a test case when f field is non-nil. - Otherwise, write three test cases for f -> 1. Write a test case when f field is nil. 2. Write a test case when f field is non-nil, and the referenced value is empty (e.g., it is "", {}, 0). 3. Write a test case when f field is non-nil, and the referenced value is NOT empty. For each subfield f in F: - If f is mutually exclusive with another subfield g of F (e.g. `image.location` and `image.build` are mutually exclusive) -> 1. Write a test case that make sure f is nil when g is non-nil 2. Write a test case that make sure g is nil when f is non-nil For each subfield f in F: - If f has subfields || if f is a composite field -> Write another test group for this field (e.g. F is `image` and f is `image.build`, write another test functions named `TestApplyEnv_Image_Build`) Expected Behaviors: - Slice type: override-only. Take `security_groups` (which takes []string) as an example. If original is `[]string{1, 2}`, and environment override is `[]string{3}`, the result should be `[]string{3}`. - Map: override value of existing keys, append non-existing keys. */ func TestApplyEnv_Image(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService) wanted func(svc *LoadBalancedWebService) }{ "exclusive fields: build overridden if location is not nil": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), } svc.Environments["test"].ImageConfig.Location = aws.String("mockLocation") }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = aws.String("mockLocation") svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{}, } }, }, "exclusive fields: location overridden if build is not nil": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = aws.String("mockLocation") svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Dockerfile: aws.String("mockDockerfile"), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = nil svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Dockerfile: aws.String("mockDockerfile"), }, } }, }, "build overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuildTest"), } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuildTest"), } }, }, "build not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: 
aws.String("mockBuild"), } }, }, "location overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = aws.String("mockLocation") svc.Environments["test"].ImageConfig.Location = aws.String("mockLocationTest") }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = aws.String("mockLocationTest") }, }, "location explicitly overridden by zero value": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = aws.String("mockLocation") svc.Environments["test"].ImageConfig.Location = aws.String("") }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = aws.String("") }, }, "location not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = aws.String("mockLocation") svc.Environments["test"].ImageConfig.Image = Image{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Location = aws.String("mockLocation") }, }, "credentials overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Credentials = aws.String("mockCredentials") svc.Environments["test"].ImageConfig.Credentials = aws.String("mockCredentialsTest") }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Credentials = aws.String("mockCredentialsTest") }, }, "credentials explicitly overridden by zero value": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Credentials = aws.String("mockCredentials") svc.Environments["test"].ImageConfig.Credentials = aws.String("") }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Credentials = aws.String("") }, }, "credentials not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Credentials = aws.String("mockCredentials") svc.Environments["test"].ImageConfig.Image = Image{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Credentials = aws.String("mockCredentials") }, }, "labels overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.DockerLabels = map[string]string{ "mockLabel1": "1", "mockLabel2": "2", } svc.Environments["test"].ImageConfig.DockerLabels = map[string]string{ "mockLabel1": "3", // Override the value of mockLabel1 "mockLabel3": "3", // Append a new label mockLabel3 } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.DockerLabels = map[string]string{ "mockLabel1": "3", "mockLabel2": "2", "mockLabel3": "3", } }, }, "labels not overridden by empty map": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.DockerLabels = map[string]string{ "mockLabel1": "mockValue1", "mockLabel2": "mockValue2", } svc.Environments["test"].ImageConfig.DockerLabels = map[string]string{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.DockerLabels = map[string]string{ "mockLabel1": "mockValue1", "mockLabel2": "mockValue2", } }, }, "labels not overridden by nil": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.DockerLabels = map[string]string{ "mockLabel1": "mockValue1", "mockLabel2": "mockValue2", } svc.Environments["test"].ImageConfig.Image = Image{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.DockerLabels = map[string]string{ "mockLabel1": "mockValue1", "mockLabel2": "mockValue2", } }, }, "depends_on overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.DependsOn = map[string]string{ "mockContainer1": "1", "mockContainer2": "2", } svc.Environments["test"].ImageConfig.DependsOn = map[string]string{ "mockContainer1": "3", // Override the condition of mockContainer1 "mockContainer3": "3", // Append a new 
dependency on mockContainer3 } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.DependsOn = map[string]string{ "mockContainer1": "3", "mockContainer2": "2", "mockContainer3": "3", } }, }, "depends_on not overridden by empty map": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.DependsOn = map[string]string{ "mockContainer1": "1", "mockContainer2": "2", } svc.Environments["test"].ImageConfig.DependsOn = map[string]string{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.DependsOn = map[string]string{ "mockContainer1": "1", "mockContainer2": "2", } }, }, "depends_on not overridden by nil": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.DependsOn = map[string]string{ "mockContainer1": "1", "mockContainer2": "2", } svc.Environments["test"].ImageConfig.Image = Image{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.DependsOn = map[string]string{ "mockContainer1": "1", "mockContainer2": "2", } }, }, "port overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Port = aws.Uint16(1) svc.Environments["test"].ImageConfig.Port = aws.Uint16(2) }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Port = aws.Uint16(2) }, }, "port explicitly overridden by zero value": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Port = aws.Uint16(1) svc.Environments["test"].ImageConfig.Port = aws.Uint16(0) }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Port = aws.Uint16(0) }, }, "port not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Port = aws.Uint16(1) svc.Environments["test"].ImageConfig.Image = Image{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Port = aws.Uint16(1) }, }, "healthcheck overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(3), } mockInterval1Minute := 60 * time.Second svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockInterval1Minute, Retries: aws.Int(5), } }, wanted: func(svc *LoadBalancedWebService) { mockInterval1Minute := 60 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockInterval1Minute, Retries: aws.Int(5), } }, }, "healthcheck not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(3), } svc.Environments["test"].ImageConfig.Image = Image{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Command: nil, Interval: nil, Retries: aws.Int(3), Timeout: nil, StartPeriod: nil, } }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var inSvc, wantedSvc LoadBalancedWebService inSvc.Environments = map[string]*LoadBalancedWebServiceConfig{ "test": {}, } tc.inSvc(&inSvc) tc.wanted(&wantedSvc) got, err := inSvc.ApplyEnv("test") require.NoError(t, err) require.Equal(t, &wantedSvc, got) }) } } func TestApplyEnv_Image_Build(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService) wanted func(svc *LoadBalancedWebService) }{ "composite fields: build string is overridden if build args is not nil": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), Dockerfile: aws.String("mockDockerfile"), }, } }, wanted: func(svc 
*LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), Dockerfile: aws.String("mockDockerfile"), }, } }, }, "composite fields: build args is overridden if build string is not nil": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), Dockerfile: aws.String("mockDockerfile"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), BuildArgs: DockerBuildArgs{}, } }, }, "build string overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuildTest"), } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuildTest"), BuildArgs: DockerBuildArgs{}, } }, }, "build string explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String(""), } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String(""), BuildArgs: DockerBuildArgs{}, } }, }, "build string not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: aws.String("mockBuild"), BuildArgs: DockerBuildArgs{}, } }, }, "build arg overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String("mockContextTest"), Dockerfile: aws.String("mockDockerfileTest"), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Context: aws.String("mockContextTest"), Dockerfile: aws.String("mockDockerfileTest"), }, } }, }, "build arg not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), }, } }, }, "context overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String("mockContextTest"), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Context: aws.String("mockContextTest"), }, } }, }, 
"context explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String(""), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Context: aws.String(""), }, } }, }, "context not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Context: aws.String("mockContext"), }, } }, }, "dockerfile overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Dockerfile: aws.String("mockDockerfile"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Dockerfile: aws.String("mockDockerfileTest"), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Dockerfile: aws.String("mockDockerfileTest"), }, } }, }, "dockerfile explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Dockerfile: aws.String("mockDockerfile"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Dockerfile: aws.String(""), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Dockerfile: aws.String(""), }, } }, }, "dockerfile not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Dockerfile: aws.String("mockDockerfile"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Dockerfile: aws.String("mockDockerfile"), }, } }, }, //"FAILED TEST: args overridden": { // inSvc: func(svc *LoadBalancedWebService) { // svc.ImageConfig.Build = BuildArgsOrString{ // BuildArgs: DockerBuildArgs{ // Args: map[string]string{ // "mockArg1": "1", // "mockArg2": "2", // }, // }, // } // svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ // BuildArgs: DockerBuildArgs{ // Args: map[string]string{ // "mockArg1": "3", // Override value for mockArg1 // "mockArg3": "3", // Append an arg mockArg3 // }, // }, // } // }, // wanted: func(svc *LoadBalancedWebService) { // svc.ImageConfig.Build = BuildArgsOrString{ // BuildString: nil, // BuildArgs: DockerBuildArgs{ // Args: map[string]string{ // "mockArg1": "3", // "mockArg2": "2", // "mockArg3": "3", // }, // }, // } // }, //}, //"FAILED TEST: args not overridden by empty map": { // inSvc: func(svc *LoadBalancedWebService) { // svc.ImageConfig.Build = BuildArgsOrString{ // BuildArgs: DockerBuildArgs{ // Args: map[string]string{ // "mockArg1": "1", // "mockArg2": "2", // }, // }, // } // svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ 
// BuildArgs: DockerBuildArgs{ // Args: map[string]string{}, // }, // } // }, // wanted: func(svc *LoadBalancedWebService) { // svc.ImageConfig.Build = BuildArgsOrString{ // BuildString: nil, // BuildArgs: DockerBuildArgs{ // Args: map[string]string{ // "mockArg1": "1", // "mockArg2": "2", // }, // }, // } // }, //}, "args not overridden by nil": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Args: map[string]string{ "mockArg1": "1", "mockArg2": "2", }, }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Args: map[string]string{ "mockArg1": "1", "mockArg2": "2", }, }, } }, }, "target overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Target: aws.String("mockTarget"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Target: aws.String("mockTargetTest"), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Target: aws.String("mockTargetTest"), }, } }, }, "target explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Target: aws.String("mockTarget"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Target: aws.String(""), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Target: aws.String(""), }, } }, }, "target not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ Target: aws.String("mockTarget"), }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ Target: aws.String("mockTarget"), }, } }, }, "cacheFrom overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ CacheFrom: []string{"mock", "Cache"}, }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ CacheFrom: []string{"mock", "CacheTest", "Test"}, }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ CacheFrom: []string{"mock", "CacheTest", "Test"}, }, } }, }, "cacheFrom overridden by zero slice": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ CacheFrom: []string{"mock", "Cache"}, }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ CacheFrom: []string{}, }, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ CacheFrom: []string{}, }, } }, }, "cacheFrom not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildArgs: DockerBuildArgs{ CacheFrom: []string{"mock", "Cache"}, }, } svc.Environments["test"].ImageConfig.Build = BuildArgsOrString{ 
BuildArgs: DockerBuildArgs{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.Build = BuildArgsOrString{ BuildString: nil, BuildArgs: DockerBuildArgs{ CacheFrom: []string{"mock", "Cache"}, }, } }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var inSvc, wantedSvc LoadBalancedWebService inSvc.Environments = map[string]*LoadBalancedWebServiceConfig{ "test": {}, } tc.inSvc(&inSvc) tc.wanted(&wantedSvc) got, err := inSvc.ApplyEnv("test") require.NoError(t, err) require.Equal(t, &wantedSvc, got) }) } } func TestApplyEnv_Image_HealthCheck(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService) wanted func(svc *LoadBalancedWebService) }{ "command appended": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Command: []string{"mock", "command"}, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Command: []string{"mock", "command_test", "test"}, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Command: []string{"mock", "command_test", "test"}, } }, }, "command overridden by zero slice": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Command: []string{"mock", "command"}, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Command: []string{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Command: []string{}, } }, }, //"FAILED TEST: command not overridden": { // inSvc: func(svc *LoadBalancedWebService) { // svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ // Command: []string{"mock", "command"}, // } // svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{} // }, // wanted: func(svc *LoadBalancedWebService) { // svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ // Command: []string{"mock", "command"}, // } // }, //}, "interval overridden": { inSvc: func(svc *LoadBalancedWebService) { mockInterval := 600 * time.Second mockIntervalTest := 50 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockInterval, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockIntervalTest, } }, wanted: func(svc *LoadBalancedWebService) { mockIntervalTest := 50 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockIntervalTest, } }, }, "interval explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { mockInterval := 600 * time.Second mockIntervalTest := 0 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockInterval, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockIntervalTest, } }, wanted: func(svc *LoadBalancedWebService) { mockIntervalTest := 0 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockIntervalTest, } }, }, "interval not overridden": { inSvc: func(svc *LoadBalancedWebService) { mockInterval := 600 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockInterval, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{} }, wanted: func(svc *LoadBalancedWebService) { mockIntervalTest := 600 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Interval: &mockIntervalTest, } }, }, "retries overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = 
&ContainerHealthCheck{ Retries: aws.Int(13), } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(42), } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(42), } }, }, "retries explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(13), } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(0), } }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(0), } }, }, "retries not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(13), } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{} }, wanted: func(svc *LoadBalancedWebService) { svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Retries: aws.Int(13), } }, }, "timeout overridden": { inSvc: func(svc *LoadBalancedWebService) { mockTimeout := 60 * time.Second mockTimeoutTest := 400 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Timeout: &mockTimeout, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Timeout: &mockTimeoutTest, } }, wanted: func(svc *LoadBalancedWebService) { mockTimeoutTest := 400 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Timeout: &mockTimeoutTest, } }, }, "timeout explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { mockTimeout := 60 * time.Second mockTimeoutTest := 0 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Timeout: &mockTimeout, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ Timeout: &mockTimeoutTest, } }, wanted: func(svc *LoadBalancedWebService) { mockTimeoutTest := 0 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Timeout: &mockTimeoutTest, } }, }, "timeout not overridden": { inSvc: func(svc *LoadBalancedWebService) { mockTimeout := 60 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Timeout: &mockTimeout, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{} }, wanted: func(svc *LoadBalancedWebService) { mockTimeoutTest := 60 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ Timeout: &mockTimeoutTest, } }, }, "start_period overridden": { inSvc: func(svc *LoadBalancedWebService) { mockStartPeriod := 10 * time.Second mockStartPeriodTest := 300 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ StartPeriod: &mockStartPeriod, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ StartPeriod: &mockStartPeriodTest, } }, wanted: func(svc *LoadBalancedWebService) { mockStartPeriod := 300 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ StartPeriod: &mockStartPeriod, } }, }, "start_period explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { mockStartPeriod := 10 * time.Second mockStartPeriodTest := 0 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ StartPeriod: &mockStartPeriod, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{ StartPeriod: &mockStartPeriodTest, } }, wanted: func(svc *LoadBalancedWebService) { mockStartPeriod := 0 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ StartPeriod: &mockStartPeriod, } }, }, "start_period not overridden": { 
inSvc: func(svc *LoadBalancedWebService) { mockStartPeriod := 10 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ StartPeriod: &mockStartPeriod, } svc.Environments["test"].ImageConfig.HealthCheck = &ContainerHealthCheck{} }, wanted: func(svc *LoadBalancedWebService) { mockStartPeriod := 10 * time.Second svc.ImageConfig.HealthCheck = &ContainerHealthCheck{ StartPeriod: &mockStartPeriod, } }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var inSvc, wantedSvc LoadBalancedWebService inSvc.Environments = map[string]*LoadBalancedWebServiceConfig{ "test": {}, } tc.inSvc(&inSvc) tc.wanted(&wantedSvc) got, err := inSvc.ApplyEnv("test") require.NoError(t, err) require.Equal(t, &wantedSvc, got) }) } } func TestApplyEnv_Entrypoint(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService) wanted func(svc *LoadBalancedWebService) }{ "composite fields: string slice is overridden if string is not nil": { inSvc: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ StringSlice: []string{"mock", "entrypoint"}, } svc.Environments["test"].EntryPoint = &EntryPointOverride{ String: aws.String("mock entrypoint test"), } }, wanted: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ String: aws.String("mock entrypoint test"), } }, }, //"FAILED TEST: composite fields: string is overridden if string slice is not nil": { // inSvc: func(svc *LoadBalancedWebService) { // svc.EntryPoint = &EntryPointOverride{ // String: aws.String("mock entrypoint"), // } // svc.Environments["test"].EntryPoint = &EntryPointOverride{ // StringSlice: []string{"mock", "entrypoint_test", "test"}, // } // }, // wanted: func(svc *LoadBalancedWebService) { // svc.EntryPoint = &EntryPointOverride{ // StringSlice: []string{"mock", "entrypoint_test", "test"}, // } // }, //}, "string overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ String: aws.String("mock entrypoint"), } svc.Environments["test"].EntryPoint = &EntryPointOverride{ String: aws.String("mock entrypoint test"), } }, wanted: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ String: aws.String("mock entrypoint test"), } }, }, "string explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ String: aws.String("mock entrypoint"), } svc.Environments["test"].EntryPoint = &EntryPointOverride{ String: aws.String(""), } }, wanted: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ String: aws.String(""), } }, }, "string not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ String: aws.String("mock entrypoint"), } svc.Environments["test"].ImageOverride = ImageOverride{} }, wanted: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ String: aws.String("mock entrypoint"), } }, }, "string slice overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ StringSlice: []string{"mock", "entrypoint"}, } svc.Environments["test"].EntryPoint = &EntryPointOverride{ StringSlice: []string{"mock", "entrypoint_test", "test"}, } }, wanted: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ StringSlice: []string{"mock", "entrypoint_test", "test"}, } }, }, "string slice explicitly overridden by zero slice": { inSvc: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ StringSlice: 
[]string{"mock", "entrypoint"}, } svc.Environments["test"].EntryPoint = &EntryPointOverride{ StringSlice: []string{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ StringSlice: []string{}, } }, }, "string slice not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ StringSlice: []string{"mock", "entrypoint"}, } svc.Environments["test"].ImageOverride = ImageOverride{} }, wanted: func(svc *LoadBalancedWebService) { svc.EntryPoint = &EntryPointOverride{ StringSlice: []string{"mock", "entrypoint"}, } }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var inSvc, wantedSvc LoadBalancedWebService inSvc.Environments = map[string]*LoadBalancedWebServiceConfig{ "test": {}, } tc.inSvc(&inSvc) tc.wanted(&wantedSvc) got, err := inSvc.ApplyEnv("test") require.NoError(t, err) require.Equal(t, &wantedSvc, got) }) } } func TestApplyEnv_Command(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService) wanted func(svc *LoadBalancedWebService) }{ "composite fields: string slice is overridden if string is not nil": { inSvc: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ StringSlice: []string{"mock", "command"}, } svc.Environments["test"].Command = &CommandOverride{ String: aws.String("mock command test"), } }, wanted: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ String: aws.String("mock command test"), } }, }, //"FAILED TEST: composite fields: string is overridden if string slice is not nil": { // inSvc: func(svc *LoadBalancedWebService) { // svc.Command = &CommandOverride{ // String: aws.String("mock command"), // } // svc.Environments["test"].Command = &CommandOverride{ // StringSlice: []string{"mock", "command_test", "test"}, // } // }, // wanted: func(svc *LoadBalancedWebService) { // svc.Command = &CommandOverride{ // StringSlice: []string{"mock", "command_test", "test"}, // } // }, //}, "string overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ String: aws.String("mock command"), } svc.Environments["test"].Command = &CommandOverride{ String: aws.String("mock command test"), } }, wanted: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ String: aws.String("mock command test"), } }, }, "string explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ String: aws.String("mock command"), } svc.Environments["test"].Command = &CommandOverride{ String: aws.String(""), } }, wanted: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ String: aws.String(""), } }, }, "string not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ String: aws.String("mock command"), } svc.Environments["test"].ImageOverride = ImageOverride{} }, wanted: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ String: aws.String("mock command"), } }, }, "string slice overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ StringSlice: []string{"mock", "command"}, } svc.Environments["test"].Command = &CommandOverride{ StringSlice: []string{"mock", "command_test", "test"}, } }, wanted: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ StringSlice: []string{"mock", "command_test", "test"}, } }, }, "string slice explicitly overridden by zero slice": { inSvc: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ 
StringSlice: []string{"mock", "command"}, } svc.Environments["test"].Command = &CommandOverride{ StringSlice: []string{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ StringSlice: []string{}, } }, }, "string slice not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ StringSlice: []string{"mock", "command"}, } svc.Environments["test"].ImageOverride = ImageOverride{} }, wanted: func(svc *LoadBalancedWebService) { svc.Command = &CommandOverride{ StringSlice: []string{"mock", "command"}, } }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var inSvc, wantedSvc LoadBalancedWebService inSvc.Environments = map[string]*LoadBalancedWebServiceConfig{ "test": {}, } tc.inSvc(&inSvc) tc.wanted(&wantedSvc) got, err := inSvc.ApplyEnv("test") require.NoError(t, err) require.Equal(t, &wantedSvc, got) }) } } func TestApplyEnv_Logging(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService) wanted func(svc *LoadBalancedWebService) }{ "image overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Image: aws.String("mockImage"), } svc.Environments["test"].Logging = &Logging{ Image: aws.String("mockImageTest"), } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Image: aws.String("mockImageTest"), } }, }, "image explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Image: aws.String("mockImage"), } svc.Environments["test"].Logging = &Logging{ Image: aws.String(""), } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Image: aws.String(""), } }, }, "image not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Image: aws.String("mockImage"), } svc.Environments["test"].Logging = &Logging{} }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Image: aws.String("mockImage"), } }, }, "destination overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Destination: map[string]string{ "mockDestination1": "1", "mockDestination2": "2", }, } svc.Environments["test"].Logging = &Logging{ Destination: map[string]string{ "mockDestination1": "3", // Modify the value of mockDestination1. "mockDestination3": "3", // Append mockDestination3. 
}, } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Destination: map[string]string{ "mockDestination1": "3", "mockDestination2": "2", "mockDestination3": "3", }, } }, }, "destination not overridden by empty map": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Destination: map[string]string{ "mockDestination1": "1", "mockDestination2": "2", }, } svc.Environments["test"].Logging = &Logging{ Destination: map[string]string{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Destination: map[string]string{ "mockDestination1": "1", "mockDestination2": "2", }, } }, }, "destination not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Destination: map[string]string{ "mockDestination1": "1", "mockDestination2": "2", }, } svc.Environments["test"].Logging = &Logging{} }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ Destination: map[string]string{ "mockDestination1": "1", "mockDestination2": "2", }, } }, }, "enableMetadata overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ EnableMetadata: aws.Bool(false), } svc.Environments["test"].Logging = &Logging{ EnableMetadata: aws.Bool(true), } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ EnableMetadata: aws.Bool(true), } }, }, "enableMetadata explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ EnableMetadata: aws.Bool(true), } svc.Environments["test"].Logging = &Logging{ EnableMetadata: aws.Bool(false), } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ EnableMetadata: aws.Bool(false), } }, }, "enableMetadata not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ EnableMetadata: aws.Bool(true), } svc.Environments["test"].Logging = &Logging{} }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ EnableMetadata: aws.Bool(true), } }, }, "secretOptions overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ SecretOptions: map[string]string{ "mockSecretOption1": "1", "mockSecretOption2": "2", }, } svc.Environments["test"].Logging = &Logging{ SecretOptions: map[string]string{ "mockSecretOption1": "3", // Modify the value of mockSecretOption1. "mockSecretOption3": "3", // Append mockSecretOption3. 
}, } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ SecretOptions: map[string]string{ "mockSecretOption1": "3", "mockSecretOption2": "2", "mockSecretOption3": "3", }, } }, }, "secretOptions not overridden by empty map": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ SecretOptions: map[string]string{ "mockSecretOption1": "1", "mockSecretOption2": "2", }, } svc.Environments["test"].Logging = &Logging{ SecretOptions: map[string]string{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ SecretOptions: map[string]string{ "mockSecretOption1": "1", "mockSecretOption2": "2", }, } }, }, "secretOptions not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ SecretOptions: map[string]string{ "mockSecretOption1": "1", "mockSecretOption2": "2", }, } svc.Environments["test"].Logging = &Logging{} }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ SecretOptions: map[string]string{ "mockSecretOption1": "1", "mockSecretOption2": "2", }, } }, }, "configFilePath overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ ConfigFile: aws.String("mockPath"), } svc.Environments["test"].Logging = &Logging{ ConfigFile: aws.String("mockPathTest"), } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ ConfigFile: aws.String("mockPathTest"), } }, }, "configFilePath explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ ConfigFile: aws.String("mockPath"), } svc.Environments["test"].Logging = &Logging{ ConfigFile: aws.String(""), } }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ ConfigFile: aws.String(""), } }, }, "configFilePath not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ ConfigFile: aws.String("mockPath"), } svc.Environments["test"].Logging = &Logging{} }, wanted: func(svc *LoadBalancedWebService) { svc.Logging = &Logging{ ConfigFile: aws.String("mockPath"), } }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var inSvc, wantedSvc LoadBalancedWebService inSvc.Environments = map[string]*LoadBalancedWebServiceConfig{ "test": {}, } tc.inSvc(&inSvc) tc.wanted(&wantedSvc) got, err := inSvc.ApplyEnv("test") require.NoError(t, err) require.Equal(t, &wantedSvc, got) }) } } func TestApplyEnv_Network(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService) wanted func(svc *LoadBalancedWebService) }{ "vpc overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacement"), }, } svc.Environments["test"].Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacementTest"), SecurityGroups: []string{"mock", "security", "group"}, }, } }, wanted: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacementTest"), SecurityGroups: []string{"mock", "security", "group"}, }, } }, }, "vpc not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacement"), }, } svc.Environments["test"].Network = &NetworkConfig{} }, wanted: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacement"), }, } }, }, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var inSvc, wantedSvc LoadBalancedWebService inSvc.Environments 
= map[string]*LoadBalancedWebServiceConfig{ "test": {}, } tc.inSvc(&inSvc) tc.wanted(&wantedSvc) got, err := inSvc.ApplyEnv("test") require.NoError(t, err) require.Equal(t, &wantedSvc, got) }) } } func TestApplyEnv_Network_VPC(t *testing.T) { testCases := map[string]struct { inSvc func(svc *LoadBalancedWebService) wanted func(svc *LoadBalancedWebService) }{ "placement overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacement"), }, } svc.Environments["test"].Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacementTest"), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacementTest"), }, } }, }, "placement explicitly overridden by empty value": { inSvc: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacement"), }, } svc.Environments["test"].Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String(""), }, } }, wanted: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String(""), }, } }, }, "placement not overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacement"), }, } svc.Environments["test"].Network = &NetworkConfig{ VPC: &vpcConfig{}, } }, wanted: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ Placement: aws.String("mockPlacement"), }, } }, }, "security_groups overridden": { inSvc: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ SecurityGroups: []string{"mock", "security_group"}, }, } svc.Environments["test"].Network = &NetworkConfig{ VPC: &vpcConfig{ SecurityGroups: []string{"mock", "security_group_test", "test"}, }, } }, wanted: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ SecurityGroups: []string{"mock", "security_group_test", "test"}, }, } }, }, "security_groups overridden by zero slice": { inSvc: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ SecurityGroups: []string{"mock", "security_group"}, }, } svc.Environments["test"].Network = &NetworkConfig{ VPC: &vpcConfig{ SecurityGroups: []string{}, }, } }, wanted: func(svc *LoadBalancedWebService) { svc.Network = &NetworkConfig{ VPC: &vpcConfig{ SecurityGroups: []string{}, }, } }, }, //"FAILED TEST: security_groups not overridden": { // inSvc: func(svc *LoadBalancedWebService) { // svc.Network = &NetworkConfig{ // VPC: &vpcConfig{ // SecurityGroups: []string{"mock", "security_group"}, // }, // } // svc.Environments["test"].Network = &NetworkConfig{ // VPC: &vpcConfig{}, // } // }, // wanted: func(svc *LoadBalancedWebService) { // svc.Network = &NetworkConfig{ // VPC: &vpcConfig{ // SecurityGroups: []string{"mock", "security_group"}, // }, // } // }, //}, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { var inSvc, wantedSvc LoadBalancedWebService inSvc.Environments = map[string]*LoadBalancedWebServiceConfig{ "test": {}, } tc.inSvc(&inSvc) tc.wanted(&wantedSvc) got, err := inSvc.ApplyEnv("test") require.NoError(t, err) require.Equal(t, &wantedSvc, got) }) } }
1
18,808
Can we remove this?
aws-copilot-cli
go
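The file in this row spells out its ApplyEnv override rules in the leading comment: slice-typed fields are override-only (an environment slice, even an empty one, replaces the base, while a nil slice leaves it alone), and map-typed fields merge, with environment values winning on existing keys and new keys appended, except that an empty environment map changes nothing. A minimal sketch of those rules (hypothetical helpers, not the copilot-cli implementation):

package main

import "fmt"

// overrideSlice: environment slice replaces the base outright; nil means
// "not set in this environment", so the base survives.
func overrideSlice(base, env []string) []string {
	if env == nil {
		return base
	}
	return env // an empty (non-nil) slice still overrides
}

// overrideMap: environment keys win, missing keys are appended; an empty
// environment map leaves the base untouched.
func overrideMap(base, env map[string]string) map[string]string {
	if len(env) == 0 {
		return base
	}
	out := make(map[string]string, len(base)+len(env))
	for k, v := range base {
		out[k] = v
	}
	for k, v := range env {
		out[k] = v
	}
	return out
}

func main() {
	fmt.Println(overrideSlice([]string{"1", "2"}, []string{"3"})) // [3]
	fmt.Println(overrideMap(
		map[string]string{"a": "1", "b": "2"},
		map[string]string{"a": "3", "c": "3"},
	)) // map[a:3 b:2 c:3]
}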
@@ -0,0 +1,7 @@ +class NewLanguageConfirmationsController < ApplicationController + def index + redirect_to welcome_to_upcase_path( + confirmation: true, language_selected: params[:language], + ), notice: "Thanks for signing up. We will be in touch!" + end +end
1
1
18,254
Put a comma after the last parameter of a multiline method call.
thoughtbot-upcase
rb
@@ -48,12 +48,13 @@ func TestBlockDAO(t *testing.T) { tsf3, err := testutil.SignedTransfer(testaddress.Addrinfo["charlie"].String(), testaddress.Keyinfo["charlie"].PriKey, 3, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0)) require.NoError(t, err) - // create testing votes - vote1, err := testutil.SignedVote(testaddress.Addrinfo["alfa"].String(), testaddress.Keyinfo["alfa"].PriKey, 1, 100000, big.NewInt(10)) + tsf4, err := testutil.SignedTransfer(testaddress.Addrinfo["alfa"].String(), testaddress.Keyinfo["alfa"].PriKey, 2, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0)) require.NoError(t, err) - vote2, err := testutil.SignedVote(testaddress.Addrinfo["bravo"].String(), testaddress.Keyinfo["bravo"].PriKey, 1, 100000, big.NewInt(10)) + + tsf5, err := testutil.SignedTransfer(testaddress.Addrinfo["bravo"].String(), testaddress.Keyinfo["bravo"].PriKey, 3, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0)) require.NoError(t, err) - vote3, err := testutil.SignedVote(testaddress.Addrinfo["charlie"].String(), testaddress.Keyinfo["charlie"].PriKey, 1, 100000, big.NewInt(10)) + + tsf6, err := testutil.SignedTransfer(testaddress.Addrinfo["charlie"].String(), testaddress.Keyinfo["charlie"].PriKey, 4, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0)) require.NoError(t, err) // create testing executions
1
// Copyright (c) 2018 IoTeX // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blockchain import ( "context" "fmt" "hash/fnv" "io/ioutil" "math/big" "math/rand" "os" "path/filepath" "testing" "time" "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/iotexproject/iotex-core/action" "github.com/iotexproject/iotex-core/blockchain/block" "github.com/iotexproject/iotex-core/blockchain/genesis" "github.com/iotexproject/iotex-core/config" "github.com/iotexproject/iotex-core/db" "github.com/iotexproject/iotex-core/pkg/hash" "github.com/iotexproject/iotex-core/pkg/unit" "github.com/iotexproject/iotex-core/pkg/util/fileutil" "github.com/iotexproject/iotex-core/test/identityset" "github.com/iotexproject/iotex-core/test/testaddress" "github.com/iotexproject/iotex-core/testutil" ) func TestBlockDAO(t *testing.T) { getBlocks := func() []*block.Block { amount := uint64(50 << 22) tsf1, err := testutil.SignedTransfer(testaddress.Addrinfo["alfa"].String(), testaddress.Keyinfo["alfa"].PriKey, 1, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0)) require.NoError(t, err) tsf2, err := testutil.SignedTransfer(testaddress.Addrinfo["bravo"].String(), testaddress.Keyinfo["bravo"].PriKey, 2, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0)) require.NoError(t, err) tsf3, err := testutil.SignedTransfer(testaddress.Addrinfo["charlie"].String(), testaddress.Keyinfo["charlie"].PriKey, 3, big.NewInt(int64(amount)), nil, genesis.Default.ActionGasLimit, big.NewInt(0)) require.NoError(t, err) // create testing votes vote1, err := testutil.SignedVote(testaddress.Addrinfo["alfa"].String(), testaddress.Keyinfo["alfa"].PriKey, 1, 100000, big.NewInt(10)) require.NoError(t, err) vote2, err := testutil.SignedVote(testaddress.Addrinfo["bravo"].String(), testaddress.Keyinfo["bravo"].PriKey, 1, 100000, big.NewInt(10)) require.NoError(t, err) vote3, err := testutil.SignedVote(testaddress.Addrinfo["charlie"].String(), testaddress.Keyinfo["charlie"].PriKey, 1, 100000, big.NewInt(10)) require.NoError(t, err) // create testing executions execution1, err := testutil.SignedExecution(testaddress.Addrinfo["delta"].String(), testaddress.Keyinfo["alfa"].PriKey, 1, big.NewInt(1), 0, big.NewInt(0), nil) require.NoError(t, err) execution2, err := testutil.SignedExecution(testaddress.Addrinfo["delta"].String(), testaddress.Keyinfo["bravo"].PriKey, 2, big.NewInt(0), 0, big.NewInt(0), nil) require.NoError(t, err) execution3, err := testutil.SignedExecution(testaddress.Addrinfo["delta"].String(), testaddress.Keyinfo["charlie"].PriKey, 3, big.NewInt(2), 0, big.NewInt(0), nil) require.NoError(t, err) // create testing create deposit actions deposit1 := action.NewCreateDeposit( 4, 2, big.NewInt(1), testaddress.Addrinfo["delta"].String(), testutil.TestGasLimit, big.NewInt(0), ) bd := &action.EnvelopeBuilder{} elp := bd.SetNonce(4). SetGasLimit(testutil.TestGasLimit). 
SetAction(deposit1).Build() sdeposit1, err := action.Sign(elp, testaddress.Keyinfo["alfa"].PriKey) require.NoError(t, err) deposit2 := action.NewCreateDeposit( 5, 2, big.NewInt(2), testaddress.Addrinfo["delta"].String(), testutil.TestGasLimit, big.NewInt(0), ) bd = &action.EnvelopeBuilder{} elp = bd.SetNonce(5). SetGasLimit(testutil.TestGasLimit). SetAction(deposit2).Build() sdeposit2, err := action.Sign(elp, testaddress.Keyinfo["bravo"].PriKey) require.NoError(t, err) deposit3 := action.NewCreateDeposit( 6, 2, big.NewInt(3), testaddress.Addrinfo["delta"].String(), testutil.TestGasLimit, big.NewInt(0), ) bd = &action.EnvelopeBuilder{} elp = bd.SetNonce(6). SetGasLimit(testutil.TestGasLimit). SetAction(deposit3).Build() sdeposit3, err := action.Sign(elp, testaddress.Keyinfo["charlie"].PriKey) require.NoError(t, err) hash1 := hash.Hash256{} fnv.New32().Sum(hash1[:]) blk1, err := block.NewTestingBuilder(). SetHeight(1). SetPrevBlockHash(hash1). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf1, vote1, execution1, sdeposit1). SignAndBuild(testaddress.Keyinfo["producer"].PubKey, testaddress.Keyinfo["producer"].PriKey) require.NoError(t, err) hash2 := hash.Hash256{} fnv.New32().Sum(hash2[:]) blk2, err := block.NewTestingBuilder(). SetHeight(2). SetPrevBlockHash(hash2). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf2, vote2, execution2, sdeposit2). SignAndBuild(testaddress.Keyinfo["producer"].PubKey, testaddress.Keyinfo["producer"].PriKey) require.NoError(t, err) hash3 := hash.Hash256{} fnv.New32().Sum(hash3[:]) blk3, err := block.NewTestingBuilder(). SetHeight(3). SetPrevBlockHash(hash3). SetTimeStamp(testutil.TimestampNow()). AddActions(tsf3, vote3, execution3, sdeposit3). SignAndBuild(testaddress.Keyinfo["producer"].PubKey, testaddress.Keyinfo["producer"].PriKey) require.NoError(t, err) return []*block.Block{&blk1, &blk2, &blk3} } blks := getBlocks() assert.Equal(t, 3, len(blks)) testBlockDao := func(kvstore db.KVStore, t *testing.T) { ctx := context.Background() dao := newBlockDAO(kvstore, false, false, 0) err := dao.Start(ctx) assert.Nil(t, err) defer func() { err = dao.Stop(ctx) assert.Nil(t, err) }() height, err := dao.getBlockchainHeight() assert.Nil(t, err) assert.Equal(t, uint64(0), height) // block put order is 0 2 1 err = dao.putBlock(blks[0]) assert.Nil(t, err) blk, err := dao.getBlock(blks[0].HashBlock()) assert.Nil(t, err) require.NotNil(t, blk) assert.Equal(t, blks[0].Actions[0].Hash(), blk.Actions[0].Hash()) height, err = dao.getBlockchainHeight() assert.Nil(t, err) assert.Equal(t, uint64(1), height) err = dao.putBlock(blks[2]) assert.Nil(t, err) blk, err = dao.getBlock(blks[2].HashBlock()) assert.Nil(t, err) assert.NotNil(t, blk) assert.Equal(t, blks[2].Actions[0].Hash(), blk.Actions[0].Hash()) height, err = dao.getBlockchainHeight() assert.Nil(t, err) assert.Equal(t, uint64(3), height) err = dao.putBlock(blks[1]) assert.Nil(t, err) blk, err = dao.getBlock(blks[1].HashBlock()) assert.Nil(t, err) assert.NotNil(t, blk) assert.Equal(t, blks[1].Actions[0].Hash(), blk.Actions[0].Hash()) height, err = dao.getBlockchainHeight() assert.Nil(t, err) assert.Equal(t, uint64(3), height) // test getting hash by height hash, err := dao.getBlockHash(1) assert.Nil(t, err) assert.Equal(t, blks[0].HashBlock(), hash) hash, err = dao.getBlockHash(2) assert.Nil(t, err) assert.Equal(t, blks[1].HashBlock(), hash) hash, err = dao.getBlockHash(3) assert.Nil(t, err) assert.Equal(t, blks[2].HashBlock(), hash) // test getting height by hash height, err = dao.getBlockHeight(blks[0].HashBlock()) 
assert.Nil(t, err) assert.Equal(t, blks[0].Height(), height) height, err = dao.getBlockHeight(blks[1].HashBlock()) assert.Nil(t, err) assert.Equal(t, blks[1].Height(), height) height, err = dao.getBlockHeight(blks[2].HashBlock()) assert.Nil(t, err) assert.Equal(t, blks[2].Height(), height) } testActionsDao := func(kvstore db.KVStore, t *testing.T) { ctx := context.Background() dao := newBlockDAO(kvstore, true, false, 0) err := dao.Start(ctx) assert.Nil(t, err) defer func() { err = dao.Stop(ctx) assert.Nil(t, err) }() err = dao.putBlock(blks[0]) assert.Nil(t, err) err = dao.putBlock(blks[1]) assert.Nil(t, err) err = dao.putBlock(blks[2]) depositHash1 := blks[0].Actions[3].Hash() depositHash2 := blks[1].Actions[3].Hash() depositHash3 := blks[2].Actions[3].Hash() blkHash1 := blks[0].HashBlock() blkHash2 := blks[1].HashBlock() blkHash3 := blks[2].HashBlock() // Test getBlockHashByActionHash blkHash, err := getBlockHashByActionHash(dao.kvstore, depositHash1) require.NoError(t, err) require.Equal(t, blkHash1, blkHash) blkHash, err = getBlockHashByActionHash(dao.kvstore, depositHash2) require.NoError(t, err) require.Equal(t, blkHash2, blkHash) blkHash, err = getBlockHashByActionHash(dao.kvstore, depositHash3) require.NoError(t, err) require.Equal(t, blkHash3, blkHash) // Test get actions senderActionCount, err := getActionCountBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["alfa"].Bytes())) require.NoError(t, err) require.Equal(t, uint64(4), senderActionCount) senderActions, err := getActionsBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["alfa"].Bytes())) require.NoError(t, err) require.Equal(t, 4, len(senderActions)) require.Equal(t, depositHash1, senderActions[3]) recipientActionCount, err := getActionCountByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["alfa"].Bytes())) require.NoError(t, err) require.Equal(t, uint64(2), recipientActionCount) recipientActions, err := getActionsByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["alfa"].Bytes())) require.NoError(t, err) require.Equal(t, 2, len(recipientActions)) senderActionCount, err = getActionCountBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["bravo"].Bytes())) require.NoError(t, err) require.Equal(t, uint64(4), senderActionCount) senderActions, err = getActionsBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["bravo"].Bytes())) require.NoError(t, err) require.Equal(t, 4, len(senderActions)) require.Equal(t, depositHash2, senderActions[3]) recipientActionCount, err = getActionCountByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["bravo"].Bytes())) require.NoError(t, err) require.Equal(t, uint64(2), recipientActionCount) recipientActions, err = getActionsByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["bravo"].Bytes())) require.NoError(t, err) require.Equal(t, 2, len(recipientActions)) senderActionCount, err = getActionCountBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["charlie"].Bytes())) require.NoError(t, err) require.Equal(t, uint64(4), senderActionCount) senderActions, err = getActionsBySenderAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["charlie"].Bytes())) require.NoError(t, err) require.Equal(t, 4, len(senderActions)) require.Equal(t, depositHash3, senderActions[3]) recipientActionCount, err = getActionCountByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["charlie"].Bytes())) 
require.NoError(t, err) require.Equal(t, uint64(2), recipientActionCount) recipientActions, err = getActionsByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["charlie"].Bytes())) require.NoError(t, err) require.Equal(t, 2, len(recipientActions)) recipientActionCount, err = getActionCountByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["delta"].Bytes())) require.NoError(t, err) require.Equal(t, uint64(6), recipientActionCount) recipientActions, err = getActionsByRecipientAddress(dao.kvstore, hash.BytesToHash160(testaddress.Addrinfo["delta"].Bytes())) require.NoError(t, err) require.Equal(t, 6, len(recipientActions)) require.Equal(t, depositHash1, recipientActions[1]) require.Equal(t, depositHash2, recipientActions[3]) require.Equal(t, depositHash3, recipientActions[5]) } testDeleteDao := func(kvstore db.KVStore, t *testing.T) { require := require.New(t) ctx := context.Background() dao := newBlockDAO(kvstore, true, false, 0) err := dao.Start(ctx) require.NoError(err) defer func() { err = dao.Stop(ctx) assert.Nil(t, err) }() // Put blocks first err = dao.putBlock(blks[0]) require.NoError(err) err = dao.putBlock(blks[1]) require.NoError(err) err = dao.putBlock(blks[2]) require.NoError(err) tipHeight, err := dao.getBlockchainHeight() require.NoError(err) require.Equal(uint64(3), tipHeight) blk, err := dao.getBlock(blks[2].HashBlock()) require.NoError(err) require.NotNil(blk) // Delete tip block err = dao.deleteTipBlock() require.NoError(err) tipHeight, err = dao.getBlockchainHeight() require.NoError(err) require.Equal(uint64(2), tipHeight) blk, err = dao.getBlock(blks[2].HashBlock()) require.Equal(db.ErrNotExist, errors.Cause(err)) require.Nil(blk) } t.Run("In-memory KV Store for blocks", func(t *testing.T) { testBlockDao(db.NewMemKVStore(), t) }) path := "test-kv-store" testFile, _ := ioutil.TempFile(os.TempDir(), path) testPath := testFile.Name() cfg := config.Default.DB cfg.DbPath = testPath t.Run("Bolt DB for blocks", func(t *testing.T) { testBlockDao(db.NewOnDiskDB(cfg), t) }) t.Run("In-memory KV Store for actions", func(t *testing.T) { testActionsDao(db.NewMemKVStore(), t) }) t.Run("Bolt DB for actions", func(t *testing.T) { testActionsDao(db.NewOnDiskDB(cfg), t) }) t.Run("In-memory KV Store deletions", func(t *testing.T) { testDeleteDao(db.NewMemKVStore(), t) }) t.Run("Bolt DB deletions", func(t *testing.T) { testDeleteDao(db.NewOnDiskDB(cfg), t) }) } func TestBlockDao_putReceipts(t *testing.T) { blkDao := newBlockDAO(db.NewMemKVStore(), true, false, 0) receipts := []*action.Receipt{ { BlockHeight: 1, ActionHash: hash.Hash256b([]byte("1")), Status: 1, GasConsumed: 1, ContractAddress: "1", Logs: []*action.Log{}, }, { BlockHeight: 1, ActionHash: hash.Hash256b([]byte("1")), Status: 2, GasConsumed: 2, ContractAddress: "2", Logs: []*action.Log{}, }, } require.NoError(t, blkDao.putReceipts(1, receipts)) for _, receipt := range receipts { r, err := blkDao.getReceiptByActionHash(receipt.ActionHash) require.NoError(t, err) assert.Equal(t, receipt.ActionHash, r.ActionHash) } } func BenchmarkBlockCache(b *testing.B) { test := func(cacheSize int, b *testing.B) { b.StopTimer() path := filepath.Join(os.TempDir(), fmt.Sprintf("test-%d.db", rand.Int())) cfg := config.DB{ DbPath: path, NumRetries: 1, } defer func() { if !fileutil.FileExists(path) { return } require.NoError(b, os.RemoveAll(path)) }() store := db.NewOnDiskDB(cfg) blkDao := newBlockDAO(store, false, false, cacheSize) require.NoError(b, blkDao.Start(context.Background())) defer func() { 
require.NoError(b, blkDao.Stop(context.Background())) }() prevHash := hash.ZeroHash256 var err error numBlks := 8640 for i := 1; i <= numBlks; i++ { actions := make([]action.SealedEnvelope, 10) for j := 0; j < 10; j++ { actions[j], err = testutil.SignedTransfer( identityset.Address(j).String(), identityset.PrivateKey(j+1), 1, unit.ConvertIotxToRau(1), nil, testutil.TestGasLimit, testutil.TestGasPrice, ) require.NoError(b, err) } tb := block.TestingBuilder{} blk, err := tb.SetPrevBlockHash(prevHash). SetVersion(1). SetTimeStamp(time.Now()). SetHeight(uint64(i)). AddActions(actions...). SignAndBuild(identityset.PrivateKey(0).PublicKey(), identityset.PrivateKey(0)) require.NoError(b, err) require.NoError(b, blkDao.putBlock(&blk)) prevHash = blk.HashBlock() } b.ResetTimer() b.StartTimer() for n := 0; n < b.N; n++ { hash, _ := blkDao.getBlockHash(uint64(rand.Intn(numBlks) + 1)) _, _ = blkDao.getBlock(hash) } b.StopTimer() } b.Run("cache", func(b *testing.B) { test(8640, b) }) b.Run("no-cache", func(b *testing.B) { test(0, b) }) }
1
17,582
line is 195 characters (from `lll`)
iotexproject-iotex-core
go
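The `lll` warning above refers to the 195-character `tsf4` line added in the patch. A minimal sketch of the usual remedy, assuming the same test context as the file (`t`, `amount`, `testutil`, `testaddress`, `genesis`): break the call's arguments onto separate lines so no single line exceeds the limit.

```go
// Same call as in the patch, reflowed so that no line exceeds the
// linter's length limit; behavior is unchanged.
tsf4, err := testutil.SignedTransfer(
	testaddress.Addrinfo["alfa"].String(),
	testaddress.Keyinfo["alfa"].PriKey,
	2,
	big.NewInt(int64(amount)),
	nil,
	genesis.Default.ActionGasLimit,
	big.NewInt(0),
)
require.NoError(t, err)
```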
@@ -29,8 +29,11 @@ import ( "github.com/pkg/errors" k8serror "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + klabels "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/util/slice" ) var (
1
/* Copyright 2018 The OpenEBS Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package spc import ( "fmt" "strings" "time" "github.com/golang/glog" apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1" openebs "github.com/openebs/maya/pkg/client/generated/clientset/versioned" env "github.com/openebs/maya/pkg/env/v1alpha1" spcv1alpha1 "github.com/openebs/maya/pkg/storagepoolclaim/v1alpha1" "github.com/pkg/errors" k8serror "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" ) var ( // supportedPool is a map holding the supported raid configurations. supportedPool = map[apis.CasPoolValString]bool{ apis.PoolTypeStripedCPV: true, apis.PoolTypeMirroredCPV: true, apis.PoolTypeRaidzCPV: true, apis.PoolTypeRaidz2CPV: true, } ) const ( // DiskStateActive is the active state of the disk DiskStateActive = "Active" // ProvisioningTypeManual is the manual type of provisioning pool ProvisioningTypeManual = "manual" ) type clientSet struct { oecs openebs.Interface } // syncHandler compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the spcPoolUpdated resource // with the current status of the resource. func (c *Controller) syncHandler(key string) error { startTime := time.Now() glog.V(4).Infof("Started syncing storagepoolclaim %q (%v)", key, startTime) defer func() { glog.V(4).Infof("Finished syncing storagepoolclaim %q (%v)", key, time.Since(startTime)) }() // Convert the namespace/name string into a distinct namespace and name _, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { runtime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil } // Get the spc resource with this namespace/name spc, err := c.spcLister.Get(name) if k8serror.IsNotFound(err) { runtime.HandleError(fmt.Errorf("spc '%s' has been deleted", key)) return nil } if err != nil { return err } // Deep-copy otherwise we are mutating our cache. // TODO: Deep-copy only when needed. spcGot := spc.DeepCopy() err = c.syncSpc(spcGot) return err } // enqueueSpc takes a SPC resource and converts it into a namespace/name // string which is then put onto the work queue. This method should *not* be // passed resources of any type other than SPC. func (c *Controller) enqueueSpc(spc interface{}) { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(spc); err != nil { runtime.HandleError(err) return } c.workqueue.Add(key) } // synSpc is the function which tries to converge to a desired state for the spc. 
func (c *Controller) syncSpc(spc *apis.StoragePoolClaim) error { err := validate(spc) if err != nil { glog.Errorf("Validation of spc failed:%s", err) return nil } pendingPoolCount, err := c.getPendingPoolCount(spc) if err != nil { return err } if pendingPoolCount > 0 { err = c.create(pendingPoolCount, spc) if err != nil { return err } } return nil } // create is a wrapper function that calls the actual function to create pool as many time // as the number of pools need to be created. func (c *Controller) create(pendingPoolCount int, spc *apis.StoragePoolClaim) error { var newSpcLease Leaser newSpcLease = &Lease{spc, SpcLeaseKey, c.clientset, c.kubeclientset} err := newSpcLease.Hold() if err != nil { return errors.Wrapf(err, "Could not acquire lease on spc object") } glog.V(4).Infof("Lease acquired successfully on storagepoolclaim %s ", spc.Name) defer newSpcLease.Release() for poolCount := 1; poolCount <= pendingPoolCount; poolCount++ { glog.Infof("Provisioning pool %d/%d for storagepoolclaim %s", poolCount, pendingPoolCount, spc.Name) err = c.CreateStoragePool(spc) if err != nil { runtime.HandleError(errors.Wrapf(err, "Pool provisioning failed for %d/%d for storagepoolclaim %s", poolCount, pendingPoolCount, spc.Name)) } } return nil } // validate validates the spc configuration before creation of pool. func validate(spc *apis.StoragePoolClaim) error { for _, v := range validateFuncList { err := v(spc) if err != nil { return err } } return nil } // validateFunc is typed function for spc validation functions. type validateFunc func(*apis.StoragePoolClaim) error // validateFuncList holds a list of validate functions for spc var validateFuncList = []validateFunc{ validatePoolType, validateDiskType, validateAutoSpcMaxPool, } // validatePoolType validates pool type in spc. func validatePoolType(spc *apis.StoragePoolClaim) error { poolType := spc.Spec.PoolSpec.PoolType ok := supportedPool[apis.CasPoolValString(poolType)] if !ok { return errors.Errorf("aborting storagepool create operation as specified poolType is '%s' which is invalid", poolType) } return nil } // validateDiskType validates the disk types in spc. func validateDiskType(spc *apis.StoragePoolClaim) error { diskType := spc.Spec.Type if !spcv1alpha1.SupportedDiskTypes[apis.CasPoolValString(diskType)] { return errors.Errorf("aborting storagepool create operation as specified type is %s which is invalid", diskType) } return nil } // validateAutoSpcMaxPool validates the max pool count in auto spc func validateAutoSpcMaxPool(spc *apis.StoragePoolClaim) error { if isAutoProvisioning(spc) { maxPools := spc.Spec.MaxPools if maxPools == nil { return errors.Errorf("validation of spc object is failed as no max pool field present in spc %s", spc.Name) } if *maxPools < 0 { return errors.Errorf("aborting storagepool create operation for %s as invalid maxPool value %d", spc.Name, maxPools) } } return nil } // getCurrentPoolCount give the current pool count for the given auto provisioned spc. func (c *Controller) getCurrentPoolCount(spc *apis.StoragePoolClaim) (int, error) { // Get the current count of provisioned pool for the storagepool claim cspList, err := c.clientset.OpenebsV1alpha1().CStorPools().List(metav1.ListOptions{LabelSelector: string(apis.StoragePoolClaimCPK) + "=" + spc.Name}) if err != nil { return 0, errors.Errorf("unable to get current pool count:unable to list cstor pools: %v", err) } return len(cspList.Items), nil } // isPoolPending tells whether some pool is pending to be created. 
func (c *Controller) isPoolPending(spc *apis.StoragePoolClaim) bool { pCount, err := c.getPendingPoolCount(spc) if err != nil { glog.Errorf("Unable to get pending pool count for spc %s:%s", spc.Name, err) return false } if pCount > 0 { return true } return false } // getPendingPoolCount gives the count of pool that needs to be provisioned for a given spc. func (c *Controller) getPendingPoolCount(spc *apis.StoragePoolClaim) (int, error) { var err error var pendingPoolCount int if isAutoProvisioning(spc) { pendingPoolCount, err = c.getAutoSpcPendingPoolCount(spc) } else { pendingPoolCount, err = c.getManualSpcPendingPoolCount(spc) } if err != nil { return 0, errors.Wrapf(err, "failed to get pending pool count for spc %s", spc.Name) } if isValidPendingPoolCount(pendingPoolCount) { return pendingPoolCount, nil } return 0, nil } // getAutoSpcPendingPoolCount get the pending pool count for auto provisioned spc. func (c *Controller) getAutoSpcPendingPoolCount(spc *apis.StoragePoolClaim) (int, error) { // Getting pending pool count in case of auto provisioned spc. err := validateAutoSpcMaxPool(spc) if err != nil { return 0, errors.Wrapf(err, "error in max pool value in spc %s", spc.Name) } currentPoolCount, err := c.getCurrentPoolCount(spc) if err != nil { return 0, err } maxPoolCount := *(spc.Spec.MaxPools) pendingPoolCount := maxPoolCount - currentPoolCount return pendingPoolCount, nil } // getManualSpcPendingPoolCount gets the pending pool count for manual provisioned spc. func (c *Controller) getManualSpcPendingPoolCount(spc *apis.StoragePoolClaim) (int, error) { usableNodeCount, err := c.getUsableNodeCount(spc) if err != nil { return 0, err } pendingPoolCount := len(usableNodeCount) return pendingPoolCount, nil } // getFreeDiskNodeMap forms a map that holds block device names which can be used to create a pool. func (c *Controller) getFreeDiskNodeMap() (map[string]string, error) { freeNodeDiskMap := make(map[string]string) //TODO: Update below snippet tomake use of builder and blockdevice kubeclient //package // Get all block device from kube-apiserver namespace := env.Get(env.OpenEBSNamespace) blockDeviceList, err := c.ndmclientset.OpenebsV1alpha1().BlockDevices(namespace).List(metav1.ListOptions{}) if err != nil { return nil, err } usedBlockDeviceMap, err := c.getUsedBlockDeviceMap() if err != nil { return nil, errors.Wrap(err, "unable to get the used block device map ") } for _, blockDevice := range blockDeviceList.Items { if usedBlockDeviceMap[blockDevice.Name] == 1 { continue } freeNodeDiskMap[blockDevice.Name] = blockDevice.Labels[string(apis.HostNameCPK)] } return freeNodeDiskMap, nil } // getUsableNodeCount forms a map that holds node which can be used to provision pool. 
func (c *Controller) getUsableNodeCount(spc *apis.StoragePoolClaim) (map[string]int, error) { nodeCountMap := make(map[string]int) freeNodeDiskMap, err := c.getFreeDiskNodeMap() if err != nil { return nil, err } for _, spcBlockDevice := range spc.Spec.BlockDevices.BlockDeviceList { if !(len(strings.TrimSpace(freeNodeDiskMap[spcBlockDevice])) == 0) { nodeCountMap[freeNodeDiskMap[spcBlockDevice]]++ } } return nodeCountMap, nil } // getUsedBlockDeviceMap form usedDisk map that will hold the list of all used // block device // TODO: Move to blockDevice package func (c *Controller) getUsedBlockDeviceMap() (map[string]int, error) { // Get the list of block devices that has been used already for pool provisioning cspList, err := c.clientset.OpenebsV1alpha1().CStorPools().List(metav1.ListOptions{}) if err != nil { return nil, errors.Wrapf(err, "unable to get the list of cstor pool") } // Form a map that will hold all the used block device usedBlockDeviceMap := make(map[string]int) for _, csp := range cspList.Items { for _, group := range csp.Spec.Group { for _, bd := range group.Item { usedBlockDeviceMap[bd.Name]++ } } } return usedBlockDeviceMap, nil } // isValidPendingPoolCount tells whether the pending pool count is valid or not. func isValidPendingPoolCount(pendingPoolCout int) bool { if pendingPoolCout < 0 { return false } return true } // isAutoProvisioning returns true the spc is auto provisioning type. func isAutoProvisioning(spc *apis.StoragePoolClaim) bool { return spc.Spec.BlockDevices.BlockDeviceList == nil } // isManualProvisioning returns true if the spc is auto provisioning type. func isManualProvisioning(spc *apis.StoragePoolClaim) bool { return spc.Spec.BlockDevices.BlockDeviceList != nil }
1
16,830
could not import k8s.io/kubernetes/pkg/util/slice (invalid package name: "") (from `typecheck`)
openebs-maya
go
@@ -29,4 +29,4 @@ describeComponent( expect(component._state).to.equal('inDOM'); }); } -); +);
1
/* jshint expr:true */ import {expect} from 'chai'; import { describeComponent, it } from 'ember-mocha'; describeComponent( 'gh-editor-save-button', 'Unit: Component: gh-editor-save-button', { unit: true, needs: [ 'component:gh-dropdown-button', 'component:gh-dropdown', 'component:gh-spin-button', 'service:dropdown' ] }, function () { it('renders', function () { // creates the component instance let component = this.subject(); expect(component._state).to.equal('preRender'); // renders the component on the page this.render(); expect(component._state).to.equal('inDOM'); }); } );
1
7,552
@disordinary would you be able to configure your editor to use the `.editorconfig` file so that we can avoid final newline changes?
TryGhost-Admin
js
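The review above asks the contributor to pick up the repository's `.editorconfig` so that final-newline changes stop showing up in diffs. A purely illustrative sketch of the kind of rule involved (the actual Ghost-Admin file is not shown here and may differ):

```ini
# Illustrative .editorconfig fragment (not the project's actual file).
root = true

[*]
insert_final_newline = true
trim_trailing_whitespace = true
```

Once an editor's EditorConfig support is enabled, these properties are applied automatically on save, which avoids the stray trailing-newline churn the reviewer is pointing at.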
@@ -80,12 +80,9 @@ module.exports = function(realmConstructor, context) { const { DefaultNetworkTransport } = require('realm-network-transport'); realmConstructor._networkTransport = new DefaultNetworkTransport(); - Object.defineProperty(realmConstructor.Object.prototype, "toJSON", { - value: function () { - const result = {} - Object.keys(this).forEach(p => result[p] = this[p]); - Object.keys(Object.getPrototypeOf(this)).forEach(p => result[p] = this[p]); - return result; + Object.defineProperty(realmConstructor.Collection.prototype, "toJSON", { + value: function (_, cache = new Map()) { + return this.map((item, index) => item.toJSON ? item.toJSON(index.toString(), cache) : item); }, writable: true,
1
//////////////////////////////////////////////////////////////////////////// // // Copyright 2016 Realm Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // //////////////////////////////////////////////////////////////////////////// 'use strict'; /* global navigator */ const URL = require('url-parse'); let getOwnPropertyDescriptors = Object.getOwnPropertyDescriptors || function(obj) { return Object.getOwnPropertyNames(obj).reduce(function (descriptors, name) { descriptors[name] = Object.getOwnPropertyDescriptor(obj, name); return descriptors; }, {}); }; function setConstructorOnPrototype(klass) { if (klass.prototype.constructor !== klass) { Object.defineProperty(klass.prototype, 'constructor', { value: klass, configurable: true, writable: true }); } } function waitForCompletion(session, fn, timeout, timeoutErrorMessage) { const waiter = new Promise((resolve, reject) => { fn.call(session, (error) => { if (error === undefined) { setTimeout(() => resolve(), 1); } else { setTimeout(() => reject(error), 1); } }); }); if (timeout === undefined) { return waiter; } return Promise.race([ waiter, new Promise((resolve, reject) => { setTimeout(() => { reject(timeoutErrorMessage); }, timeout); }) ]); } function openLocalRealm(realmConstructor, config) { let promise = Promise.resolve(new realmConstructor(config)); promise.progress = (callback) => { return promise; }; promise.cancel = () => { }; return promise; } module.exports = function(realmConstructor, context) { // Add the specified Array methods to the Collection prototype. 
Object.defineProperties(realmConstructor.Collection.prototype, require('./collection-methods')); setConstructorOnPrototype(realmConstructor.Collection); setConstructorOnPrototype(realmConstructor.List); setConstructorOnPrototype(realmConstructor.Results); setConstructorOnPrototype(realmConstructor.Object); realmConstructor._bson = require('bson'); realmConstructor._Decimal128 = realmConstructor._bson.Decimal128; realmConstructor._ObjectId = realmConstructor._bson.ObjectId; const { DefaultNetworkTransport } = require('realm-network-transport'); realmConstructor._networkTransport = new DefaultNetworkTransport(); Object.defineProperty(realmConstructor.Object.prototype, "toJSON", { value: function () { const result = {} Object.keys(this).forEach(p => result[p] = this[p]); Object.keys(Object.getPrototypeOf(this)).forEach(p => result[p] = this[p]); return result; }, writable: true, configurable: true, enumerable: false }); Object.defineProperty(realmConstructor.Object.prototype, "keys", { value: function () { return Object.keys(this).concat(Object.keys(Object.getPrototypeOf(this))); }, writable: true, configurable: true, enumerable: false }); Object.defineProperty(realmConstructor.Object.prototype, "entries", { value: function () { let result = {}; for (const key in this) { result[key] = this[key]; } return Object.entries(result); }, writable: true, configurable: true, enumerable: false }); //Add static methods to the Realm object Object.defineProperties(realmConstructor, getOwnPropertyDescriptors({ open(config) { // If no config is defined, we should just open the default realm if (config === undefined) { config = {}; } // For local Realms we open the Realm and return it in a resolved Promise. if (!("sync" in config)) { return openLocalRealm(realmConstructor, config); } // Determine if we are opening an existing Realm or not. let behavior = realmConstructor.exists(config) ? "existingRealmFileBehavior" : "newRealmFileBehavior"; // Define how the Realm file is opened let openLocalRealmImmediately = false; // Default is downloadBeforeOpen if (config.sync[behavior] !== undefined) { const type = config.sync[behavior].type; switch (type) { case 'downloadBeforeOpen': openLocalRealmImmediately = false; break; case 'openImmediately': openLocalRealmImmediately = true; break; default: throw Error(`Invalid type: '${type}'. Only 'downloadBeforeOpen' and 'openImmediately' is allowed.`); } } // If configured to do so, the synchronized Realm will be opened locally immediately. // If this is the first time the Realm is created, the schema will be created locally as well. if (openLocalRealmImmediately) { return openLocalRealm(realmConstructor, config); } // Otherwise attempt to synchronize the Realm state from the server before opening it. // First configure any timeOut and corresponding behavior. let openPromises = []; if (config.sync[behavior] !== undefined && config.sync[behavior].timeOut !== undefined) { let timeOut = config.sync[behavior].timeOut; if (typeof timeOut !== 'number') { throw new Error(`'timeOut' must be a number: '${timeOut}'`); } // Define the behavior in case of a timeout let throwOnTimeOut = true; // Default is to throw if (config.sync[behavior] !== undefined && config.sync[behavior].timeOutBehavior) { const timeOutBehavior = config.sync[behavior].timeOutBehavior; switch (timeOutBehavior) { case 'throwException': throwOnTimeOut = true; break; case 'openLocal': throwOnTimeOut = false; break; default: throw Error(`Invalid 'timeOutBehavior': '${timeOutBehavior}'. 
Only 'throwException' and 'openLocal' is allowed.`); } } openPromises.push(new Promise((resolve, reject) => { setTimeout(() => { if (asyncOpenTask) { asyncOpenTask.cancel(); asyncOpenTask = null; } if (throwOnTimeOut) { reject(new Error(`${config.sync.url} could not be downloaded in the allocated time: ${timeOut} ms.`)); } else { return resolve(openLocalRealm(realmConstructor, config)); } }, timeOut); })); } // Configure promise responsible for downloading the Realm from the server let asyncOpenTask; let cancelled = false; openPromises.push(new Promise((resolve, reject) => { asyncOpenTask = realmConstructor._asyncOpen(config, (realm, error) => { setTimeout(() => { asyncOpenTask = null; // The user may have cancelled the open between when // the download completed and when we managed to // actually invoke this, so recheck here. if (cancelled) { return; } if (error) { reject(error); } else { resolve(realm); } }, 0); }); })); // Return wrapped promises, allowing the users to control them. let openPromise = Promise.race(openPromises); openPromise.cancel = () => { if (asyncOpenTask) { asyncOpenTask.cancel(); cancelled = true; } }; openPromise.progress = (callback) => { if (asyncOpenTask) { asyncOpenTask.addDownloadNotification(callback); } return openPromise; }; return openPromise; }, createTemplateObject(objectSchema) { let obj = {}; for (let key in objectSchema.properties) { let type; if (typeof objectSchema.properties[key] === 'string' || objectSchema.properties[key] instanceof String) { // Simple declaration of the type type = objectSchema.properties[key]; } else { // Advanced property setup const property = objectSchema.properties[key]; // if optional is set, it wil take precedence over any `?` set on the type parameter if (property.optional === true) { continue; } // If a default value is explicitly set, always set the property if (property.default !== undefined) { obj[key] = property.default; continue; } type = property.type; } // Set the default value for all required primitive types. // Lists are always treated as empty if not specified and references to objects are always optional switch (type) { case 'bool': obj[key] = false; break; case 'int': obj[key] = 0; break; case 'float': obj[key] = 0.0; break; case 'double': obj[key] = 0.0; break; case 'string': obj[key] = ""; break; case 'data': obj[key] = new ArrayBuffer(0); break; case 'date': obj[key] = new Date(0); break; } } return obj; }, _expandEmbeddedObjectSchemas(schemas) { // we only work on arrays and let object store's schema parser produce the error messages if (!(schemas instanceof Array)) { return schemas; } let newSchema = []; schemas.forEach(schema => { // a schema must be an object and have 'name' and 'properties' // we let object store's schema parser produce the error messages if (!(schema instanceof Object) || !schema.hasOwnProperty('name') || !schema.hasOwnProperty('properties')) { newSchema.push(schema); return; } // is the schema defined as a constructor? 
if (schema instanceof Function) { schema = schema.schema; } let os = {}; os.name = schema.name; if (schema.primaryKey) { os.primaryKey = schema.primaryKey; } if (schema.embedded) { os.embedded = true; } else { os.embedded = false; } if ((schema.properties instanceof Array)) { newSchema.push(schema); } else { os.properties = {}; for (let key in schema.properties) { let prop = schema.properties[key]; if (prop instanceof Object && prop.hasOwnProperty('name') && prop.hasOwnProperty('properties')) { let embeddedSchema = {}; embeddedSchema.name = prop.name; embeddedSchema.embedded = true; embeddedSchema.properties = prop.properties; if (prop.hasOwnProperty('type') && prop.type === 'list') { os.properties[key] = { type: 'list', objectType: prop.name }; } else { os.properties[key] = { type: prop.name }; } newSchema.push(embeddedSchema); } else { os.properties[key] = schema.properties[key]; } } newSchema.push(os); } }); return newSchema; } })); // Add static properties to Realm Object const updateModeType = { All: 'all', Modified: 'modified', Never: 'never', }; if (!realmConstructor.UpdateMode) { Object.defineProperty(realmConstructor, 'UpdateMode', { value: updateModeType, configurable: false, }); } // Add sync methods if (realmConstructor.Sync) { let appMethods = require("./app"); Object.defineProperties(realmConstructor.App, getOwnPropertyDescriptors(appMethods.static)); Object.defineProperties(realmConstructor.App.prototype, getOwnPropertyDescriptors(appMethods.instance)); let userMethods = require("./user"); Object.defineProperties(realmConstructor.User, getOwnPropertyDescriptors(userMethods.static)); Object.defineProperties(realmConstructor.User.prototype, getOwnPropertyDescriptors(userMethods.instance)); let credentialMethods = require("./credentials"); Object.defineProperties(realmConstructor.Credentials, getOwnPropertyDescriptors(credentialMethods.static)) let emailPasswordAuthMethods = require("./email-password-auth-methods"); Object.defineProperties(realmConstructor.Auth.EmailPasswordAuth.prototype, getOwnPropertyDescriptors(emailPasswordAuthMethods.instance)); let apiKeyAuthMethods = require("./api-key-auth-methods"); Object.defineProperties(realmConstructor.Auth.ApiKeyAuth.prototype, getOwnPropertyDescriptors(apiKeyAuthMethods.instance)); realmConstructor.Sync.AuthError = require("./errors").AuthError; if (realmConstructor.Sync.removeAllListeners) { process.on("exit", realmConstructor.Sync.removeAllListeners); process.on("SIGINT", function () { realmConstructor.Sync.removeAllListeners(); process.exit(2); }); process.on("uncaughtException", function(e) { realmConstructor.Sync.removeAllListeners(); /* eslint-disable no-console */ console.log(e.stack); process.exit(99); }); } setConstructorOnPrototype(realmConstructor.User); setConstructorOnPrototype(realmConstructor.Sync.Session); setConstructorOnPrototype(realmConstructor.App); setConstructorOnPrototype(realmConstructor.Credentials); realmConstructor.Sync.openLocalRealmBehavior = { type: 'openImmediately' }; realmConstructor.Sync.downloadBeforeOpenBehavior = { type: 'downloadBeforeOpen', timeOut: 30 * 1000, timeOutBehavior: 'throwException' }; realmConstructor.Sync.Session.prototype.uploadAllLocalChanges = function(timeout) { return waitForCompletion(this, this._waitForUploadCompletion, timeout, `Uploading changes did not complete in ${timeout} ms.`); }; realmConstructor.Sync.Session.prototype.downloadAllServerChanges = function(timeout) { return waitForCompletion(this, this._waitForDownloadCompletion, timeout, `Downloading changes did 
not complete in ${timeout} ms.`); }; realmConstructor.Sync.ConnectionState = { Disconnected: "disconnected", Connecting: "connecting", Connected: "connected", }; realmConstructor.Sync.ClientResyncMode = { Discard: 'discard', Manual: 'manual', Recover: 'recover' }; Object.defineProperties(realmConstructor, getOwnPropertyDescriptors({ // Creates the user agent description for the JS binding itself. Users must specify the application // user agent using Realm.Sync.setUserAgent(...) _createUserAgentDescription() { // Detect if in ReactNative (running on a phone) or in a Node.js environment // Credit: https://stackoverflow.com/questions/39468022/how-do-i-know-if-my-code-is-running-as-react-native try { var userAgent = "RealmJS/"; userAgent = userAgent + require('../package.json').version + " (" + context + ", "; if (typeof navigator !== 'undefined' && navigator.product === 'ReactNative') { // Running on ReactNative const Platform = require('react-native').Platform; userAgent += Platform.OS + ", v" + Platform.Version; } else { // Running on a normal machine userAgent += process.version; } return userAgent += ")"; } catch (e) { return "RealmJS/Unknown" } }, })); } // TODO: Remove this now useless object. var types = Object.freeze({ 'BOOL': 'bool', 'INT': 'int', 'FLOAT': 'float', 'DOUBLE': 'double', 'STRING': 'string', 'DATE': 'date', 'DATA': 'data', 'OBJECT': 'object', 'LIST': 'list', }); Object.defineProperty(realmConstructor, 'Types', { get: function() { if (typeof console != 'undefined') { /* global console */ /* eslint-disable no-console */ var stack = new Error().stack.split("\n").slice(2).join("\n"); var msg = '`Realm.Types` is deprecated! Please specify the type name as lowercase string instead!\n'+stack; if (console.warn != undefined) { console.warn(msg); } else { console.log(msg); } /* eslint-enable no-console */ } return types; }, configurable: true }); }
1
18,955
I really think we should not use this style where unused arguments are named with an underscore. While this eases things for the person writing the function, it does so at the expense of readability and debugging. The next person who needs to actually use this argument will have to check the spec and rename it to `obj` before using it, and anyone debugging will not be able to watch a meaningful variable name and will have to inspect `_`, which is weird and uncomfortable. We have not used this style anywhere in the code, although I agree it's very tempting to use it :)
realm-realm-js
js
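The review above objects to naming the unused first argument `_` in the new `Collection.prototype.toJSON` override. A sketch of the rename being requested, based only on the signature visible in the patch (the property-descriptor flags are assumed to match the neighbouring `toJSON` definitions in the file):

```js
// Same override as in the patch, but the first argument gets a meaningful
// name (`key`, per the standard toJSON(key) contract) instead of `_`.
Object.defineProperty(realmConstructor.Collection.prototype, "toJSON", {
    value: function (key, cache = new Map()) {
        // Forward the cache argument unchanged, exactly as in the patched version.
        return this.map((item, index) => item.toJSON ? item.toJSON(index.toString(), cache) : item);
    },
    writable: true,
    configurable: true,
    enumerable: false
});
```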
@@ -575,7 +575,7 @@ Lng32 main(Lng32 argc, char *argv[]) // setup log4cxx QRLogger::initLog4cxx(QRLogger::QRL_LOB); // initialize lob globals - lobGlobals = new ExLobGlobals(); + lobGlobals = new ExLobGlobals(NULL); if (lobGlobals == NULL) return -1;
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ***************************************************************************** * * File: ExpLOBprocess.cpp * Description: class to store and retrieve LOB info from mxlobsrvr process. * * * Created: 10/29/2012 * Language: C++ * * * * ***************************************************************************** */ /*** Note *** This file is currently compiled and creates the mxlobsrvr executable. But the functions in this file are not active or used at this point. Code maybe added in the near future to offload any tasks like garbage collection, to this process .Hence we are retainign this file as part of the mxlobsrvr infrastructure .If any functionas are added and need to be executed in the mxlobsrvr process, the sqstart/sqstop need to modified to call lobstop and lostart**/ /****************************************************************************/ #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <malloc.h> #include <string> #include <errno.h> #include <sys/file.h> #include <iostream> #include <errno.h> #include <fcntl.h> // for nonblocking #include <netdb.h> #include <pthread.h> #include <signal.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <unistd.h> #include <zlib.h> // ZLIB compression library #include <netinet/in.h> #include <netinet/tcp.h> #include <arpa/inet.h> #include <sys/epoll.h> #include <sys/socket.h> // basic socket definitions #include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> // basic system data types #include <sys/uio.h> #include <sys/wait.h> #include <guardian/kphandlz.h> #include <seabed/ms.h> #include <seabed/fs.h> #include <seabed/pctl.h> #include <seabed/pevents.h> #include <seabed/fserr.h> #include "ComRtUtils.h" //#include "ExeReplInterface.h" #include "Globals.h" #include "NAExit.h" #include "ex_ex.h" // ex_assert #include "SCMVersHelp.h" #define SQ_USE_LOB_PROCESS 1 #include "ExpLOBaccess.h" #include "QRLogger.h" #include "ExpLOBexternal.h" extern int ms_transid_reg(MS_Mon_Transid_Type, MS_Mon_Transseq_Type); extern void ms_transid_clear(MS_Mon_Transid_Type, MS_Mon_Transseq_Type); extern "C" short GETTRANSID(short *transid); extern "C" short JOINTRANSACTION(Int64 transid); extern "C" short SUSPENDTRANSACTION(short *transid); #define TRANSID_IS_VALID(idin) (idin.id[0] != 0) using namespace std; // Session State values enum { IDLE_STATE = 0, WAITING = 1, DATA_PENDING = 2, WRITE_PENDING = 3, SEND_PENDING = 4, SENDING_DATA = 5, RQST_PENDING = 6, REPLY_PENDING = 7, WAITING_REPLY 
= 8, END_SESSION = 9, COMM_RESET = 10, READ_POSTED = 11, // TCP/IP State Machine states LISTENER_INIT_STATE = 20, LISTENER_SOCKOPT_STATE = 21, LISTENER_ACCEPT_STATE = 22, LISTENER_SHUTDOWN_STATE = 23, LISTENER_CLOSE_STATE = 24, SESSION_INIT_STATE = 30, SESSION_CONNECT_STATE = 31, SESSION_CONNECT_CHECK_STATE = 32, SESSION_SOCKOPT1_STATE = 33, SESSION_SOCKOPT2_STATE = 34, SESSION_RECV_STATE = 35, SESSION_SEND_STATE = 36, SESSION_SHUTDOWN_STATE = 37, SESSION_CLOSE_STATE = 38 }; // AWAITIO classes enum {Class_0 = 0, Class_1 = 1, Class_2 = 2, Class_3 = 3, Class_4 = 4, Class_5 = 5, Class_6 = 6, Class_7 = 7}; void process_msg(BMS_SRE *sre) { // do work here return; } class rcv_struct { public: union { unsigned char *databuf; Lng32 *datalen; }; Lng32 buflen; short file; short state; }; #pragma fieldalign platform awaitio_tag_struct class awaitio_tag_struct { public: union { SB_Tag_Type Tag; struct { ULng32 Class:4; // I/O class ULng32 State:12; // State of this I/O operation. ULng32 Index:16; // Index into control block table. }; }; }; class awaitio_struct { public: Lng32 Iocount; short Error; short File; }; enum {IDLE_TIMEOUT = 30000, // 5 minute wait before stopping cli process. MAX_RETRIES = 3, // Maximum number of retries before waiting. NUM_OPENS = 64, // Initial number of entries in the OCB table. RECV_BUFSIZE = 56 * 1024, // Size of $RECEIVE I/O buffer. RETRY_TIMEOUT = 3000, // 30 second wait before retrying. SOCK_BUFSIZE = 56 * 1024, // Size of socket I/O buffers. STARTUP_TIMEOUT = 500}; // CLI Invokers will die if an open message is not // received before a 5 sec startup timer expires. rcv_struct rcv; // Mutexes and thread variables pthread_mutex_t cnfg_mutex; pthread_mutex_t g_mutex; pthread_attr_t thr_attr; Lng32 total_dynamic_memory; CliGlobals *cliGlobals = NULL; char *myProgramName = NULL; xzsys_ddl_smsg_def *sysmsg = NULL; FS_Receiveinfo_Type rcvinfo; ExLobGlobals *lobGlobals = NULL; //***************************************************************************** //***************************************************************************** static void sigterm_handler(Lng32 signo) { printf("sigterm received\n"); } void LOB_process_stop(short flag) { if (rcv.state == READ_POSTED) BCANCEL(rcv.file); else NAExit(flag); } // BDR_process_stop //***************************************************************************** static void *Calloc(size_t memsize) { void *ptr; Lng32 retval; retval = pthread_mutex_lock(&g_mutex); ex_assert(retval == 0, "Calloc 1"); ptr = calloc(1, memsize); ex_assert(ptr != NULL, "Calloc 2"); total_dynamic_memory += memsize; retval = pthread_mutex_unlock(&g_mutex); ex_assert(retval == 0, "Calloc 3"); return(ptr); } // Calloc void post_receive_read(void) { _bcc_status cc_status; awaitio_tag_struct tag; if (rcv.state == END_SESSION) LOB_process_stop(0); if (rcv.databuf == NULL) { rcv.buflen = RECV_BUFSIZE + 2*sizeof(int); rcv.databuf = (unsigned char *)Calloc(rcv.buflen); sysmsg = (xzsys_ddl_smsg_def *)&rcv.datalen[0]; } tag.Tag = 0; tag.Class = Class_7; cc_status = BREADUPDATEX(rcv.file, (char *)&rcv.datalen[0], RECV_BUFSIZE, NULL, tag.Tag); ex_assert(_status_eq(cc_status), "post_receive_read 1"); rcv.state = READ_POSTED; return; } // post_receive_read short process_open(void) { Lng32 retval; short retvals = XZFIL_ERR_OK; return retvals; } // process_open //***************************************************************************** //***************************************************************************** short process_close(void) { Lng32 retval; short 
retvals = XZFIL_ERR_OK; return retvals; } // process_close void process_mon_msg(MS_Mon_Msg *msg) { printf("server received monitor msg, type=%d\n", msg->type); switch (msg->type) { case MS_MsgType_Change: printf(" type=%d, group=%s, key=%s, value=%s\n", msg->u.change.type, msg->u.change.group, msg->u.change.key, msg->u.change.value); break; case MS_MsgType_Close: printf(" nid=%d, pid=%d, process=%s, aborted=%d\n", msg->u.close.nid, msg->u.close.pid, msg->u.close.process_name, msg->u.close.aborted); break; case MS_MsgType_Event: break; case MS_MsgType_NodeDown: printf(" nid=%d, node=%s\n", msg->u.down.nid, msg->u.down.node_name); break; case MS_MsgType_NodeUp: printf(" nid=%d, node=%s\n", msg->u.up.nid, msg->u.up.node_name); break; case MS_MsgType_Open: printf(" nid=%d, pid=%d, process=%s, death=%d\n", msg->u.open.nid, msg->u.open.pid, msg->u.open.process_name, msg->u.open.death_notification); break; case MS_MsgType_ProcessCreated: printf(" nid=%d, pid=%d, tag=0x%llx, process=%s, ferr=%d\n", msg->u.process_created.nid, msg->u.process_created.pid, msg->u.process_created.tag, msg->u.process_created.process_name, msg->u.process_created.ferr); break; case MS_MsgType_ProcessDeath: printf(" nid=%d, pid=%d, aborted=%d, process=%s\n", msg->u.death.nid, msg->u.death.pid, msg->u.death.aborted, msg->u.death.process_name); break; case MS_MsgType_Service: break; case MS_MsgType_Shutdown: printf(" nid=%d, pid=%d, level=%d\n", msg->u.shutdown.nid, msg->u.shutdown.pid, msg->u.shutdown.level); break; case MS_MsgType_TmSyncAbort: case MS_MsgType_TmSyncCommit: break; case MS_MsgType_UnsolicitedMessage: break; default: break; } } Ex_Lob_Error ExLob::getDesc(ExLobRequest *request) { Ex_Lob_Error err; Lng32 clierr; Int64 dummyParam; Lng32 handleOutLen = 0; Lng32 blackBoxLen = 0; Int64 offset; Int64 size; clierr = SQL_EXEC_LOBcliInterface(request->getHandleIn(), request->getHandleInLen(), request->getBlackBox(), &blackBoxLen, request->getHandleOut(), &handleOutLen, LOB_CLI_SELECT_UNIQUE, LOB_CLI_ExecImmed, &offset, &size, &dummyParam, &dummyParam, 0, request->getTransId(), FALSE); request->setHandleOutLen(handleOutLen); request->setBlackBoxLen(blackBoxLen); request->setCliError(clierr); return LOB_OPER_OK; } void processRequest(ExLobRequest *request) { Ex_Lob_Error err = LOB_OPER_OK; Int64 descNum; Int64 dataOffset; Int64 operLen; ExLobDescHeader descHeader; ExLob *lobPtr; ExLobDesc desc; ExLobDesc *descPtr; Lng32 clierr; Lng32 handleOutLen; Int64 size; err = lobGlobals->getLobPtr(request->getDescFileName(), lobPtr); if (err != LOB_OPER_OK) { request->setError(LOB_INIT_ERROR); return ; } if (!lobGlobals->isCliInitialized()) { Lng32 clierr = SQL_EXEC_LOBcliInterface(0, 0, 0, 0, 0, 0, LOB_CLI_INIT, LOB_CLI_ExecImmed, 0, 0, 0, 0, 0, 0,FALSE); if (clierr < 0) { request->setError(LOB_INIT_ERROR); return ; } lobGlobals->setCliInitialized(); } switch(request->getType()) { case Lob_Req_Get_Desc: err = lobPtr->getDesc(request); break; default: err = LOB_REQUEST_UNDEFINED_ERROR; printf("bad request = %d\n", request->getType()); break; }; request->setError(err); return; } void receive_message(ExLobRequest *request) { Int64 transId; int err; int cliRC = GETTRANSID((short *)&transId); printf("transid before setting = %ld\n", transId); if (TRANSID_IS_VALID(request->getTransIdBig())) { err = ms_transid_reg(request->getTransIdBig(), request->getTransStartId()); printf("transid reg err = %d\n", err); } else if (request->getTransId()) { err = JOINTRANSACTION(request->getTransId()); printf("join txn err = %d\n", err); } cliRC = 
GETTRANSID((short *)&transId); printf("transid after setting = %ld\n", transId); processRequest(request); if (TRANSID_IS_VALID(request->getTransIdBig())) { ms_transid_clear(request->getTransIdBig(), request->getTransStartId()); } else if (request->getTransId()) { transId = request->getTransId(); SUSPENDTRANSACTION((short*)&transId); } return; } // receive_message Ex_Lob_Error ExLobGlobals::initialize() { lobMap_ = (lobMap_t *) new lobMap_t; if (lobMap_ == NULL) return LOB_INIT_ERROR; return LOB_OPER_OK; } Ex_Lob_Error ExLobGlobals::getLobPtr(char *lobName, ExLob *& lobPtr) { Ex_Lob_Error err; lobMap_t *lobMap = NULL; lobMap_it it; lobMap = lobGlobals->getLobMap(); it = lobMap->find(string(lobName)); if (it == lobMap->end()) { lobPtr = new (lobGlobals->getHeap())ExLob(lobGlobals->getHeap()); if (lobPtr == NULL) return LOB_ALLOC_ERROR; lobMap->insert(pair<string, ExLob*>(string(lobName), lobPtr)); } else { lobPtr = it->second; } return LOB_OPER_OK; } Ex_Lob_Error ExLobGlobals::delLobPtr(char *lobName) { Ex_Lob_Error err; lobMap_t *lobMap = NULL; lobMap_it it; lobMap = lobGlobals->getLobMap(); it = lobMap->find(string(lobName)); if (it != lobMap->end()) { ExLob *lobPtr = it->second; delete lobPtr; lobMap->erase(it); } return LOB_OPER_OK; } Lng32 main(Lng32 argc, char *argv[]) { Lng32 lv_event_len; Lng32 lv_error; Lng32 lv_ret; BMS_SRE lv_sre; Lng32 retval; _bcc_status cc_status; awaitio_tag_struct awaitTag; awaitio_struct awaitIo; // Register sigterm_handler as our signal handler for SIGTERM. if (signal(SIGTERM, sigterm_handler) == SIG_ERR) { cout << "*** Cannot handle SIGTERM ***" << endl; exit(1); } retval = pthread_mutex_init(&cnfg_mutex, NULL); ex_assert(retval == 0, "main 1"); // seaquest related stuff retval = msg_init_attach(&argc, &argv, true, (char *)""); if (retval) printf("msg_init_attach returned: %d\n", retval); // sq_fs_dllmain(); msg_mon_process_startup(true); msg_mon_enable_mon_messages(1); retval = atexit((void(*)(void))msg_mon_process_shutdown); if (retval != 0) { cout << "*** atexit failed with error " << retval << " ***" << endl; exit(1); } // setup log4cxx QRLogger::initLog4cxx(QRLogger::QRL_LOB); // initialize lob globals lobGlobals = new ExLobGlobals(); if (lobGlobals == NULL) return -1; retval = lobGlobals->initialize(); if (retval != LOB_OPER_OK) return -1; /* Lng32 clierr = SQL_EXEC_LOBcliInterface(0, 0, 0, 0, 0, 0, LOB_CLI_INIT, LOB_CLI_ExecImmed, 0, 0, 0, 0, 0, 0); if (clierr < 0) return -1; */ bool done = false; BMS_SRE sre; Lng32 len; char recv_buffer[BUFSIZ]; Lng32 err; printf("lob process initialialized. Ready for requests\n"); while(!done) { do { XWAIT(LREQ, -1); err = BMSG_LISTEN_((short *) &sre, 0, 0); } while (err == BSRETYPE_NOWORK); err = BMSG_READDATA_(sre.sre_msgId, recv_buffer, BUFSIZ); if ((sre.sre_flags & XSRE_MON)) { MS_Mon_Msg *msg = (MS_Mon_Msg *)recv_buffer; process_mon_msg(msg); if (msg->type == MS_MsgType_Shutdown) done = true; len = 0; } else { receive_message((ExLobRequest *)recv_buffer); len = sizeof(ExLobRequest); } BMSG_REPLY_(sre.sre_msgId, NULL, 0, recv_buffer, len, 0, NULL); } msg_mon_process_shutdown(); return 0; }
1
19,630
Thanks for avoiding the default value for the param. But I think passing the heap as NULL might make this program fail. Have you tested this program? If you have already tested it, then this PR is ready to be merged.
apache-trafodion
cpp
@@ -0,0 +1,18 @@ +// Licensed to the .NET Foundation under one or more agreements. +// The .NET Foundation licenses this file to you under the MS-PL license. +// See the LICENSE file in the project root for more information. + +using MvvmCross.Presenters.Attributes; +using Windows.UI.Xaml.Controls; + +namespace MvvmCross.Platforms.Uap.Presenters.Attributes +{ + public sealed class MvxModalViewPresentationAttribute : MvxBasePresentationAttribute + { + public MvxModalViewPresentationAttribute() + { + } + + public ContentDialogPlacement Placement { get; set; } + } +}
1
1
14,600
Don't make it sealed
MvvmCross-MvvmCross
.cs
@@ -278,6 +278,8 @@ func (a *Account) shallowCopy() *Account { } // JetStream na.jsLimits = a.jsLimits + // Server config account limits. + na.limits = a.limits return na }
1
// Copyright 2018-2021 The NATS Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package server import ( "bytes" "encoding/hex" "errors" "fmt" "hash/maphash" "io/ioutil" "math" "math/rand" "net/http" "net/textproto" "reflect" "sort" "strconv" "strings" "sync" "time" "github.com/nats-io/jwt/v2" "github.com/nats-io/nkeys" "github.com/nats-io/nuid" ) // For backwards compatibility with NATS < 2.0, users who are not explicitly defined into an // account will be grouped in the default global account. const globalAccountName = DEFAULT_GLOBAL_ACCOUNT const defaultMaxSubLimitReportThreshold = int64(2 * time.Second) var maxSubLimitReportThreshold = defaultMaxSubLimitReportThreshold // Account are subject namespace definitions. By default no messages are shared between accounts. // You can share via Exports and Imports of Streams and Services. type Account struct { gwReplyMapping Name string Nkey string Issuer string claimJWT string updated time.Time mu sync.RWMutex sqmu sync.Mutex sl *Sublist ic *client isid uint64 etmr *time.Timer ctmr *time.Timer strack map[string]sconns nrclients int32 sysclients int32 nleafs int32 nrleafs int32 clients map[*client]struct{} rm map[string]int32 lqws map[string]int32 usersRevoked map[string]int64 actsRevoked map[string]int64 mappings []*mapping lleafs []*client imports importMap exports exportMap js *jsAccount jsLimits *JetStreamAccountLimits limits expired bool incomplete bool signingKeys map[string]jwt.Scope srv *Server // server this account is registered with (possibly nil) lds string // loop detection subject for leaf nodes siReply []byte // service reply prefix, will form wildcard subscription. prand *rand.Rand eventIds *nuid.NUID eventIdsMu sync.Mutex defaultPerms *Permissions tags jwt.TagList nameTag string lastLimErr int64 } // Account based limits. type limits struct { mpay int32 msubs int32 mconns int32 mleafs int32 } // Used to track remote clients and leafnodes per remote server. type sconns struct { conns int32 leafs int32 } // Import stream mapping struct type streamImport struct { acc *Account from string to string tr *transform rtr *transform claim *jwt.Import usePub bool invalid bool } const ClientInfoHdr = "Nats-Request-Info" // Import service mapping struct type serviceImport struct { acc *Account claim *jwt.Import se *serviceExport sid []byte from string to string tr *transform ts int64 rt ServiceRespType latency *serviceLatency m1 *ServiceLatency rc *client usePub bool response bool invalid bool share bool tracking bool didDeliver bool trackingHdr http.Header // header from request } // This is used to record when we create a mapping for implicit service // imports. We use this to clean up entries that are not singletons when // we detect that interest is no longer present. The key to the map will // be the actual interest. We record the mapped subject and the account. type serviceRespEntry struct { acc *Account msub string } // ServiceRespType represents the types of service request response types. 
type ServiceRespType uint8 // Service response types. Defaults to a singleton. const ( Singleton ServiceRespType = iota Streamed Chunked ) // String helper. func (rt ServiceRespType) String() string { switch rt { case Singleton: return "Singleton" case Streamed: return "Streamed" case Chunked: return "Chunked" } return "Unknown ServiceResType" } // exportAuth holds configured approvals or boolean indicating an // auth token is required for import. type exportAuth struct { tokenReq bool accountPos uint approved map[string]*Account } // streamExport type streamExport struct { exportAuth } // serviceExport holds additional information for exported services. type serviceExport struct { exportAuth acc *Account respType ServiceRespType latency *serviceLatency rtmr *time.Timer respThresh time.Duration } // Used to track service latency. type serviceLatency struct { sampling int8 // percentage from 1-100 or 0 to indicate triggered by header subject string } // exportMap tracks the exported streams and services. type exportMap struct { streams map[string]*streamExport services map[string]*serviceExport responses map[string]*serviceImport } // importMap tracks the imported streams and services. // For services we will also track the response mappings as well. type importMap struct { streams []*streamImport services map[string]*serviceImport rrMap map[string][]*serviceRespEntry } // NewAccount creates a new unlimited account with the given name. func NewAccount(name string) *Account { a := &Account{ Name: name, limits: limits{-1, -1, -1, -1}, eventIds: nuid.New(), } return a } func (a *Account) String() string { return a.Name } // Used to create shallow copies of accounts for transfer // from opts to real accounts in server struct. func (a *Account) shallowCopy() *Account { na := NewAccount(a.Name) na.Nkey = a.Nkey na.Issuer = a.Issuer if a.imports.streams != nil { na.imports.streams = make([]*streamImport, 0, len(a.imports.streams)) for _, v := range a.imports.streams { si := *v na.imports.streams = append(na.imports.streams, &si) } } if a.imports.services != nil { na.imports.services = make(map[string]*serviceImport) for k, v := range a.imports.services { si := *v na.imports.services[k] = &si } } if a.exports.streams != nil { na.exports.streams = make(map[string]*streamExport) for k, v := range a.exports.streams { if v != nil { se := *v na.exports.streams[k] = &se } else { na.exports.streams[k] = nil } } } if a.exports.services != nil { na.exports.services = make(map[string]*serviceExport) for k, v := range a.exports.services { if v != nil { se := *v na.exports.services[k] = &se } else { na.exports.services[k] = nil } } } // JetStream na.jsLimits = a.jsLimits return na } // nextEventID uses its own lock for better concurrency. func (a *Account) nextEventID() string { a.eventIdsMu.Lock() id := a.eventIds.Next() a.eventIdsMu.Unlock() return id } // Called to track a remote server and connections and leafnodes it // has for this account. func (a *Account) updateRemoteServer(m *AccountNumConns) []*client { a.mu.Lock() if a.strack == nil { a.strack = make(map[string]sconns) } // This does not depend on receiving all updates since each one is idempotent. // FIXME(dlc) - We should cleanup when these both go to zero. 
prev := a.strack[m.Server.ID] a.strack[m.Server.ID] = sconns{conns: int32(m.Conns), leafs: int32(m.LeafNodes)} a.nrclients += int32(m.Conns) - prev.conns a.nrleafs += int32(m.LeafNodes) - prev.leafs mtce := a.mconns != jwt.NoLimit && (len(a.clients)-int(a.sysclients)+int(a.nrclients) > int(a.mconns)) // If we are over here some have snuck in and we need to rebalance. // All others will probably be doing the same thing but better to be // conservative and bit harsh here. Clients will reconnect if we over compensate. var clients []*client if mtce { clients = make([]*client, 0, len(a.clients)) for c := range a.clients { clients = append(clients, c) } sort.Slice(clients, func(i, j int) bool { return clients[i].start.After(clients[j].start) }) over := (len(a.clients) - int(a.sysclients) + int(a.nrclients)) - int(a.mconns) if over < len(clients) { clients = clients[:over] } } // Now check leafnodes. mtlce := a.mleafs != jwt.NoLimit && (a.nleafs+a.nrleafs > a.mleafs) if mtlce { // Take ones from the end. leafs := a.lleafs over := int(a.nleafs + a.nrleafs - a.mleafs) if over < len(leafs) { leafs = leafs[len(leafs)-over:] } clients = append(clients, leafs...) } a.mu.Unlock() // If we have exceeded our max clients this will be populated. return clients } // Removes tracking for a remote server that has shutdown. func (a *Account) removeRemoteServer(sid string) { a.mu.Lock() if a.strack != nil { prev := a.strack[sid] delete(a.strack, sid) a.nrclients -= prev.conns a.nrleafs -= prev.leafs } a.mu.Unlock() } // When querying for subject interest this is the number of // expected responses. We need to actually check that the entry // has active connections. func (a *Account) expectedRemoteResponses() (expected int32) { a.mu.RLock() for _, sc := range a.strack { if sc.conns > 0 || sc.leafs > 0 { expected++ } } a.mu.RUnlock() return } // Clears eventing and tracking for this account. func (a *Account) clearEventing() { a.mu.Lock() a.nrclients = 0 // Now clear state clearTimer(&a.etmr) clearTimer(&a.ctmr) a.clients = nil a.strack = nil a.mu.Unlock() } // GetName will return the accounts name. func (a *Account) GetName() string { if a == nil { return "n/a" } a.mu.RLock() name := a.Name a.mu.RUnlock() return name } // NumConnections returns active number of clients for this account for // all known servers. func (a *Account) NumConnections() int { a.mu.RLock() nc := len(a.clients) - int(a.sysclients) + int(a.nrclients) a.mu.RUnlock() return nc } // NumRemoteConnections returns the number of client or leaf connections that // are not on this server. func (a *Account) NumRemoteConnections() int { a.mu.RLock() nc := int(a.nrclients + a.nrleafs) a.mu.RUnlock() return nc } // NumLocalConnections returns active number of clients for this account // on this server. func (a *Account) NumLocalConnections() int { a.mu.RLock() nlc := a.numLocalConnections() a.mu.RUnlock() return nlc } // Do not account for the system accounts. func (a *Account) numLocalConnections() int { return len(a.clients) - int(a.sysclients) - int(a.nleafs) } // This is for extended local interest. // Lock should not be held. func (a *Account) numLocalAndLeafConnections() int { a.mu.RLock() nlc := len(a.clients) - int(a.sysclients) a.mu.RUnlock() return nlc } func (a *Account) numLocalLeafNodes() int { return int(a.nleafs) } // MaxTotalConnectionsReached returns if we have reached our limit for number of connections. 
func (a *Account) MaxTotalConnectionsReached() bool { var mtce bool a.mu.RLock() if a.mconns != jwt.NoLimit { mtce = len(a.clients)-int(a.sysclients)+int(a.nrclients) >= int(a.mconns) } a.mu.RUnlock() return mtce } // MaxActiveConnections return the set limit for the account system // wide for total number of active connections. func (a *Account) MaxActiveConnections() int { a.mu.RLock() mconns := int(a.mconns) a.mu.RUnlock() return mconns } // MaxTotalLeafNodesReached returns if we have reached our limit for number of leafnodes. func (a *Account) MaxTotalLeafNodesReached() bool { a.mu.RLock() mtc := a.maxTotalLeafNodesReached() a.mu.RUnlock() return mtc } func (a *Account) maxTotalLeafNodesReached() bool { if a.mleafs != jwt.NoLimit { return a.nleafs+a.nrleafs >= a.mleafs } return false } // NumLeafNodes returns the active number of local and remote // leaf node connections. func (a *Account) NumLeafNodes() int { a.mu.RLock() nln := int(a.nleafs + a.nrleafs) a.mu.RUnlock() return nln } // NumRemoteLeafNodes returns the active number of remote // leaf node connections. func (a *Account) NumRemoteLeafNodes() int { a.mu.RLock() nrn := int(a.nrleafs) a.mu.RUnlock() return nrn } // MaxActiveLeafNodes return the set limit for the account system // wide for total number of leavenode connections. // NOTE: these are tracked separately. func (a *Account) MaxActiveLeafNodes() int { a.mu.RLock() mleafs := int(a.mleafs) a.mu.RUnlock() return mleafs } // RoutedSubs returns how many subjects we would send across a route when first // connected or expressing interest. Local client subs. func (a *Account) RoutedSubs() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.rm) } // TotalSubs returns total number of Subscriptions for this account. func (a *Account) TotalSubs() int { a.mu.RLock() defer a.mu.RUnlock() return int(a.sl.Count()) } func (a *Account) shouldLogMaxSubErr() bool { if a == nil { return true } a.mu.RLock() last := a.lastLimErr a.mu.RUnlock() if now := time.Now().UnixNano(); now-last >= maxSubLimitReportThreshold { a.mu.Lock() a.lastLimErr = now a.mu.Unlock() return true } return false } // MapDest is for mapping published subjects for clients. type MapDest struct { Subject string `json:"subject"` Weight uint8 `json:"weight"` Cluster string `json:"cluster,omitempty"` } func NewMapDest(subject string, weight uint8) *MapDest { return &MapDest{subject, weight, _EMPTY_} } // destination is for internal representation for a weighted mapped destination. type destination struct { tr *transform weight uint8 } // mapping is an internal entry for mapping subjects. type mapping struct { src string wc bool dests []*destination cdests map[string][]*destination } // AddMapping adds in a simple route mapping from src subject to dest subject // for inbound client messages. func (a *Account) AddMapping(src, dest string) error { return a.AddWeightedMappings(src, NewMapDest(dest, 100)) } // AddWeightedMapping will add in a weighted mappings for the destinations. // TODO(dlc) - Allow cluster filtering func (a *Account) AddWeightedMappings(src string, dests ...*MapDest) error { a.mu.Lock() defer a.mu.Unlock() // We use this for selecting between multiple weighted destinations. 
if a.prand == nil { a.prand = rand.New(rand.NewSource(time.Now().UnixNano())) } if !IsValidSubject(src) { return ErrBadSubject } m := &mapping{src: src, wc: subjectHasWildcard(src), dests: make([]*destination, 0, len(dests)+1)} seen := make(map[string]struct{}) var tw uint8 for _, d := range dests { if _, ok := seen[d.Subject]; ok { return fmt.Errorf("duplicate entry for %q", d.Subject) } seen[d.Subject] = struct{}{} if d.Weight > 100 { return fmt.Errorf("individual weights need to be <= 100") } tw += d.Weight if tw > 100 { return fmt.Errorf("total weight needs to be <= 100") } if !IsValidSubject(d.Subject) { return ErrBadSubject } tr, err := newTransform(src, d.Subject) if err != nil { return err } if d.Cluster == _EMPTY_ { m.dests = append(m.dests, &destination{tr, d.Weight}) } else { // We have a cluster scoped filter. if m.cdests == nil { m.cdests = make(map[string][]*destination) } ad := m.cdests[d.Cluster] ad = append(ad, &destination{tr, d.Weight}) m.cdests[d.Cluster] = ad } } processDestinations := func(dests []*destination) ([]*destination, error) { var ltw uint8 for _, d := range dests { ltw += d.weight } // Auto add in original at weight difference if all entries weight does not total to 100. // Iff the src was not already added in explicitly, meaning they want loss. _, haveSrc := seen[src] if ltw != 100 && !haveSrc { dest := src if m.wc { // We need to make the appropriate markers for the wildcards etc. dest = transformTokenize(dest) } tr, err := newTransform(src, dest) if err != nil { return nil, err } aw := 100 - ltw if len(dests) == 0 { aw = 100 } dests = append(dests, &destination{tr, aw}) } sort.Slice(dests, func(i, j int) bool { return dests[i].weight < dests[j].weight }) var lw uint8 for _, d := range dests { d.weight += lw lw = d.weight } return dests, nil } var err error if m.dests, err = processDestinations(m.dests); err != nil { return err } // Option cluster scoped destinations for cluster, dests := range m.cdests { if dests, err = processDestinations(dests); err != nil { return err } m.cdests[cluster] = dests } // Replace an old one if it exists. for i, m := range a.mappings { if m.src == src { a.mappings[i] = m return nil } } // If we did not replace add to the end. a.mappings = append(a.mappings, m) // If we have connected leafnodes make sure to update. if len(a.lleafs) > 0 { for _, lc := range a.lleafs { lc.forceAddToSmap(src) } } return nil } // Helper function to tokenize subjects with partial wildcards into formal transform destinations. // e.g. foo.*.* -> foo.$1.$2 func transformTokenize(subject string) string { // We need to make the appropriate markers for the wildcards etc. i := 1 var nda []string for _, token := range strings.Split(subject, tsep) { if token == "*" { nda = append(nda, fmt.Sprintf("$%d", i)) i++ } else { nda = append(nda, token) } } return strings.Join(nda, tsep) } func transformUntokenize(subject string) (string, []string) { var phs []string var nda []string for _, token := range strings.Split(subject, tsep) { if len(token) > 1 && token[0] == '$' && token[1] >= '1' && token[1] <= '9' { phs = append(phs, token) nda = append(nda, "*") } else { nda = append(nda, token) } } return strings.Join(nda, tsep), phs } // RemoveMapping will remove an existing mapping. func (a *Account) RemoveMapping(src string) bool { a.mu.Lock() defer a.mu.Unlock() for i, m := range a.mappings { if m.src == src { // Swap last one into this spot. Its ok to change order. 
a.mappings[i] = a.mappings[len(a.mappings)-1] a.mappings[len(a.mappings)-1] = nil // gc a.mappings = a.mappings[:len(a.mappings)-1] return true } } return false } // Indicates we have mapping entries. func (a *Account) hasMappings() bool { if a == nil { return false } a.mu.RLock() n := len(a.mappings) a.mu.RUnlock() return n > 0 } // This performs the logic to map to a new dest subject based on mappings. // Should only be called from processInboundClientMsg or service import processing. func (a *Account) selectMappedSubject(dest string) (string, bool) { a.mu.RLock() if len(a.mappings) == 0 { a.mu.RUnlock() return dest, false } // In case we have to tokenize for subset matching. tsa := [32]string{} tts := tsa[:0] var m *mapping for _, rm := range a.mappings { if !rm.wc && rm.src == dest { m = rm break } else { // tokenize and reuse for subset matching. if len(tts) == 0 { start := 0 subject := dest for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) } if isSubsetMatch(tts, rm.src) { m = rm break } } } if m == nil { a.mu.RUnlock() return dest, false } // The selected destination for the mapping. var d *destination var ndest string dests := m.dests if len(m.cdests) > 0 { cn := a.srv.cachedClusterName() dests = m.cdests[cn] if dests == nil { // Fallback to main if we do not match the cluster. dests = m.dests } } // Optimize for single entry case. if len(dests) == 1 && dests[0].weight == 100 { d = dests[0] } else { w := uint8(a.prand.Int31n(100)) for _, rm := range dests { if w < rm.weight { d = rm break } } } if d != nil { if len(d.tr.dtpi) == 0 { ndest = d.tr.dest } else if nsubj, err := d.tr.transform(tts); err == nil { ndest = nsubj } } a.mu.RUnlock() return ndest, true } // SubscriptionInterest returns true if this account has a matching subscription // for the given `subject`. func (a *Account) SubscriptionInterest(subject string) bool { return a.Interest(subject) > 0 } // Interest returns the number of subscriptions for a given subject that match. func (a *Account) Interest(subject string) int { var nms int a.mu.RLock() if a.sl != nil { res := a.sl.Match(subject) nms = len(res.psubs) + len(res.qsubs) } a.mu.RUnlock() return nms } // addClient keeps our accounting of local active clients or leafnodes updated. // Returns previous total. func (a *Account) addClient(c *client) int { a.mu.Lock() n := len(a.clients) if a.clients != nil { a.clients[c] = struct{}{} } added := n != len(a.clients) if added { if c.kind != CLIENT && c.kind != LEAF { a.sysclients++ } else if c.kind == LEAF { a.nleafs++ a.lleafs = append(a.lleafs, c) } } a.mu.Unlock() if c != nil && c.srv != nil && added { c.srv.accConnsUpdate(a) } return n } // Helper function to remove leaf nodes. If number of leafnodes gets large // this may need to be optimized out of linear search but believe number // of active leafnodes per account scope to be small and therefore cache friendly. // Lock should be held on account. func (a *Account) removeLeafNode(c *client) { ll := len(a.lleafs) for i, l := range a.lleafs { if l == c { a.lleafs[i] = a.lleafs[ll-1] if ll == 1 { a.lleafs = nil } else { a.lleafs = a.lleafs[:ll-1] } return } } } // removeClient keeps our accounting of local active clients updated. 
func (a *Account) removeClient(c *client) int { a.mu.Lock() n := len(a.clients) delete(a.clients, c) removed := n != len(a.clients) if removed { if c.kind != CLIENT && c.kind != LEAF { a.sysclients-- } else if c.kind == LEAF { a.nleafs-- a.removeLeafNode(c) } } a.mu.Unlock() if c != nil && c.srv != nil && removed { c.srv.mu.Lock() doRemove := a != c.srv.gacc c.srv.mu.Unlock() if doRemove { c.srv.accConnsUpdate(a) } } return n } func setExportAuth(ea *exportAuth, subject string, accounts []*Account, accountPos uint) error { if accountPos > 0 { token := strings.Split(subject, ".") if len(token) < int(accountPos) || token[accountPos-1] != "*" { return ErrInvalidSubject } } ea.accountPos = accountPos // empty means auth required but will be import token. if accounts == nil { return nil } if len(accounts) == 0 { ea.tokenReq = true return nil } if ea.approved == nil { ea.approved = make(map[string]*Account, len(accounts)) } for _, acc := range accounts { ea.approved[acc.Name] = acc } return nil } // AddServiceExport will configure the account with the defined export. func (a *Account) AddServiceExport(subject string, accounts []*Account) error { return a.addServiceExportWithResponseAndAccountPos(subject, Singleton, accounts, 0) } // AddServiceExport will configure the account with the defined export. func (a *Account) addServiceExportWithAccountPos(subject string, accounts []*Account, accountPos uint) error { return a.addServiceExportWithResponseAndAccountPos(subject, Singleton, accounts, accountPos) } // AddServiceExportWithResponse will configure the account with the defined export and response type. func (a *Account) AddServiceExportWithResponse(subject string, respType ServiceRespType, accounts []*Account) error { return a.addServiceExportWithResponseAndAccountPos(subject, respType, accounts, 0) } // AddServiceExportWithresponse will configure the account with the defined export and response type. func (a *Account) addServiceExportWithResponseAndAccountPos( subject string, respType ServiceRespType, accounts []*Account, accountPos uint) error { if a == nil { return ErrMissingAccount } a.mu.Lock() defer a.mu.Unlock() if a.exports.services == nil { a.exports.services = make(map[string]*serviceExport) } se := a.exports.services[subject] // Always create a service export if se == nil { se = &serviceExport{} } if respType != Singleton { se.respType = respType } if accounts != nil || accountPos > 0 { if err := setExportAuth(&se.exportAuth, subject, accounts, accountPos); err != nil { return err } } lrt := a.lowestServiceExportResponseTime() se.acc = a se.respThresh = DEFAULT_SERVICE_EXPORT_RESPONSE_THRESHOLD a.exports.services[subject] = se if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt { a.updateAllClientsServiceExportResponseTime(nlrt) } return nil } // TrackServiceExport will enable latency tracking of the named service. // Results will be published in this account to the given results subject. func (a *Account) TrackServiceExport(service, results string) error { return a.TrackServiceExportWithSampling(service, results, DEFAULT_SERVICE_LATENCY_SAMPLING) } // TrackServiceExportWithSampling will enable latency tracking of the named service for the given // sampling rate (1-100). Results will be published in this account to the given results subject. 
func (a *Account) TrackServiceExportWithSampling(service, results string, sampling int) error { if a == nil { return ErrMissingAccount } if sampling != 0 { // 0 means triggered by header if sampling < 1 || sampling > 100 { return ErrBadSampling } } if !IsValidPublishSubject(results) { return ErrBadPublishSubject } // Don't loop back on outselves. if a.IsExportService(results) { return ErrBadPublishSubject } if a.srv != nil && !a.srv.EventsEnabled() { return ErrNoSysAccount } a.mu.Lock() if a.exports.services == nil { a.mu.Unlock() return ErrMissingService } ea, ok := a.exports.services[service] if !ok { a.mu.Unlock() return ErrMissingService } if ea == nil { ea = &serviceExport{} a.exports.services[service] = ea } else if ea.respType != Singleton { a.mu.Unlock() return ErrBadServiceType } ea.latency = &serviceLatency{ sampling: int8(sampling), subject: results, } s := a.srv a.mu.Unlock() if s == nil { return nil } // Now track down the imports and add in latency as needed to enable. s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.services { if im != nil && im.acc.Name == a.Name && subjectIsSubsetMatch(im.to, service) { im.latency = ea.latency } } acc.mu.Unlock() return true }) return nil } // UnTrackServiceExport will disable latency tracking of the named service. func (a *Account) UnTrackServiceExport(service string) { if a == nil || (a.srv != nil && !a.srv.EventsEnabled()) { return } a.mu.Lock() if a == nil || a.exports.services == nil { a.mu.Unlock() return } ea, ok := a.exports.services[service] if !ok || ea == nil || ea.latency == nil { a.mu.Unlock() return } // We have latency here. ea.latency = nil s := a.srv a.mu.Unlock() if s == nil { return } // Now track down the imports and clean them up. s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) acc.mu.Lock() for _, im := range acc.imports.services { if im != nil && im.acc.Name == a.Name { if subjectIsSubsetMatch(im.to, service) { im.latency, im.m1 = nil, nil } } } acc.mu.Unlock() return true }) } // IsExportService will indicate if this service exists. Will check wildcard scenarios. func (a *Account) IsExportService(service string) bool { a.mu.RLock() defer a.mu.RUnlock() _, ok := a.exports.services[service] if ok { return true } tokens := strings.Split(service, tsep) for subj := range a.exports.services { if isSubsetMatch(tokens, subj) { return true } } return false } // IsExportServiceTracking will indicate if given publish subject is an export service with tracking enabled. func (a *Account) IsExportServiceTracking(service string) bool { a.mu.RLock() ea, ok := a.exports.services[service] if ok && ea == nil { a.mu.RUnlock() return false } if ok && ea != nil && ea.latency != nil { a.mu.RUnlock() return true } // FIXME(dlc) - Might want to cache this is in the hot path checking for latency tracking. tokens := strings.Split(service, tsep) for subj, ea := range a.exports.services { if isSubsetMatch(tokens, subj) && ea != nil && ea.latency != nil { a.mu.RUnlock() return true } } a.mu.RUnlock() return false } // ServiceLatency is the JSON message sent out in response to latency tracking for // an accounts exported services. Additional client info is available in requestor // and responder. Note that for a requestor, the only information shared by default // is the RTT used to calculate the total latency. The requestor's account can // designate to share the additional information in the service import. 
type ServiceLatency struct { TypedEvent Status int `json:"status"` Error string `json:"description,omitempty"` Requestor *ClientInfo `json:"requestor,omitempty"` Responder *ClientInfo `json:"responder,omitempty"` RequestHeader http.Header `json:"header,omitempty"` // only contains header(s) triggering the measurement RequestStart time.Time `json:"start"` ServiceLatency time.Duration `json:"service"` SystemLatency time.Duration `json:"system"` TotalLatency time.Duration `json:"total"` } // ServiceLatencyType is the NATS Event Type for ServiceLatency const ServiceLatencyType = "io.nats.server.metric.v1.service_latency" // NATSTotalTime is a helper function that totals the NATS latencies. func (m1 *ServiceLatency) NATSTotalTime() time.Duration { return m1.Requestor.RTT + m1.Responder.RTT + m1.SystemLatency } // Merge function to merge m1 and m2 (requestor and responder) measurements // when there are two samples. This happens when the requestor and responder // are on different servers. // // m2 ServiceLatency is correct, so use that. // m1 TotalLatency is correct, so use that. // Will use those to back into NATS latency. func (m1 *ServiceLatency) merge(m2 *ServiceLatency) { rtt := time.Duration(0) if m2.Responder != nil { rtt = m2.Responder.RTT } m1.SystemLatency = m1.ServiceLatency - (m2.ServiceLatency + rtt) m1.ServiceLatency = m2.ServiceLatency m1.Responder = m2.Responder sanitizeLatencyMetric(m1) } // sanitizeLatencyMetric adjusts latency metric values that could go // negative in some edge conditions since we estimate client RTT // for both requestor and responder. // These numbers are never meant to be negative, it just could be // how we back into the values based on estimated RTT. func sanitizeLatencyMetric(sl *ServiceLatency) { if sl.ServiceLatency < 0 { sl.ServiceLatency = 0 } if sl.SystemLatency < 0 { sl.SystemLatency = 0 } } // Used for transporting remote latency measurements. type remoteLatency struct { Account string `json:"account"` ReqId string `json:"req_id"` M2 ServiceLatency `json:"m2"` respThresh time.Duration } // sendLatencyResult will send a latency result and clear the si of the requestor(rc). func (a *Account) sendLatencyResult(si *serviceImport, sl *ServiceLatency) { sl.Type = ServiceLatencyType sl.ID = a.nextEventID() sl.Time = time.Now().UTC() a.mu.Lock() lsubj := si.latency.subject si.rc = nil a.mu.Unlock() a.srv.sendInternalAccountMsg(a, lsubj, sl) } // Used to send a bad request metric when we do not have a reply subject func (a *Account) sendBadRequestTrackingLatency(si *serviceImport, requestor *client, header http.Header) { sl := &ServiceLatency{ Status: 400, Error: "Bad Request", Requestor: requestor.getClientInfo(si.share), } sl.RequestHeader = header sl.RequestStart = time.Now().Add(-sl.Requestor.RTT).UTC() a.sendLatencyResult(si, sl) } // Used to send a latency result when the requestor interest was lost before the // response could be delivered. 
func (a *Account) sendReplyInterestLostTrackLatency(si *serviceImport) { sl := &ServiceLatency{ Status: 408, Error: "Request Timeout", } a.mu.RLock() rc := si.rc share := si.share ts := si.ts sl.RequestHeader = si.trackingHdr a.mu.RUnlock() if rc != nil { sl.Requestor = rc.getClientInfo(share) } sl.RequestStart = time.Unix(0, ts-int64(sl.Requestor.RTT)).UTC() a.sendLatencyResult(si, sl) } func (a *Account) sendBackendErrorTrackingLatency(si *serviceImport, reason rsiReason) { sl := &ServiceLatency{} a.mu.RLock() rc := si.rc share := si.share ts := si.ts sl.RequestHeader = si.trackingHdr a.mu.RUnlock() if rc != nil { sl.Requestor = rc.getClientInfo(share) } var reqRTT time.Duration if sl.Requestor != nil { reqRTT = sl.Requestor.RTT } sl.RequestStart = time.Unix(0, ts-int64(reqRTT)).UTC() if reason == rsiNoDelivery { sl.Status = 503 sl.Error = "Service Unavailable" } else if reason == rsiTimeout { sl.Status = 504 sl.Error = "Service Timeout" } a.sendLatencyResult(si, sl) } // sendTrackingMessage will send out the appropriate tracking information for the // service request/response latency. This is called when the requestor's server has // received the response. // TODO(dlc) - holding locks for RTTs may be too much long term. Should revisit. func (a *Account) sendTrackingLatency(si *serviceImport, responder *client) bool { if si.rc == nil { return true } ts := time.Now() serviceRTT := time.Duration(ts.UnixNano() - si.ts) requestor := si.rc sl := &ServiceLatency{ Status: 200, Requestor: requestor.getClientInfo(si.share), Responder: responder.getClientInfo(true), } var respRTT, reqRTT time.Duration if sl.Responder != nil { respRTT = sl.Responder.RTT } if sl.Requestor != nil { reqRTT = sl.Requestor.RTT } sl.RequestStart = time.Unix(0, si.ts-int64(reqRTT)).UTC() sl.ServiceLatency = serviceRTT - respRTT sl.TotalLatency = sl.Requestor.RTT + serviceRTT if respRTT > 0 { sl.SystemLatency = time.Since(ts) sl.TotalLatency += sl.SystemLatency } sl.RequestHeader = si.trackingHdr sanitizeLatencyMetric(sl) sl.Type = ServiceLatencyType sl.ID = a.nextEventID() sl.Time = time.Now().UTC() // If we are expecting a remote measurement, store our sl here. // We need to account for the race between this and us receiving the // remote measurement. // FIXME(dlc) - We need to clean these up but this should happen // already with the auto-expire logic. if responder != nil && responder.kind != CLIENT { si.acc.mu.Lock() if si.m1 != nil { m1, m2 := sl, si.m1 m1.merge(m2) si.acc.mu.Unlock() a.srv.sendInternalAccountMsg(a, si.latency.subject, m1) a.mu.Lock() si.rc = nil a.mu.Unlock() return true } si.m1 = sl si.acc.mu.Unlock() return false } else { a.srv.sendInternalAccountMsg(a, si.latency.subject, sl) a.mu.Lock() si.rc = nil a.mu.Unlock() } return true } // This will check to make sure our response lower threshold is set // properly in any clients doing rrTracking. // Lock should be held. func (a *Account) updateAllClientsServiceExportResponseTime(lrt time.Duration) { for c := range a.clients { c.mu.Lock() if c.rrTracking != nil && lrt != c.rrTracking.lrt { c.rrTracking.lrt = lrt if c.rrTracking.ptmr.Stop() { c.rrTracking.ptmr.Reset(lrt) } } c.mu.Unlock() } } // Will select the lowest respThresh from all service exports. // Read lock should be held. func (a *Account) lowestServiceExportResponseTime() time.Duration { // Lowest we will allow is 5 minutes. Its an upper bound for this function. 
lrt := 5 * time.Minute for _, se := range a.exports.services { if se.respThresh < lrt { lrt = se.respThresh } } return lrt } // AddServiceImportWithClaim will add in the service import via the jwt claim. func (a *Account) AddServiceImportWithClaim(destination *Account, from, to string, imClaim *jwt.Import) error { if destination == nil { return ErrMissingAccount } // Empty means use from. if to == _EMPTY_ { to = from } if !IsValidSubject(from) || !IsValidSubject(to) { return ErrInvalidSubject } // First check to see if the account has authorized us to route to the "to" subject. if !destination.checkServiceImportAuthorized(a, to, imClaim) { return ErrServiceImportAuthorization } // Check if this introduces a cycle before proceeding. if err := a.serviceImportFormsCycle(destination, from); err != nil { return err } _, err := a.addServiceImport(destination, from, to, imClaim) return err } const MaxAccountCycleSearchDepth = 1024 func (a *Account) serviceImportFormsCycle(dest *Account, from string) error { return dest.checkServiceImportsForCycles(from, map[string]bool{a.Name: true}) } func (a *Account) checkServiceImportsForCycles(from string, visited map[string]bool) error { if len(visited) >= MaxAccountCycleSearchDepth { return ErrCycleSearchDepth } a.mu.RLock() for _, si := range a.imports.services { if SubjectsCollide(from, si.to) { a.mu.RUnlock() if visited[si.acc.Name] { return ErrImportFormsCycle } // Push ourselves and check si.acc visited[a.Name] = true if subjectIsSubsetMatch(si.from, from) { from = si.from } if err := si.acc.checkServiceImportsForCycles(from, visited); err != nil { return err } a.mu.RLock() } } a.mu.RUnlock() return nil } func (a *Account) streamImportFormsCycle(dest *Account, to string) error { return dest.checkStreamImportsForCycles(to, map[string]bool{a.Name: true}) } // Lock should be held. func (a *Account) hasStreamExportMatching(to string) bool { for subj := range a.exports.streams { if subjectIsSubsetMatch(to, subj) { return true } } return false } func (a *Account) checkStreamImportsForCycles(to string, visited map[string]bool) error { if len(visited) >= MaxAccountCycleSearchDepth { return ErrCycleSearchDepth } a.mu.RLock() if !a.hasStreamExportMatching(to) { a.mu.RUnlock() return nil } for _, si := range a.imports.streams { if SubjectsCollide(to, si.to) { a.mu.RUnlock() if visited[si.acc.Name] { return ErrImportFormsCycle } // Push ourselves and check si.acc visited[a.Name] = true if subjectIsSubsetMatch(si.to, to) { to = si.to } if err := si.acc.checkStreamImportsForCycles(to, visited); err != nil { return err } a.mu.RLock() } } a.mu.RUnlock() return nil } // SetServiceImportSharing will allow sharing of information about requests with the export account. // Used for service latency tracking at the moment. func (a *Account) SetServiceImportSharing(destination *Account, to string, allow bool) error { a.mu.Lock() defer a.mu.Unlock() if a.isClaimAccount() { return fmt.Errorf("claim based accounts can not be updated directly") } for _, si := range a.imports.services { if si.acc == destination && si.to == to { si.share = allow return nil } } return fmt.Errorf("service import not found") } // AddServiceImport will add a route to an account to send published messages / requests // to the destination account. From is the local subject to map, To is the // subject that will appear on the destination account. Destination will need // to have an import rule to allow access via addService. 
func (a *Account) AddServiceImport(destination *Account, from, to string) error { return a.AddServiceImportWithClaim(destination, from, to, nil) } // NumPendingReverseResponses returns the number of response mappings we have for all outstanding // requests for service imports. func (a *Account) NumPendingReverseResponses() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.rrMap) } // NumPendingAllResponses return the number of all responses outstanding for service exports. func (a *Account) NumPendingAllResponses() int { return a.NumPendingResponses(_EMPTY_) } // NumResponsesPending returns the number of responses outstanding for service exports // on this account. An empty filter string returns all responses regardless of which export. // If you specify the filter we will only return ones that are for that export. // NOTE this is only for what this server is tracking. func (a *Account) NumPendingResponses(filter string) int { a.mu.RLock() defer a.mu.RUnlock() if filter == _EMPTY_ { return len(a.exports.responses) } se := a.getServiceExport(filter) if se == nil { return 0 } var nre int for _, si := range a.exports.responses { if si.se == se { nre++ } } return nre } // NumServiceImports returns the number of service imports we have configured. func (a *Account) NumServiceImports() int { a.mu.RLock() defer a.mu.RUnlock() return len(a.imports.services) } // Reason why we are removing this response serviceImport. type rsiReason int const ( rsiOk = rsiReason(iota) rsiNoDelivery rsiTimeout ) // removeRespServiceImport removes a response si mapping and the reverse entries for interest detection. func (a *Account) removeRespServiceImport(si *serviceImport, reason rsiReason) { if si == nil { return } a.mu.Lock() delete(a.exports.responses, si.from) dest := si.acc to := si.to tracking := si.tracking rc := si.rc a.mu.Unlock() if tracking && rc != nil { a.sendBackendErrorTrackingLatency(si, reason) } dest.checkForReverseEntry(to, si, false) } // removeServiceImport will remove the route by subject. func (a *Account) removeServiceImport(subject string) { a.mu.Lock() si, ok := a.imports.services[subject] delete(a.imports.services, subject) var sid []byte c := a.ic if ok && si != nil { if a.ic != nil && si.sid != nil { sid = si.sid } } a.mu.Unlock() if sid != nil { c.processUnsub(sid) } } // This tracks responses to service requests mappings. This is used for cleanup. func (a *Account) addReverseRespMapEntry(acc *Account, reply, from string) { a.mu.Lock() if a.imports.rrMap == nil { a.imports.rrMap = make(map[string][]*serviceRespEntry) } sre := &serviceRespEntry{acc, from} sra := a.imports.rrMap[reply] a.imports.rrMap[reply] = append(sra, sre) a.mu.Unlock() } // checkForReverseEntries is for when we are trying to match reverse entries to a wildcard. // This will be called from checkForReverseEntry when the reply arg is a wildcard subject. // This will usually be called in a go routine since we need to walk all the entries. func (a *Account) checkForReverseEntries(reply string, checkInterest bool) { a.mu.RLock() if len(a.imports.rrMap) == 0 { a.mu.RUnlock() return } if subjectIsLiteral(reply) { a.mu.RUnlock() a.checkForReverseEntry(reply, nil, checkInterest) return } var _rs [64]string rs := _rs[:0] for k := range a.imports.rrMap { if subjectIsSubsetMatch(k, reply) { rs = append(rs, k) } } a.mu.RUnlock() for _, reply := range rs { a.checkForReverseEntry(reply, nil, checkInterest) } } // This checks for any response map entries. 
If you specify an si we will only match and // clean up for that one, otherwise we remove them all. func (a *Account) checkForReverseEntry(reply string, si *serviceImport, checkInterest bool) { a.mu.RLock() if len(a.imports.rrMap) == 0 { a.mu.RUnlock() return } if subjectHasWildcard(reply) { doInline := len(a.imports.rrMap) <= 64 a.mu.RUnlock() if doInline { a.checkForReverseEntries(reply, checkInterest) } else { go a.checkForReverseEntries(reply, checkInterest) } return } sres := a.imports.rrMap[reply] if sres == nil { a.mu.RUnlock() return } // If we are here we have an entry we should check. // If requested we will first check if there is any // interest for this subject for the entire account. // If there is we can not delete any entries yet. // Note that if we are here reply has to be a literal subject. if checkInterest { // If interest still exists we can not clean these up yet. if rr := a.sl.Match(reply); len(rr.psubs)+len(rr.qsubs) > 0 { a.mu.RUnlock() return } } a.mu.RUnlock() // Delete the appropriate entries here based on optional si. a.mu.Lock() if si == nil { delete(a.imports.rrMap, reply) } else { // Find the one we are looking for.. for i, sre := range sres { if sre.msub == si.from { sres = append(sres[:i], sres[i+1:]...) break } } if len(sres) > 0 { a.imports.rrMap[si.to] = sres } else { delete(a.imports.rrMap, si.to) } } a.mu.Unlock() // If we are here we no longer have interest and we have // response entries that we should clean up. if si == nil { for _, sre := range sres { acc := sre.acc var trackingCleanup bool var rsi *serviceImport acc.mu.Lock() if rsi = acc.exports.responses[sre.msub]; rsi != nil && !rsi.didDeliver { delete(acc.exports.responses, rsi.from) trackingCleanup = rsi.tracking && rsi.rc != nil } acc.mu.Unlock() if trackingCleanup { acc.sendReplyInterestLostTrackLatency(rsi) } } } } // Checks to see if a potential service import subject is already overshadowed. func (a *Account) serviceImportShadowed(from string) bool { a.mu.RLock() defer a.mu.RUnlock() if a.imports.services[from] != nil { return true } // We did not find a direct match, so check individually. for subj := range a.imports.services { if subjectIsSubsetMatch(from, subj) { return true } } return false } // Internal check to see if a service import exists. func (a *Account) serviceImportExists(from string) bool { a.mu.RLock() dup := a.imports.services[from] a.mu.RUnlock() return dup != nil } // Add a service import. // This does no checks and should only be called by the msg processing code. // Use AddServiceImport from above if responding to user input or config changes, etc. func (a *Account) addServiceImport(dest *Account, from, to string, claim *jwt.Import) (*serviceImport, error) { rt := Singleton var lat *serviceLatency dest.mu.RLock() se := dest.getServiceExport(to) if se != nil { rt = se.respType lat = se.latency } s := dest.srv dest.mu.RUnlock() // Track if this maps us to the system account. // We will always share information with them. 
var isSysAcc bool if s != nil { s.mu.Lock() if s.sys != nil && dest == s.sys.account { isSysAcc = true } s.mu.Unlock() } a.mu.Lock() if a.imports.services == nil { a.imports.services = make(map[string]*serviceImport) } else if dup := a.imports.services[from]; dup != nil { a.mu.Unlock() return nil, fmt.Errorf("duplicate service import subject %q, previously used in import for account %q, subject %q", from, dup.acc.Name, dup.to) } if to == _EMPTY_ { to = from } // Check to see if we have a wildcard var ( usePub bool tr *transform err error ) if subjectHasWildcard(to) { // If to and from match, then we use the published subject. if to == from { usePub = true } else { to, _ = transformUntokenize(to) // Create a transform. Do so in reverse such that $ symbols only exist in to if tr, err = newTransform(to, transformTokenize(from)); err != nil { a.mu.Unlock() return nil, fmt.Errorf("failed to create mapping transform for service import subject %q to %q: %v", from, to, err) } else { // un-tokenize and reverse transform so we get the transform needed from, _ = transformUntokenize(from) tr = tr.reverse() } } } // Turn on sharing by default if importing from system services. share := isSysAcc if claim != nil { share = claim.Share } si := &serviceImport{dest, claim, se, nil, from, to, tr, 0, rt, lat, nil, nil, usePub, false, false, share, false, false, nil} a.imports.services[from] = si a.mu.Unlock() if err := a.addServiceImportSub(si); err != nil { a.removeServiceImport(si.from) return nil, err } return si, nil } // Returns the internal client, will create one if not present. // Lock should be held. func (a *Account) internalClient() *client { if a.ic == nil && a.srv != nil { a.ic = a.srv.createInternalAccountClient() a.ic.acc = a } return a.ic } // Internal account scoped subscriptions. func (a *Account) subscribeInternal(subject string, cb msgHandler) (*subscription, error) { a.mu.Lock() c := a.internalClient() a.isid++ sid := strconv.FormatUint(a.isid, 10) a.mu.Unlock() // This will happen in parsing when the account has not been properly setup. if c == nil { return nil, fmt.Errorf("no internal account client") } return c.processSub([]byte(subject), nil, []byte(sid), cb, false) } // This will add an account subscription that matches the "from" from a service import entry. func (a *Account) addServiceImportSub(si *serviceImport) error { a.mu.Lock() c := a.internalClient() // This will happen in parsing when the account has not been properly setup. if c == nil { a.mu.Unlock() return nil } if si.sid != nil { a.mu.Unlock() return fmt.Errorf("duplicate call to create subscription for service import") } a.isid++ sid := strconv.FormatUint(a.isid, 10) si.sid = []byte(sid) subject := si.from a.mu.Unlock() cb := func(sub *subscription, c *client, acc *Account, subject, reply string, msg []byte) { c.processServiceImport(si, acc, msg) } sub, err := c.processSubEx([]byte(subject), nil, []byte(sid), cb, true, true, false) if err != nil { return err } // Leafnodes introduce a new way to introduce messages into the system. Therefore forward import subscription // This is similar to what initLeafNodeSmapAndSendSubs does // TODO we need to consider performing this update as we get client subscriptions. // This behavior would result in subscription propagation only where actually used. a.srv.updateLeafNodes(a, sub, 1) return nil } // Remove all the subscriptions associated with service imports. 
func (a *Account) removeAllServiceImportSubs() { a.mu.RLock() var sids [][]byte for _, si := range a.imports.services { if si.sid != nil { sids = append(sids, si.sid) si.sid = nil } } c := a.ic a.ic = nil a.mu.RUnlock() if c == nil { return } for _, sid := range sids { c.processUnsub(sid) } c.closeConnection(InternalClient) } // Add in subscriptions for all registered service imports. func (a *Account) addAllServiceImportSubs() { for _, si := range a.imports.services { a.addServiceImportSub(si) } } var ( // header where all information is encoded in one value. trcUber = textproto.CanonicalMIMEHeaderKey("Uber-Trace-Id") trcCtx = textproto.CanonicalMIMEHeaderKey("Traceparent") trcB3 = textproto.CanonicalMIMEHeaderKey("B3") // openzipkin header to check trcB3Sm = textproto.CanonicalMIMEHeaderKey("X-B3-Sampled") trcB3Id = textproto.CanonicalMIMEHeaderKey("X-B3-TraceId") // additional header needed to include when present trcB3PSId = textproto.CanonicalMIMEHeaderKey("X-B3-ParentSpanId") trcB3SId = textproto.CanonicalMIMEHeaderKey("X-B3-SpanId") trcCtxSt = textproto.CanonicalMIMEHeaderKey("Tracestate") trcUberCtxPrefix = textproto.CanonicalMIMEHeaderKey("Uberctx-") ) func newB3Header(h http.Header) http.Header { retHdr := http.Header{} if v, ok := h[trcB3Sm]; ok { retHdr[trcB3Sm] = v } if v, ok := h[trcB3Id]; ok { retHdr[trcB3Id] = v } if v, ok := h[trcB3PSId]; ok { retHdr[trcB3PSId] = v } if v, ok := h[trcB3SId]; ok { retHdr[trcB3SId] = v } return retHdr } func newUberHeader(h http.Header, tId []string) http.Header { retHdr := http.Header{trcUber: tId} for k, v := range h { if strings.HasPrefix(k, trcUberCtxPrefix) { retHdr[k] = v } } return retHdr } func newTraceCtxHeader(h http.Header, tId []string) http.Header { retHdr := http.Header{trcCtx: tId} if v, ok := h[trcCtxSt]; ok { retHdr[trcCtxSt] = v } return retHdr } // Helper to determine when to sample. 
When header has a value, sampling is driven by header func shouldSample(l *serviceLatency, c *client) (bool, http.Header) { if l == nil { return false, nil } if l.sampling < 0 { return false, nil } if l.sampling >= 100 { return true, nil } if l.sampling > 0 && rand.Int31n(100) <= int32(l.sampling) { return true, nil } h := c.parseState.getHeader() if len(h) == 0 { return false, nil } if tId := h[trcUber]; len(tId) != 0 { // sample 479fefe9525eddb:5adb976bfc1f95c1:479fefe9525eddb:1 tk := strings.Split(tId[0], ":") if len(tk) == 4 && len(tk[3]) > 0 && len(tk[3]) <= 2 { dst := [2]byte{} src := [2]byte{'0', tk[3][0]} if len(tk[3]) == 2 { src[1] = tk[3][1] } if _, err := hex.Decode(dst[:], src[:]); err == nil && dst[0]&1 == 1 { return true, newUberHeader(h, tId) } } return false, nil } else if sampled := h[trcB3Sm]; len(sampled) != 0 && sampled[0] == "1" { return true, newB3Header(h) // allowed } else if len(sampled) != 0 && sampled[0] == "0" { return false, nil // denied } else if _, ok := h[trcB3Id]; ok { // sample 80f198ee56343ba864fe8b2a57d3eff7 // presence (with X-B3-Sampled not being 0) means sampling left to recipient return true, newB3Header(h) } else if b3 := h[trcB3]; len(b3) != 0 { // sample 80f198ee56343ba864fe8b2a57d3eff7-e457b5a2e4d86bd1-1-05e3ac9a4f6e3b90 // sample 0 tk := strings.Split(b3[0], "-") if len(tk) > 2 && tk[2] == "0" { return false, nil // denied } else if len(tk) == 1 && tk[0] == "0" { return false, nil // denied } return true, http.Header{trcB3: b3} // sampling allowed or left to recipient of header } else if tId := h[trcCtx]; len(tId) != 0 { // sample 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01 tk := strings.Split(tId[0], "-") if len(tk) == 4 && len([]byte(tk[3])) == 2 && tk[3] == "01" { return true, newTraceCtxHeader(h, tId) } else { return false, nil } } return false, nil } // Used to mimic client like replies. const ( replyPrefix = "_R_." replyPrefixLen = len(replyPrefix) baseServerLen = 10 replyLen = 6 minReplyLen = 15 digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" base = 62 ) // This is where all service export responses are handled. func (a *Account) processServiceImportResponse(sub *subscription, c *client, _ *Account, subject, reply string, msg []byte) { a.mu.RLock() if a.expired || len(a.exports.responses) == 0 { a.mu.RUnlock() return } si := a.exports.responses[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() // Send for normal processing. c.processServiceImport(si, a, msg) } // Will create a wildcard subscription to handle interest graph propagation for all // service replies. // Lock should not be held. func (a *Account) createRespWildcard() []byte { a.mu.Lock() var b = [baseServerLen]byte{'_', 'R', '_', '.'} rn := a.prand.Uint64() for i, l := replyPrefixLen, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base } a.siReply = append(b[:], '.') pre := a.siReply wcsub := append(a.siReply, '>') c := a.internalClient() a.isid++ sid := strconv.FormatUint(a.isid, 10) a.mu.Unlock() // Create subscription and internal callback for all the wildcard response subjects. c.processSubEx(wcsub, nil, []byte(sid), a.processServiceImportResponse, false, false, true) return pre } // Test whether this is a tracked reply. func isTrackedReply(reply []byte) bool { lreply := len(reply) - 1 return lreply > 3 && reply[lreply-1] == '.' && reply[lreply] == 'T' } // Generate a new service reply from the wildcard prefix. // FIXME(dlc) - probably do not have to use rand here. about 25ns per. 
func (a *Account) newServiceReply(tracking bool) []byte { a.mu.Lock() s, replyPre := a.srv, a.siReply if a.prand == nil { var h maphash.Hash h.WriteString(nuid.Next()) a.prand = rand.New(rand.NewSource(int64(h.Sum64()))) } rn := a.prand.Uint64() a.mu.Unlock() if replyPre == nil { replyPre = a.createRespWildcard() } var b [replyLen]byte for i, l := 0, rn; i < len(b); i++ { b[i] = digits[l%base] l /= base } // Make sure to copy. reply := make([]byte, 0, len(replyPre)+len(b)) reply = append(reply, replyPre...) reply = append(reply, b[:]...) if tracking && s.sys != nil { // Add in our tracking identifier. This allows the metrics to get back to only // this server without needless SUBS/UNSUBS. reply = append(reply, '.') reply = append(reply, s.sys.shash...) reply = append(reply, '.', 'T') } return reply } // Checks if a serviceImport was created to map responses. func (si *serviceImport) isRespServiceImport() bool { return si != nil && si.response } // Sets the response theshold timer for a service export. // Account lock should be held func (se *serviceExport) setResponseThresholdTimer() { if se.rtmr != nil { return // Already set } se.rtmr = time.AfterFunc(se.respThresh, se.checkExpiredResponses) } // Account lock should be held func (se *serviceExport) clearResponseThresholdTimer() bool { if se.rtmr == nil { return true } stopped := se.rtmr.Stop() se.rtmr = nil return stopped } // checkExpiredResponses will check for any pending responses that need to // be cleaned up. func (se *serviceExport) checkExpiredResponses() { acc := se.acc if acc == nil { se.clearResponseThresholdTimer() return } var expired []*serviceImport mints := time.Now().UnixNano() - int64(se.respThresh) // TODO(dlc) - Should we release lock while doing this? Or only do these in batches? // Should we break this up for responses only from this service export? // Responses live on acc directly for fast inbound processsing for the _R_ wildcard. // We could do another indirection at this level but just to get to the service export? var totalResponses int acc.mu.RLock() for _, si := range acc.exports.responses { if si.se == se { totalResponses++ if si.ts <= mints { expired = append(expired, si) } } } acc.mu.RUnlock() for _, si := range expired { acc.removeRespServiceImport(si, rsiTimeout) } // Pull out expired to determine if we have any left for timer. totalResponses -= len(expired) // Redo timer as needed. acc.mu.Lock() if totalResponses > 0 && se.rtmr != nil { se.rtmr.Stop() se.rtmr.Reset(se.respThresh) } else { se.clearResponseThresholdTimer() } acc.mu.Unlock() } // ServiceExportResponseThreshold returns the current threshold. func (a *Account) ServiceExportResponseThreshold(export string) (time.Duration, error) { a.mu.Lock() defer a.mu.Unlock() se := a.getServiceExport(export) if se == nil { return 0, fmt.Errorf("no export defined for %q", export) } return se.respThresh, nil } // SetServiceExportResponseThreshold sets the maximum time the system will a response to be delivered // from a service export responder. 
func (a *Account) SetServiceExportResponseThreshold(export string, maxTime time.Duration) error { a.mu.Lock() defer a.mu.Unlock() if a.isClaimAccount() { return fmt.Errorf("claim based accounts can not be updated directly") } lrt := a.lowestServiceExportResponseTime() se := a.getServiceExport(export) if se == nil { return fmt.Errorf("no export defined for %q", export) } se.respThresh = maxTime if nlrt := a.lowestServiceExportResponseTime(); nlrt != lrt { a.updateAllClientsServiceExportResponseTime(nlrt) } return nil } // This is for internal service import responses. func (a *Account) addRespServiceImport(dest *Account, to string, osi *serviceImport, tracking bool, header http.Header) *serviceImport { nrr := string(osi.acc.newServiceReply(tracking)) a.mu.Lock() rt := osi.rt // dest is the requestor's account. a is the service responder with the export. // Marked as internal here, that is how we distinguish. si := &serviceImport{dest, nil, osi.se, nil, nrr, to, nil, 0, rt, nil, nil, nil, false, true, false, osi.share, false, false, nil} if a.exports.responses == nil { a.exports.responses = make(map[string]*serviceImport) } a.exports.responses[nrr] = si // Always grab time and make sure response threshold timer is running. si.ts = time.Now().UnixNano() osi.se.setResponseThresholdTimer() if rt == Singleton && tracking { si.latency = osi.latency si.tracking = true si.trackingHdr = header } a.mu.Unlock() // We do not do individual subscriptions here like we do on configured imports. // We have an internal callback for all responses inbound to this account and // will process appropriately there. This does not pollute the sublist and the caches. // We do add in the reverse map such that we can detect loss of interest and do proper // cleanup of this si as interest goes away. dest.addReverseRespMapEntry(a, to, nrr) return si } // AddStreamImportWithClaim will add in the stream import from a specific account with optional token. func (a *Account) AddStreamImportWithClaim(account *Account, from, prefix string, imClaim *jwt.Import) error { if account == nil { return ErrMissingAccount } // First check to see if the account has authorized export of the subject. if !account.checkStreamImportAuthorized(a, from, imClaim) { return ErrStreamImportAuthorization } // Check prefix if it exists and make sure its a literal. // Append token separator if not already present. if prefix != _EMPTY_ { // Make sure there are no wildcards here, this prefix needs to be a literal // since it will be prepended to a publish subject. if !subjectIsLiteral(prefix) { return ErrStreamImportBadPrefix } if prefix[len(prefix)-1] != btsep { prefix = prefix + string(btsep) } } return a.AddMappedStreamImportWithClaim(account, from, prefix+from, imClaim) } // AddMappedStreamImport helper for AddMappedStreamImportWithClaim func (a *Account) AddMappedStreamImport(account *Account, from, to string) error { return a.AddMappedStreamImportWithClaim(account, from, to, nil) } // AddMappedStreamImportWithClaim will add in the stream import from a specific account with optional token. func (a *Account) AddMappedStreamImportWithClaim(account *Account, from, to string, imClaim *jwt.Import) error { if account == nil { return ErrMissingAccount } // First check to see if the account has authorized export of the subject. if !account.checkStreamImportAuthorized(a, from, imClaim) { return ErrStreamImportAuthorization } if to == _EMPTY_ { to = from } // Check if this forms a cycle. 
if err := a.streamImportFormsCycle(account, to); err != nil { return err } var ( usePub bool tr *transform err error ) if subjectHasWildcard(from) { if to == from { usePub = true } else { // Create a transform if tr, err = newTransform(from, transformTokenize(to)); err != nil { return fmt.Errorf("failed to create mapping transform for stream import subject %q to %q: %v", from, to, err) } to, _ = transformUntokenize(to) } } a.mu.Lock() if a.isStreamImportDuplicate(account, from) { a.mu.Unlock() return ErrStreamImportDuplicate } a.imports.streams = append(a.imports.streams, &streamImport{account, from, to, tr, nil, imClaim, usePub, false}) a.mu.Unlock() return nil } // isStreamImportDuplicate checks for duplicate. // Lock should be held. func (a *Account) isStreamImportDuplicate(acc *Account, from string) bool { for _, si := range a.imports.streams { if si.acc == acc && si.from == from { return true } } return false } // AddStreamImport will add in the stream import from a specific account. func (a *Account) AddStreamImport(account *Account, from, prefix string) error { return a.AddStreamImportWithClaim(account, from, prefix, nil) } // IsPublicExport is a placeholder to denote a public export. var IsPublicExport = []*Account(nil) // AddStreamExport will add an export to the account. If accounts is nil // it will signify a public export, meaning anyone can import. func (a *Account) AddStreamExport(subject string, accounts []*Account) error { return a.addStreamExportWithAccountPos(subject, accounts, 0) } // AddStreamExport will add an export to the account. If accounts is nil // it will signify a public export, meaning anyone can import. // if accountPos is > 0, all imports will be granted where the following holds: // strings.Split(subject, ".")[accountPos] == account id will be granted. func (a *Account) addStreamExportWithAccountPos(subject string, accounts []*Account, accountPos uint) error { if a == nil { return ErrMissingAccount } a.mu.Lock() defer a.mu.Unlock() if a.exports.streams == nil { a.exports.streams = make(map[string]*streamExport) } ea := a.exports.streams[subject] if accounts != nil || accountPos > 0 { if ea == nil { ea = &streamExport{} } if err := setExportAuth(&ea.exportAuth, subject, accounts, accountPos); err != nil { return err } } a.exports.streams[subject] = ea return nil } // Check if another account is authorized to import from us. func (a *Account) checkStreamImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool { // Find the subject in the exports list. 
a.mu.RLock() auth := a.checkStreamImportAuthorizedNoLock(account, subject, imClaim) a.mu.RUnlock() return auth } func (a *Account) checkStreamImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool { if a.exports.streams == nil || !IsValidSubject(subject) { return false } return a.checkStreamExportApproved(account, subject, imClaim) } func (a *Account) checkAuth(ea *exportAuth, account *Account, imClaim *jwt.Import, tokens []string) bool { // if ea is nil or ea.approved is nil, that denotes a public export if ea == nil || (len(ea.approved) == 0 && !ea.tokenReq && ea.accountPos == 0) { return true } // Check if the export is protected and enforces presence of importing account identity if ea.accountPos > 0 { return ea.accountPos <= uint(len(tokens)) && tokens[ea.accountPos-1] == account.Name } // Check if token required if ea.tokenReq { return a.checkActivation(account, imClaim, true) } if ea.approved == nil { return false } // If we have a matching account we are authorized _, ok := ea.approved[account.Name] return ok } func (a *Account) checkStreamExportApproved(account *Account, subject string, imClaim *jwt.Import) bool { // Check direct match of subject first ea, ok := a.exports.streams[subject] if ok { // if ea is nil or eq.approved is nil, that denotes a public export if ea == nil { return true } return a.checkAuth(&ea.exportAuth, account, imClaim, nil) } // ok if we are here we did not match directly so we need to test each one. // The import subject arg has to take precedence, meaning the export // has to be a true subset of the import claim. We already checked for // exact matches above. tokens := strings.Split(subject, tsep) for subj, ea := range a.exports.streams { if isSubsetMatch(tokens, subj) { if ea == nil { return true } return a.checkAuth(&ea.exportAuth, account, imClaim, tokens) } } return false } func (a *Account) checkServiceExportApproved(account *Account, subject string, imClaim *jwt.Import) bool { // Check direct match of subject first se, ok := a.exports.services[subject] if ok { // if se is nil or eq.approved is nil, that denotes a public export if se == nil { return true } return a.checkAuth(&se.exportAuth, account, imClaim, nil) } // ok if we are here we did not match directly so we need to test each one. // The import subject arg has to take precedence, meaning the export // has to be a true subset of the import claim. We already checked for // exact matches above. tokens := strings.Split(subject, tsep) for subj, se := range a.exports.services { if isSubsetMatch(tokens, subj) { if se == nil { return true } return a.checkAuth(&se.exportAuth, account, imClaim, tokens) } } return false } // Helper function to get a serviceExport. // Lock should be held on entry. func (a *Account) getServiceExport(subj string) *serviceExport { se, ok := a.exports.services[subj] // The export probably has a wildcard, so lookup that up. if !ok { se = a.getWildcardServiceExport(subj) } return se } // This helper is used when trying to match a serviceExport record that is // represented by a wildcard. // Lock should be held on entry. func (a *Account) getWildcardServiceExport(from string) *serviceExport { tokens := strings.Split(from, tsep) for subj, se := range a.exports.services { if isSubsetMatch(tokens, subj) { return se } } return nil } // These are import stream specific versions for when an activation expires. 
func (a *Account) streamActivationExpired(exportAcc *Account, subject string) { a.mu.RLock() if a.expired || a.imports.streams == nil { a.mu.RUnlock() return } var si *streamImport for _, si = range a.imports.streams { if si.acc == exportAcc && si.from == subject { break } } if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() if si.acc.checkActivation(a, si.claim, false) { // The token has been updated most likely and we are good to go. return } a.mu.Lock() si.invalid = true clients := make([]*client, 0, len(a.clients)) for c := range a.clients { clients = append(clients, c) } awcsti := map[string]struct{}{a.Name: {}} a.mu.Unlock() for _, c := range clients { c.processSubsOnConfigReload(awcsti) } } // These are import service specific versions for when an activation expires. func (a *Account) serviceActivationExpired(subject string) { a.mu.RLock() if a.expired || a.imports.services == nil { a.mu.RUnlock() return } si := a.imports.services[subject] if si == nil || si.invalid { a.mu.RUnlock() return } a.mu.RUnlock() if si.acc.checkActivation(a, si.claim, false) { // The token has been updated most likely and we are good to go. return } a.mu.Lock() si.invalid = true a.mu.Unlock() } // Fires for expired activation tokens. We could track this with timers etc. // Instead we just re-analyze where we are and if we need to act. func (a *Account) activationExpired(exportAcc *Account, subject string, kind jwt.ExportType) { switch kind { case jwt.Stream: a.streamActivationExpired(exportAcc, subject) case jwt.Service: a.serviceActivationExpired(subject) } } func isRevoked(revocations map[string]int64, subject string, issuedAt int64) bool { if revocations == nil { return false } if t, ok := revocations[subject]; !ok || t < issuedAt { return false } return true } // checkActivation will check the activation token for validity. func (a *Account) checkActivation(importAcc *Account, claim *jwt.Import, expTimer bool) bool { if claim == nil || claim.Token == _EMPTY_ { return false } // Create a quick clone so we can inline Token JWT. clone := *claim vr := jwt.CreateValidationResults() clone.Validate(importAcc.Name, vr) if vr.IsBlocking(true) { return false } act, err := jwt.DecodeActivationClaims(clone.Token) if err != nil { return false } if !a.isIssuerClaimTrusted(act) { return false } vr = jwt.CreateValidationResults() act.Validate(vr) if vr.IsBlocking(true) { return false } if act.Expires != 0 { tn := time.Now().Unix() if act.Expires <= tn { return false } if expTimer { expiresAt := time.Duration(act.Expires - tn) time.AfterFunc(expiresAt*time.Second, func() { importAcc.activationExpired(a, string(act.ImportSubject), claim.Type) }) } } // Check for token revocation.. return !isRevoked(a.actsRevoked, act.Subject, act.IssuedAt) } // Returns true if the activation claim is trusted. That is the issuer matches // the account or is an entry in the signing keys. func (a *Account) isIssuerClaimTrusted(claims *jwt.ActivationClaims) bool { // if no issuer account, issuer is the account if claims.IssuerAccount == _EMPTY_ { return true } // If the IssuerAccount is not us, then this is considered an error. if a.Name != claims.IssuerAccount { if a.srv != nil { a.srv.Errorf("Invalid issuer account %q in activation claim (subject: %q - type: %q) for account %q", claims.IssuerAccount, claims.Activation.ImportSubject, claims.Activation.ImportType, a.Name) } return false } _, ok := a.hasIssuerNoLock(claims.Issuer) return ok } // Returns true if `a` and `b` stream imports are the same. 
Note that the // check is done with the account's name, not the pointer. This is used // during config reload where we are comparing current and new config // in which pointers are different. // No lock is acquired in this function, so it is assumed that the // import maps are not changed while this executes. func (a *Account) checkStreamImportsEqual(b *Account) bool { if len(a.imports.streams) != len(b.imports.streams) { return false } // Load the b imports into a map index by what we are looking for. bm := make(map[string]*streamImport, len(b.imports.streams)) for _, bim := range b.imports.streams { bm[bim.acc.Name+bim.from+bim.to] = bim } for _, aim := range a.imports.streams { if _, ok := bm[aim.acc.Name+aim.from+aim.to]; !ok { return false } } return true } func (a *Account) checkStreamExportsEqual(b *Account) bool { if len(a.exports.streams) != len(b.exports.streams) { return false } for subj, aea := range a.exports.streams { bea, ok := b.exports.streams[subj] if !ok { return false } if !reflect.DeepEqual(aea, bea) { return false } } return true } func (a *Account) checkServiceExportsEqual(b *Account) bool { if len(a.exports.services) != len(b.exports.services) { return false } for subj, aea := range a.exports.services { bea, ok := b.exports.services[subj] if !ok { return false } if !reflect.DeepEqual(aea, bea) { return false } } return true } // Check if another account is authorized to route requests to this service. func (a *Account) checkServiceImportAuthorized(account *Account, subject string, imClaim *jwt.Import) bool { a.mu.RLock() authorized := a.checkServiceImportAuthorizedNoLock(account, subject, imClaim) a.mu.RUnlock() return authorized } // Check if another account is authorized to route requests to this service. func (a *Account) checkServiceImportAuthorizedNoLock(account *Account, subject string, imClaim *jwt.Import) bool { // Find the subject in the services list. if a.exports.services == nil { return false } return a.checkServiceExportApproved(account, subject, imClaim) } // IsExpired returns expiration status. func (a *Account) IsExpired() bool { a.mu.RLock() exp := a.expired a.mu.RUnlock() return exp } // Called when an account has expired. func (a *Account) expiredTimeout() { // Mark expired first. a.mu.Lock() a.expired = true a.mu.Unlock() // Collect the clients and expire them. cs := make([]*client, 0, len(a.clients)) a.mu.RLock() for c := range a.clients { cs = append(cs, c) } a.mu.RUnlock() for _, c := range cs { c.accountAuthExpired() } } // Sets the expiration timer for an account JWT that has it set. func (a *Account) setExpirationTimer(d time.Duration) { a.etmr = time.AfterFunc(d, a.expiredTimeout) } // Lock should be held func (a *Account) clearExpirationTimer() bool { if a.etmr == nil { return true } stopped := a.etmr.Stop() a.etmr = nil return stopped } // checkUserRevoked will check if a user has been revoked. func (a *Account) checkUserRevoked(nkey string, issuedAt int64) bool { a.mu.RLock() defer a.mu.RUnlock() return isRevoked(a.usersRevoked, nkey, issuedAt) } // Check expiration and set the proper state as needed. 
func (a *Account) checkExpiration(claims *jwt.ClaimsData) { a.mu.Lock() defer a.mu.Unlock() a.clearExpirationTimer() if claims.Expires == 0 { a.expired = false return } tn := time.Now().Unix() if claims.Expires <= tn { a.expired = true return } expiresAt := time.Duration(claims.Expires - tn) a.setExpirationTimer(expiresAt * time.Second) a.expired = false } // hasIssuer returns true if the issuer matches the account // If the issuer is a scoped signing key, the scope will be returned as well // issuer or it is a signing key for the account. func (a *Account) hasIssuer(issuer string) (jwt.Scope, bool) { a.mu.RLock() scope, ok := a.hasIssuerNoLock(issuer) a.mu.RUnlock() return scope, ok } // hasIssuerNoLock is the unlocked version of hasIssuer func (a *Account) hasIssuerNoLock(issuer string) (jwt.Scope, bool) { scope, ok := a.signingKeys[issuer] return scope, ok } // Returns the loop detection subject used for leafnodes func (a *Account) getLDSubject() string { a.mu.RLock() lds := a.lds a.mu.RUnlock() return lds } // Placeholder for signaling token auth required. var tokenAuthReq = []*Account{} func authAccounts(tokenReq bool) []*Account { if tokenReq { return tokenAuthReq } return nil } // SetAccountResolver will assign the account resolver. func (s *Server) SetAccountResolver(ar AccountResolver) { s.mu.Lock() s.accResolver = ar s.mu.Unlock() } // AccountResolver returns the registered account resolver. func (s *Server) AccountResolver() AccountResolver { s.mu.Lock() ar := s.accResolver s.mu.Unlock() return ar } // isClaimAccount returns if this account is backed by a JWT claim. // Lock should be held. func (a *Account) isClaimAccount() bool { return a.claimJWT != _EMPTY_ } // updateAccountClaims will update an existing account with new claims. // This will replace any exports or imports previously defined. // Lock MUST NOT be held upon entry. func (s *Server) UpdateAccountClaims(a *Account, ac *jwt.AccountClaims) { s.updateAccountClaimsWithRefresh(a, ac, true) } func (a *Account) traceLabel() string { if a == nil { return _EMPTY_ } if a.nameTag != _EMPTY_ { return fmt.Sprintf("%s/%s", a.Name, a.nameTag) } return a.Name } // updateAccountClaimsWithRefresh will update an existing account with new claims. // If refreshImportingAccounts is true it will also update incomplete dependent accounts // This will replace any exports or imports previously defined. // Lock MUST NOT be held upon entry. func (s *Server) updateAccountClaimsWithRefresh(a *Account, ac *jwt.AccountClaims, refreshImportingAccounts bool) { if a == nil { return } s.Debugf("Updating account claims: %s/%s", a.Name, ac.Name) a.checkExpiration(ac.Claims()) a.mu.Lock() // Clone to update, only select certain fields. old := &Account{Name: a.Name, exports: a.exports, limits: a.limits, signingKeys: a.signingKeys} // overwrite claim meta data a.nameTag = ac.Name a.tags = ac.Tags // Reset exports and imports here. // Exports is creating a whole new map. a.exports = exportMap{} // Imports are checked unlocked in processInbound, so we can't change out the struct here. Need to process inline. if a.imports.streams != nil { old.imports.streams = a.imports.streams a.imports.streams = nil } if a.imports.services != nil { old.imports.services = make(map[string]*serviceImport, len(a.imports.services)) } for k, v := range a.imports.services { old.imports.services[k] = v delete(a.imports.services, k) } // Reset any notion of export revocations. 
a.actsRevoked = nil alteredScope := map[string]struct{}{} // update account signing keys a.signingKeys = nil _, strict := s.strictSigningKeyUsage[a.Issuer] if len(ac.SigningKeys) > 0 || !strict { a.signingKeys = make(map[string]jwt.Scope) } signersChanged := false for k, scope := range ac.SigningKeys { a.signingKeys[k] = scope } if !strict { a.signingKeys[a.Name] = nil } if len(a.signingKeys) != len(old.signingKeys) { signersChanged = true } for k, scope := range a.signingKeys { if oldScope, ok := old.signingKeys[k]; !ok { signersChanged = true } else if !reflect.DeepEqual(scope, oldScope) { signersChanged = true alteredScope[k] = struct{}{} } } // collect mappings that need to be removed removeList := []string{} for _, m := range a.mappings { if _, ok := ac.Mappings[jwt.Subject(m.src)]; !ok { removeList = append(removeList, m.src) } } a.mu.Unlock() for sub, wm := range ac.Mappings { mappings := make([]*MapDest, len(wm)) for i, m := range wm { mappings[i] = &MapDest{ Subject: string(m.Subject), Weight: m.GetWeight(), Cluster: m.Cluster, } } // This will overwrite existing entries a.AddWeightedMappings(string(sub), mappings...) } // remove mappings for _, rmMapping := range removeList { a.RemoveMapping(rmMapping) } // Re-register system exports/imports. if a == s.SystemAccount() { s.addSystemAccountExports(a) } else { s.registerSystemImports(a) } gatherClients := func() []*client { a.mu.RLock() clients := make([]*client, 0, len(a.clients)) for c := range a.clients { clients = append(clients, c) } a.mu.RUnlock() return clients } jsEnabled := s.JetStreamEnabled() if jsEnabled && a == s.SystemAccount() { s.checkJetStreamExports() } for _, e := range ac.Exports { switch e.Type { case jwt.Stream: s.Debugf("Adding stream export %q for %s", e.Subject, a.traceLabel()) if err := a.addStreamExportWithAccountPos( string(e.Subject), authAccounts(e.TokenReq), e.AccountTokenPosition); err != nil { s.Debugf("Error adding stream export to account [%s]: %v", a.traceLabel(), err.Error()) } case jwt.Service: s.Debugf("Adding service export %q for %s", e.Subject, a.traceLabel()) rt := Singleton switch e.ResponseType { case jwt.ResponseTypeStream: rt = Streamed case jwt.ResponseTypeChunked: rt = Chunked } if err := a.addServiceExportWithResponseAndAccountPos( string(e.Subject), rt, authAccounts(e.TokenReq), e.AccountTokenPosition); err != nil { s.Debugf("Error adding service export to account [%s]: %v", a.traceLabel(), err) continue } sub := string(e.Subject) if e.Latency != nil { if err := a.TrackServiceExportWithSampling(sub, string(e.Latency.Results), int(e.Latency.Sampling)); err != nil { hdrNote := _EMPTY_ if e.Latency.Sampling == jwt.Headers { hdrNote = " (using headers)" } s.Debugf("Error adding latency tracking%s for service export to account [%s]: %v", hdrNote, a.traceLabel(), err) } } if e.ResponseThreshold != 0 { // Response threshold was set in options. if err := a.SetServiceExportResponseThreshold(sub, e.ResponseThreshold); err != nil { s.Debugf("Error adding service export response threshold for [%s]: %v", a.traceLabel(), err) } } } // We will track these at the account level. Should not have any collisions. 
if e.Revocations != nil { a.mu.Lock() if a.actsRevoked == nil { a.actsRevoked = make(map[string]int64) } for k, t := range e.Revocations { a.actsRevoked[k] = t } a.mu.Unlock() } } var incompleteImports []*jwt.Import for _, i := range ac.Imports { // check tmpAccounts with priority var acc *Account var err error if v, ok := s.tmpAccounts.Load(i.Account); ok { acc = v.(*Account) } else { acc, err = s.lookupAccount(i.Account) } if acc == nil || err != nil { s.Errorf("Can't locate account [%s] for import of [%v] %s (err=%v)", i.Account, i.Subject, i.Type, err) incompleteImports = append(incompleteImports, i) continue } from := string(i.Subject) to := i.GetTo() switch i.Type { case jwt.Stream: if i.LocalSubject != _EMPTY_ { // set local subject implies to is empty to = string(i.LocalSubject) s.Debugf("Adding stream import %s:%q for %s:%q", acc.traceLabel(), from, a.traceLabel(), to) err = a.AddMappedStreamImportWithClaim(acc, from, to, i) } else { s.Debugf("Adding stream import %s:%q for %s:%q", acc.traceLabel(), from, a.traceLabel(), to) err = a.AddStreamImportWithClaim(acc, from, to, i) } if err != nil { s.Debugf("Error adding stream import to account [%s]: %v", a.traceLabel(), err.Error()) incompleteImports = append(incompleteImports, i) } case jwt.Service: if i.LocalSubject != _EMPTY_ { from = string(i.LocalSubject) to = string(i.Subject) } s.Debugf("Adding service import %s:%q for %s:%q", acc.traceLabel(), from, a.traceLabel(), to) if err := a.AddServiceImportWithClaim(acc, from, to, i); err != nil { s.Debugf("Error adding service import to account [%s]: %v", a.traceLabel(), err.Error()) incompleteImports = append(incompleteImports, i) } } } // Now let's apply any needed changes from import/export changes. if !a.checkStreamImportsEqual(old) { awcsti := map[string]struct{}{a.Name: {}} for _, c := range gatherClients() { c.processSubsOnConfigReload(awcsti) } } // Now check if stream exports have changed. if !a.checkStreamExportsEqual(old) || signersChanged { clients := map[*client]struct{}{} // We need to check all accounts that have an import claim from this account. awcsti := map[string]struct{}{} s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) // Move to the next if this account is actually account "a". if acc.Name == a.Name { return true } // TODO: checkStreamImportAuthorized() stack should not be trying // to lock "acc". If we find that to be needed, we will need to // rework this to ensure we don't lock acc. acc.mu.Lock() for _, im := range acc.imports.streams { if im != nil && im.acc.Name == a.Name { // Check for if we are still authorized for an import. im.invalid = !a.checkStreamImportAuthorized(acc, im.from, im.claim) awcsti[acc.Name] = struct{}{} for c := range acc.clients { clients[c] = struct{}{} } } } acc.mu.Unlock() return true }) // Now walk clients. for c := range clients { c.processSubsOnConfigReload(awcsti) } } // Now check if service exports have changed. if !a.checkServiceExportsEqual(old) || signersChanged { s.accounts.Range(func(k, v interface{}) bool { acc := v.(*Account) // Move to the next if this account is actually account "a". if acc.Name == a.Name { return true } // TODO: checkServiceImportAuthorized() stack should not be trying // to lock "acc". If we find that to be needed, we will need to // rework this to ensure we don't lock acc. acc.mu.Lock() for _, si := range acc.imports.services { if si != nil && si.acc.Name == a.Name { // Check for if we are still authorized for an import. 
si.invalid = !a.checkServiceImportAuthorized(acc, si.to, si.claim) if si.latency != nil && !si.response { // Make sure we should still be tracking latency. if se := a.getServiceExport(si.to); se != nil { si.latency = se.latency } } } } acc.mu.Unlock() return true }) } // Now make sure we shutdown the old service import subscriptions. var sids [][]byte a.mu.RLock() c := a.ic for _, si := range old.imports.services { if c != nil && si.sid != nil { sids = append(sids, si.sid) } } a.mu.RUnlock() for _, sid := range sids { c.processUnsub(sid) } // Now do limits if they are present. a.mu.Lock() a.msubs = int32(ac.Limits.Subs) a.mpay = int32(ac.Limits.Payload) a.mconns = int32(ac.Limits.Conn) a.mleafs = int32(ac.Limits.LeafNodeConn) // Check for any revocations if len(ac.Revocations) > 0 { // We will always replace whatever we had with most current, so no // need to look at what we have. a.usersRevoked = make(map[string]int64, len(ac.Revocations)) for pk, t := range ac.Revocations { a.usersRevoked[pk] = t } } else { a.usersRevoked = nil } a.defaultPerms = buildPermissionsFromJwt(&ac.DefaultPermissions) a.incomplete = len(incompleteImports) != 0 for _, i := range incompleteImports { s.incompleteAccExporterMap.Store(i.Account, struct{}{}) } if a.srv == nil { a.srv = s } // Setup js limits regardless of whether this server has jsEnabled. if ac.Limits.JetStreamLimits.DiskStorage != 0 || ac.Limits.JetStreamLimits.MemoryStorage != 0 { // JetStreamAccountLimits and jwt.JetStreamLimits use same value for unlimited a.jsLimits = &JetStreamAccountLimits{ MaxMemory: ac.Limits.JetStreamLimits.MemoryStorage, MaxStore: ac.Limits.JetStreamLimits.DiskStorage, MaxStreams: int(ac.Limits.JetStreamLimits.Streams), MaxConsumers: int(ac.Limits.JetStreamLimits.Consumer), } } else if a.jsLimits != nil { // covers failed update followed by disable a.jsLimits = nil } a.updated = time.Now().UTC() a.mu.Unlock() clients := gatherClients() // Sort if we are over the limit. if a.MaxTotalConnectionsReached() { sort.Slice(clients, func(i, j int) bool { return clients[i].start.After(clients[j].start) }) } // If JetStream is enabled for this server we will call into configJetStream for the account // regardless of enabled or disabled. It handles both cases. if jsEnabled { if err := s.configJetStream(a); err != nil { s.Errorf("Error configuring jetstream for account [%s]: %v", a.traceLabel(), err.Error()) a.mu.Lock() // Absent reload of js server cfg, this is going to be broken until js is disabled a.incomplete = true a.mu.Unlock() } } else if a.jsLimits != nil { // We do not have JS enabled for this server, but the account has it enabled so setup // our imports properly. This allows this server to proxy JS traffic correctly. s.checkJetStreamExports() a.enableAllJetStreamServiceImportsAndMappings() } for i, c := range clients { a.mu.RLock() exceeded := a.mconns != jwt.NoLimit && i >= int(a.mconns) a.mu.RUnlock() if exceeded { c.maxAccountConnExceeded() continue } c.mu.Lock() c.applyAccountLimits() theJWT := c.opts.JWT c.mu.Unlock() // Check for being revoked here. We use ac one to avoid the account lock. 
if ac.Revocations != nil && theJWT != _EMPTY_ { if juc, err := jwt.DecodeUserClaims(theJWT); err != nil { c.Debugf("User JWT not valid: %v", err) c.authViolation() continue } else if ok := ac.IsClaimRevoked(juc); ok { c.sendErrAndDebug("User Authentication Revoked") c.closeConnection(Revocation) continue } } } // Check if the signing keys changed, might have to evict if signersChanged { for _, c := range clients { c.mu.Lock() if c.user == nil { c.mu.Unlock() continue } sk := c.user.SigningKey c.mu.Unlock() if sk == _EMPTY_ { continue } if _, ok := alteredScope[sk]; ok { c.closeConnection(AuthenticationViolation) } else if _, ok := a.hasIssuer(sk); !ok { c.closeConnection(AuthenticationViolation) } } } if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok && refreshImportingAccounts { s.incompleteAccExporterMap.Delete(old.Name) s.accounts.Range(func(key, value interface{}) bool { acc := value.(*Account) acc.mu.RLock() incomplete := acc.incomplete name := acc.Name label := acc.traceLabel() // Must use jwt in account or risk failing on fetch // This jwt may not be the same that caused exportingAcc to be in incompleteAccExporterMap claimJWT := acc.claimJWT acc.mu.RUnlock() if incomplete && name != old.Name { if accClaims, _, err := s.verifyAccountClaims(claimJWT); err == nil { // Since claimJWT has not changed, acc can become complete // but it won't alter incomplete for it's dependents accounts. s.updateAccountClaimsWithRefresh(acc, accClaims, false) // old.Name was deleted before ranging over accounts // If it exists again, UpdateAccountClaims set it for failed imports of acc. // So there was one import of acc that imported this account and failed again. // Since this account just got updated, the import itself may be in error. So trace that. if _, ok := s.incompleteAccExporterMap.Load(old.Name); ok { s.incompleteAccExporterMap.Delete(old.Name) s.Errorf("Account %s has issues importing account %s", label, old.Name) } } } return true }) } } // Helper to build an internal account structure from a jwt.AccountClaims. // Lock MUST NOT be held upon entry. func (s *Server) buildInternalAccount(ac *jwt.AccountClaims) *Account { acc := NewAccount(ac.Subject) acc.Issuer = ac.Issuer // Set this here since we are placing in s.tmpAccounts below and may be // referenced by an route RS+, etc. s.setAccountSublist(acc) // We don't want to register an account that is in the process of // being built, however, to solve circular import dependencies, we // need to store it here. s.tmpAccounts.Store(ac.Subject, acc) s.UpdateAccountClaims(acc, ac) return acc } // Helper to build Permissions from jwt.Permissions // or return nil if none were specified func buildPermissionsFromJwt(uc *jwt.Permissions) *Permissions { if uc == nil { return nil } var p *Permissions if len(uc.Pub.Allow) > 0 || len(uc.Pub.Deny) > 0 { p = &Permissions{} p.Publish = &SubjectPermission{} p.Publish.Allow = uc.Pub.Allow p.Publish.Deny = uc.Pub.Deny } if len(uc.Sub.Allow) > 0 || len(uc.Sub.Deny) > 0 { if p == nil { p = &Permissions{} } p.Subscribe = &SubjectPermission{} p.Subscribe.Allow = uc.Sub.Allow p.Subscribe.Deny = uc.Sub.Deny } if uc.Resp != nil { if p == nil { p = &Permissions{} } p.Response = &ResponsePermission{ MaxMsgs: uc.Resp.MaxMsgs, Expires: uc.Resp.Expires, } validateResponsePermissions(p) } return p } // Helper to build internal NKeyUser. 
func buildInternalNkeyUser(uc *jwt.UserClaims, acts map[string]struct{}, acc *Account) *NkeyUser { nu := &NkeyUser{Nkey: uc.Subject, Account: acc, AllowedConnectionTypes: acts} if uc.IssuerAccount != _EMPTY_ { nu.SigningKey = uc.Issuer } // Now check for permissions. var p = buildPermissionsFromJwt(&uc.Permissions) if p == nil && acc.defaultPerms != nil { p = acc.defaultPerms.clone() } nu.Permissions = p return nu } func fetchAccount(res AccountResolver, name string) (string, error) { if !nkeys.IsValidPublicAccountKey(name) { return _EMPTY_, fmt.Errorf("will only fetch valid account keys") } return res.Fetch(name) } // AccountResolver interface. This is to fetch Account JWTs by public nkeys type AccountResolver interface { Fetch(name string) (string, error) Store(name, jwt string) error IsReadOnly() bool Start(server *Server) error IsTrackingUpdate() bool Reload() error Close() } // Default implementations of IsReadOnly/Start so only need to be written when changed type resolverDefaultsOpsImpl struct{} func (*resolverDefaultsOpsImpl) IsReadOnly() bool { return true } func (*resolverDefaultsOpsImpl) IsTrackingUpdate() bool { return false } func (*resolverDefaultsOpsImpl) Start(*Server) error { return nil } func (*resolverDefaultsOpsImpl) Reload() error { return nil } func (*resolverDefaultsOpsImpl) Close() { } func (*resolverDefaultsOpsImpl) Store(_, _ string) error { return fmt.Errorf("store operation not supported for URL Resolver") } // MemAccResolver is a memory only resolver. // Mostly for testing. type MemAccResolver struct { sm sync.Map resolverDefaultsOpsImpl } // Fetch will fetch the account jwt claims from the internal sync.Map. func (m *MemAccResolver) Fetch(name string) (string, error) { if j, ok := m.sm.Load(name); ok { return j.(string), nil } return _EMPTY_, ErrMissingAccount } // Store will store the account jwt claims in the internal sync.Map. func (m *MemAccResolver) Store(name, jwt string) error { m.sm.Store(name, jwt) return nil } func (m *MemAccResolver) IsReadOnly() bool { return false } // URLAccResolver implements an http fetcher. type URLAccResolver struct { url string c *http.Client resolverDefaultsOpsImpl } // NewURLAccResolver returns a new resolver for the given base URL. func NewURLAccResolver(url string) (*URLAccResolver, error) { if !strings.HasSuffix(url, "/") { url += "/" } // FIXME(dlc) - Make timeout and others configurable. // We create our own transport to amortize TLS. tr := &http.Transport{ MaxIdleConns: 10, IdleConnTimeout: 30 * time.Second, } ur := &URLAccResolver{ url: url, c: &http.Client{Timeout: DEFAULT_ACCOUNT_FETCH_TIMEOUT, Transport: tr}, } return ur, nil } // Fetch will fetch the account jwt claims from the base url, appending the // account name onto the end. func (ur *URLAccResolver) Fetch(name string) (string, error) { url := ur.url + name resp, err := ur.c.Get(url) if err != nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", redactURLString(url), err) } else if resp == nil { return _EMPTY_, fmt.Errorf("could not fetch <%q>: no response", redactURLString(url)) } else if resp.StatusCode != http.StatusOK { return _EMPTY_, fmt.Errorf("could not fetch <%q>: %v", redactURLString(url), resp.Status) } defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return _EMPTY_, err } return string(body), nil } // Resolver based on nats for synchronization and backing directory for storage. 
type DirAccResolver struct { *DirJWTStore *Server syncInterval time.Duration fetchTimeout time.Duration } func (dr *DirAccResolver) IsTrackingUpdate() bool { return true } func (dr *DirAccResolver) Reload() error { return dr.DirJWTStore.Reload() } func respondToUpdate(s *Server, respSubj string, acc string, message string, err error) { if err == nil { if acc == _EMPTY_ { s.Debugf("%s", message) } else { s.Debugf("%s - %s", message, acc) } } else { if acc == _EMPTY_ { s.Errorf("%s - %s", message, err) } else { s.Errorf("%s - %s - %s", message, acc, err) } } if respSubj == _EMPTY_ { return } server := &ServerInfo{} response := map[string]interface{}{"server": server} m := map[string]interface{}{} if acc != _EMPTY_ { m["account"] = acc } if err == nil { m["code"] = http.StatusOK m["message"] = message response["data"] = m } else { m["code"] = http.StatusInternalServerError m["description"] = fmt.Sprintf("%s - %v", message, err) response["error"] = m } s.sendInternalMsgLocked(respSubj, _EMPTY_, server, response) } func handleListRequest(store *DirJWTStore, s *Server, reply string) { if reply == _EMPTY_ { return } accIds := make([]string, 0, 1024) if err := store.PackWalk(1, func(partialPackMsg string) { if tk := strings.Split(partialPackMsg, "|"); len(tk) == 2 { accIds = append(accIds, tk[0]) } }); err != nil { // let them timeout s.Errorf("list request error: %v", err) } else { s.Debugf("list request responded with %d account ids", len(accIds)) server := &ServerInfo{} response := map[string]interface{}{"server": server, "data": accIds} s.sendInternalMsgLocked(reply, _EMPTY_, server, response) } } func handleDeleteRequest(store *DirJWTStore, s *Server, msg []byte, reply string) { var accIds []interface{} var subj, sysAccName string if sysAcc := s.SystemAccount(); sysAcc != nil { sysAccName = sysAcc.GetName() } // Only operator and operator signing key are allowed to delete gk, err := jwt.DecodeGeneric(string(msg)) if err == nil { subj = gk.Subject if store.deleteType == NoDelete { err = fmt.Errorf("delete must be enabled in server config") } else if subj != gk.Issuer { err = fmt.Errorf("not self signed") } else if _, ok := store.operator[gk.Issuer]; !ok { err = fmt.Errorf("not trusted") } else if list, ok := gk.Data["accounts"]; !ok { err = fmt.Errorf("malformed request") } else if accIds, ok = list.([]interface{}); !ok { err = fmt.Errorf("malformed request") } else { for _, entry := range accIds { if acc, ok := entry.(string); !ok || acc == _EMPTY_ || !nkeys.IsValidPublicAccountKey(acc) { err = fmt.Errorf("malformed request") break } else if acc == sysAccName { err = fmt.Errorf("not allowed to delete system account") break } } } } if err != nil { respondToUpdate(s, reply, _EMPTY_, fmt.Sprintf("delete accounts request by %s failed", subj), err) return } errs := []string{} passCnt := 0 for _, acc := range accIds { if err := store.delete(acc.(string)); err != nil { errs = append(errs, err.Error()) } else { passCnt++ } } if len(errs) == 0 { respondToUpdate(s, reply, _EMPTY_, fmt.Sprintf("deleted %d accounts", passCnt), nil) } else { respondToUpdate(s, reply, _EMPTY_, fmt.Sprintf("deleted %d accounts, failed for %d", passCnt, len(errs)), errors.New(strings.Join(errs, "\n"))) } } func getOperatorKeys(s *Server) (string, map[string]struct{}, bool, error) { var op string var strict bool keys := make(map[string]struct{}) if opts := s.getOpts(); opts != nil && len(opts.TrustedOperators) > 0 { op = opts.TrustedOperators[0].Subject strict = opts.TrustedOperators[0].StrictSigningKeyUsage if !strict { 
keys[opts.TrustedOperators[0].Subject] = struct{}{} } for _, key := range opts.TrustedOperators[0].SigningKeys { keys[key] = struct{}{} } } if len(keys) == 0 { return _EMPTY_, nil, false, fmt.Errorf("no operator key found") } return op, keys, strict, nil } func claimValidate(claim *jwt.AccountClaims) error { vr := &jwt.ValidationResults{} claim.Validate(vr) if vr.IsBlocking(false) { return fmt.Errorf("validation errors: %v", vr.Errors()) } return nil } func removeCb(s *Server, pubKey string) { v, ok := s.accounts.Load(pubKey) if !ok { return } a := v.(*Account) s.Debugf("Disable account %s due to remove", pubKey) a.mu.Lock() // lock out new clients a.msubs = 0 a.mpay = 0 a.mconns = 0 a.mleafs = 0 a.updated = time.Now().UTC() a.mu.Unlock() // set the account to be expired and disconnect clients a.expiredTimeout() a.mu.Lock() a.clearExpirationTimer() a.mu.Unlock() } func (dr *DirAccResolver) Start(s *Server) error { op, opKeys, strict, err := getOperatorKeys(s) if err != nil { return err } dr.Lock() defer dr.Unlock() dr.Server = s dr.operator = opKeys dr.DirJWTStore.changed = func(pubKey string) { if v, ok := s.accounts.Load(pubKey); !ok { } else if theJwt, err := dr.LoadAcc(pubKey); err != nil { s.Errorf("update got error on load: %v", err) } else if err := s.updateAccountWithClaimJWT(v.(*Account), theJwt); err != nil { s.Errorf("update resulted in error %v", err) } } dr.DirJWTStore.deleted = func(pubKey string) { removeCb(s, pubKey) } packRespIb := s.newRespInbox() for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { // subscribe to account jwt update requests if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, _ *Account, subj, resp string, msg []byte) { pubKey := _EMPTY_ tk := strings.Split(subj, tsep) if len(tk) == accUpdateTokensNew { pubKey = tk[accReqAccIndex] } else if len(tk) == accUpdateTokensOld { pubKey = tk[accUpdateAccIdxOld] } else { s.Debugf("jwt update skipped due to bad subject %q", subj) return } if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update resulted in error", err) } else if err := claimValidate(claim); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt validation failed", err) } else if claim.Subject != pubKey { err := errors.New("subject does not match jwt content") respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else if claim.Issuer == op && strict { err := errors.New("operator requires issuer to be a signing key") respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else if err := dr.save(pubKey, string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update resulted in error", err) } else { respondToUpdate(s, resp, pubKey, "jwt updated", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } } if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, _ *Account, subj, resp string, msg []byte) { if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update resulted in error", err) } else if claim.Issuer == op && strict { err := errors.New("operator requires issuer to be a signing key") respondToUpdate(s, resp, claim.Subject, "jwt update resulted in error", err) } else if err := claimValidate(claim); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt validation failed", err) } else if err := dr.save(claim.Subject, string(msg)); err != nil { respondToUpdate(s, resp, 
claim.Subject, "jwt update resulted in error", err) } else { respondToUpdate(s, resp, claim.Subject, "jwt updated", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } // respond to lookups with our version if _, err := s.sysSubscribe(fmt.Sprintf(accLookupReqSubj, "*"), func(_ *subscription, _ *client, _ *Account, subj, reply string, msg []byte) { if reply == _EMPTY_ { return } tk := strings.Split(subj, tsep) if len(tk) != accLookupReqTokens { return } if theJWT, err := dr.DirJWTStore.LoadAcc(tk[accReqAccIndex]); err != nil { s.Errorf("Merging resulted in error: %v", err) } else { s.sendInternalMsgLocked(reply, _EMPTY_, nil, []byte(theJWT)) } }); err != nil { return fmt.Errorf("error setting up lookup request handling: %v", err) } // respond to pack requests with one or more pack messages // an empty message signifies the end of the response responder if _, err := s.sysSubscribeQ(accPackReqSubj, "responder", func(_ *subscription, _ *client, _ *Account, _, reply string, theirHash []byte) { if reply == _EMPTY_ { return } ourHash := dr.DirJWTStore.Hash() if bytes.Equal(theirHash, ourHash[:]) { s.sendInternalMsgLocked(reply, _EMPTY_, nil, []byte{}) s.Debugf("pack request matches hash %x", ourHash[:]) } else if err := dr.DirJWTStore.PackWalk(1, func(partialPackMsg string) { s.sendInternalMsgLocked(reply, _EMPTY_, nil, []byte(partialPackMsg)) }); err != nil { // let them timeout s.Errorf("pack request error: %v", err) } else { s.Debugf("pack request hash %x - finished responding with hash %x", theirHash, ourHash) s.sendInternalMsgLocked(reply, _EMPTY_, nil, []byte{}) } }); err != nil { return fmt.Errorf("error setting up pack request handling: %v", err) } // respond to list requests with one message containing all account ids if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _ *Account, _, reply string, _ []byte) { handleListRequest(dr.DirJWTStore, s, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _ *Account, _, reply string, msg []byte) { handleDeleteRequest(dr.DirJWTStore, s, msg, reply) }); err != nil { return fmt.Errorf("error setting up delete request handling: %v", err) } // embed pack responses into store if _, err := s.sysSubscribe(packRespIb, func(_ *subscription, _ *client, _ *Account, _, _ string, msg []byte) { hash := dr.DirJWTStore.Hash() if len(msg) == 0 { // end of response stream s.Debugf("Merging Finished and resulting in: %x", dr.DirJWTStore.Hash()) return } else if err := dr.DirJWTStore.Merge(string(msg)); err != nil { s.Errorf("Merging resulted in error: %v", err) } else { s.Debugf("Merging succeeded and changed %x to %x", hash, dr.DirJWTStore.Hash()) } }); err != nil { return fmt.Errorf("error setting up pack response handling: %v", err) } // periodically send out pack message quit := s.quitCh s.startGoRoutine(func() { defer s.grWG.Done() ticker := time.NewTicker(dr.syncInterval) for { select { case <-quit: ticker.Stop() return case <-ticker.C: } ourHash := dr.DirJWTStore.Hash() s.Debugf("Checking store state: %x", ourHash) s.sendInternalMsgLocked(accPackReqSubj, packRespIb, nil, ourHash[:]) } }) s.Noticef("Managing all jwt in exclusive directory %s", dr.directory) return nil } func (dr *DirAccResolver) Fetch(name string) (string, error) { if theJWT, err := dr.LoadAcc(name); theJWT != _EMPTY_ { return theJWT, nil } else { dr.Lock() srv := dr.Server to := dr.fetchTimeout dr.Unlock() if 
srv == nil { return _EMPTY_, err } return srv.fetch(dr, name, to) // lookup from other server } } func (dr *DirAccResolver) Store(name, jwt string) error { return dr.saveIfNewer(name, jwt) } type DirResOption func(s *DirAccResolver) error // limits the amount of time spent waiting for an account fetch to complete func FetchTimeout(to time.Duration) DirResOption { return func(r *DirAccResolver) error { if to <= time.Duration(0) { return fmt.Errorf("Fetch timeout %v is too smal", to) } r.fetchTimeout = to return nil } } func (dr *DirAccResolver) apply(opts ...DirResOption) error { for _, o := range opts { if err := o(dr); err != nil { return err } } return nil } func NewDirAccResolver(path string, limit int64, syncInterval time.Duration, delete bool, opts ...DirResOption) (*DirAccResolver, error) { if limit == 0 { limit = math.MaxInt64 } if syncInterval <= 0 { syncInterval = time.Minute } deleteType := NoDelete if delete { deleteType = RenameDeleted } store, err := NewExpiringDirJWTStore(path, false, true, deleteType, 0, limit, false, 0, nil) if err != nil { return nil, err } res := &DirAccResolver{store, nil, syncInterval, DEFAULT_ACCOUNT_FETCH_TIMEOUT} if err := res.apply(opts...); err != nil { return nil, err } return res, nil } // Caching resolver using nats for lookups and making use of a directory for storage type CacheDirAccResolver struct { DirAccResolver ttl time.Duration } func (s *Server) fetch(res AccountResolver, name string, timeout time.Duration) (string, error) { if s == nil { return _EMPTY_, ErrNoAccountResolver } respC := make(chan []byte, 1) accountLookupRequest := fmt.Sprintf(accLookupReqSubj, name) s.mu.Lock() if s.sys == nil || s.sys.replies == nil { s.mu.Unlock() return _EMPTY_, fmt.Errorf("eventing shut down") } replySubj := s.newRespInbox() replies := s.sys.replies // Store our handler. 
replies[replySubj] = func(sub *subscription, _ *client, _ *Account, subject, _ string, msg []byte) { clone := make([]byte, len(msg)) copy(clone, msg) s.mu.Lock() if _, ok := replies[replySubj]; ok { select { case respC <- clone: // only use first response and only if there is still interest default: } } s.mu.Unlock() } s.sendInternalMsg(accountLookupRequest, replySubj, nil, []byte{}) quit := s.quitCh s.mu.Unlock() var err error var theJWT string select { case <-quit: err = errors.New("fetching jwt failed due to shutdown") case <-time.After(timeout): err = errors.New("fetching jwt timed out") case m := <-respC: if err = res.Store(name, string(m)); err == nil { theJWT = string(m) } } s.mu.Lock() delete(replies, replySubj) s.mu.Unlock() close(respC) return theJWT, err } func NewCacheDirAccResolver(path string, limit int64, ttl time.Duration, opts ...DirResOption) (*CacheDirAccResolver, error) { if limit <= 0 { limit = 1_000 } store, err := NewExpiringDirJWTStore(path, false, true, HardDelete, 0, limit, true, ttl, nil) if err != nil { return nil, err } res := &CacheDirAccResolver{DirAccResolver{store, nil, 0, DEFAULT_ACCOUNT_FETCH_TIMEOUT}, ttl} if err := res.apply(opts...); err != nil { return nil, err } return res, nil } func (dr *CacheDirAccResolver) Start(s *Server) error { op, opKeys, strict, err := getOperatorKeys(s) if err != nil { return err } dr.Lock() defer dr.Unlock() dr.Server = s dr.operator = opKeys dr.DirJWTStore.changed = func(pubKey string) { if v, ok := s.accounts.Load(pubKey); !ok { } else if theJwt, err := dr.LoadAcc(pubKey); err != nil { s.Errorf("update got error on load: %v", err) } else if err := s.updateAccountWithClaimJWT(v.(*Account), theJwt); err != nil { s.Errorf("update resulted in error %v", err) } } dr.DirJWTStore.deleted = func(pubKey string) { removeCb(s, pubKey) } for _, reqSub := range []string{accUpdateEventSubjOld, accUpdateEventSubjNew} { // subscribe to account jwt update requests if _, err := s.sysSubscribe(fmt.Sprintf(reqSub, "*"), func(_ *subscription, _ *client, _ *Account, subj, resp string, msg []byte) { pubKey := _EMPTY_ tk := strings.Split(subj, tsep) if len(tk) == accUpdateTokensNew { pubKey = tk[accReqAccIndex] } else if len(tk) == accUpdateTokensOld { pubKey = tk[accUpdateAccIdxOld] } else { s.Debugf("jwt update cache skipped due to bad subject %q", subj) return } if claim, err := jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err) } else if claim.Subject != pubKey { err := errors.New("subject does not match jwt content") respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err) } else if claim.Issuer == op && strict { err := errors.New("operator requires issuer to be a signing key") respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err) } else if _, ok := s.accounts.Load(pubKey); !ok { respondToUpdate(s, resp, pubKey, "jwt update cache skipped", nil) } else if err := claimValidate(claim); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt update cache validation failed", err) } else if err := dr.save(pubKey, string(msg)); err != nil { respondToUpdate(s, resp, pubKey, "jwt update cache resulted in error", err) } else { respondToUpdate(s, resp, pubKey, "jwt updated cache", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } } if _, err := s.sysSubscribe(accClaimsReqSubj, func(_ *subscription, _ *client, _ *Account, subj, resp string, msg []byte) { if claim, err := 
jwt.DecodeAccountClaims(string(msg)); err != nil { respondToUpdate(s, resp, "n/a", "jwt update cache resulted in error", err) } else if claim.Issuer == op && strict { err := errors.New("operator requires issuer to be a signing key") respondToUpdate(s, resp, claim.Subject, "jwt update cache resulted in error", err) } else if _, ok := s.accounts.Load(claim.Subject); !ok { respondToUpdate(s, resp, claim.Subject, "jwt update cache skipped", nil) } else if err := claimValidate(claim); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt update cache validation failed", err) } else if err := dr.save(claim.Subject, string(msg)); err != nil { respondToUpdate(s, resp, claim.Subject, "jwt update cache resulted in error", err) } else { respondToUpdate(s, resp, claim.Subject, "jwt updated cache", nil) } }); err != nil { return fmt.Errorf("error setting up update handling: %v", err) } // respond to list requests with one message containing all account ids if _, err := s.sysSubscribe(accListReqSubj, func(_ *subscription, _ *client, _ *Account, _, reply string, _ []byte) { handleListRequest(dr.DirJWTStore, s, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } if _, err := s.sysSubscribe(accDeleteReqSubj, func(_ *subscription, _ *client, _ *Account, _, reply string, msg []byte) { handleDeleteRequest(dr.DirJWTStore, s, msg, reply) }); err != nil { return fmt.Errorf("error setting up list request handling: %v", err) } s.Noticef("Managing some jwt in exclusive directory %s", dr.directory) return nil } func (dr *CacheDirAccResolver) Reload() error { return dr.DirAccResolver.Reload() } // Transforms for arbitrarily mapping subjects from one to another for maps, tees and filters. // These can also be used for proper mapping on wildcard exports/imports. // These will be grouped and caching and locking are assumed to be in the upper layers. type transform struct { src, dest string dtoks []string stoks []string dtpi []int8 } // Helper to pull raw place holder index. Returns -1 if not a place holder. func placeHolderIndex(token string) int { if len(token) > 1 && token[0] == '$' { var tp int if n, err := fmt.Sscanf(token, "$%d", &tp); err == nil && n == 1 { return tp } } return -1 } // newTransform will create a new transform checking the src and dest subjects for accuracy. func newTransform(src, dest string) (*transform, error) { // Both entries need to be valid subjects. sv, stokens, npwcs, hasFwc := subjectInfo(src) dv, dtokens, dnpwcs, dHasFwc := subjectInfo(dest) // Make sure both are valid, match fwc if present and there are no pwcs in the dest subject. if !sv || !dv || dnpwcs > 0 || hasFwc != dHasFwc { return nil, ErrBadSubject } var dtpi []int8 // If the src has partial wildcards then the dest needs to have the token place markers. if npwcs > 0 || hasFwc { // We need to count to make sure that the dest has token holders for the pwcs. sti := make(map[int]int) for i, token := range stokens { if len(token) == 1 && token[0] == pwc { sti[len(sti)+1] = i } } nphs := 0 for _, token := range dtokens { tp := placeHolderIndex(token) if tp >= 0 { if tp > npwcs { return nil, ErrBadSubject } nphs++ // Now build up our runtime mapping from dest to source tokens. 
dtpi = append(dtpi, int8(sti[tp])) } else { dtpi = append(dtpi, -1) } } if nphs != npwcs { return nil, ErrBadSubject } } return &transform{src: src, dest: dest, dtoks: dtokens, stoks: stokens, dtpi: dtpi}, nil } // match will take a literal published subject that is associated with a client and will match and transform // the subject if possible. // TODO(dlc) - We could add in client here to allow for things like foo -> foo.$ACCOUNT func (tr *transform) match(subject string) (string, error) { // Tokenize the subject. This should always be a literal subject. tsa := [32]string{} tts := tsa[:0] start := 0 for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) if !isValidLiteralSubject(tts) { return _EMPTY_, ErrBadSubject } if isSubsetMatch(tts, tr.src) { return tr.transform(tts) } return _EMPTY_, ErrNoTransforms } // Do not need to match, just transform. func (tr *transform) transformSubject(subject string) (string, error) { // Tokenize the subject. tsa := [32]string{} tts := tsa[:0] start := 0 for i := 0; i < len(subject); i++ { if subject[i] == btsep { tts = append(tts, subject[start:i]) start = i + 1 } } tts = append(tts, subject[start:]) return tr.transform(tts) } // Do a transform on the subject to the dest subject. func (tr *transform) transform(tokens []string) (string, error) { if len(tr.dtpi) == 0 { return tr.dest, nil } var b strings.Builder var token string // We need to walk destination tokens and create the mapped subject pulling tokens from src. // This is slow and that is ok, transforms should have caching layer in front for mapping transforms // and export/import semantics with streams and services. li := len(tr.dtpi) - 1 for i, index := range tr.dtpi { // <0 means use destination token. if index < 0 { token = tr.dtoks[i] // Break if fwc if len(token) == 1 && token[0] == fwc { break } } else { // >= 0 means use source map index to figure out which source token to pull. token = tokens[index] } b.WriteString(token) if i < li { b.WriteByte(btsep) } } // We may have more source tokens available. This happens with ">". if tr.dtoks[len(tr.dtoks)-1] == ">" { for sli, i := len(tokens)-1, len(tr.stoks)-1; i < len(tokens); i++ { b.WriteString(tokens[i]) if i < sli { b.WriteByte(btsep) } } } return b.String(), nil } // Reverse a transform. func (tr *transform) reverse() *transform { if len(tr.dtpi) == 0 { rtr, _ := newTransform(tr.dest, tr.src) return rtr } // If we are here we need to dynamically get the correct reverse // of this transform. nsrc, phs := transformUntokenize(tr.dest) var nda []string for _, token := range tr.stoks { if token == "*" { if len(phs) == 0 { // TODO(dlc) - Should not happen return nil } nda = append(nda, phs[0]) phs = phs[1:] } else { nda = append(nda, token) } } ndest := strings.Join(nda, tsep) rtr, _ := newTransform(nsrc, ndest) return rtr }
1
14286
Did we miss that for jwt before?
nats-io-nats-server
go
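For context on the record above: the review question refers to the ResponseThreshold handling visible in updateAccountClaimsWithRefresh, which calls SetServiceExportResponseThreshold for JWT-defined service exports. Below is a minimal, self-contained Go sketch of the guard that function applies; the account and serviceExport types are simplified stand-ins for illustration only, not the real nats-server structs.

package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// Simplified stand-ins for the server types in the record above.
type serviceExport struct {
	respThresh time.Duration
}

type account struct {
	mu       sync.Mutex
	claimJWT string // non-empty means the account is backed by a JWT claim
	exports  map[string]*serviceExport
}

// setResponseThreshold mirrors the guard in SetServiceExportResponseThreshold:
// claim-backed accounts must be updated through their claims (as
// updateAccountClaimsWithRefresh does), never mutated directly.
func (a *account) setResponseThreshold(subject string, max time.Duration) error {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.claimJWT != "" {
		return errors.New("claim based accounts can not be updated directly")
	}
	se, ok := a.exports[subject]
	if !ok {
		return fmt.Errorf("no export defined for %q", subject)
	}
	se.respThresh = max
	return nil
}

func main() {
	a := &account{exports: map[string]*serviceExport{"req.help": {}}}
	fmt.Println(a.setResponseThreshold("req.help", 2*time.Second)) // <nil>

	a.claimJWT = "eyJ..." // now claim-backed: direct updates are rejected
	fmt.Println(a.setResponseThreshold("req.help", 2*time.Second))
}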
@@ -44,10 +44,10 @@ const ( // UART1 on the Trinket M0. var ( - UART1 = UART{Bus: sam.SERCOM1_USART, + UART1 = UART{ Buffer: NewRingBuffer(), - Mode: PinSERCOM, - IRQVal: sam.IRQ_SERCOM1, + Bus: sam.SERCOM0_USART, + SERCOM: 0, } )
1
// +build sam,atsamd21,trinket_m0 package machine import "device/sam" // used to reset into bootloader const RESET_MAGIC_VALUE = 0xf01669ef // GPIO Pins const ( D0 = PA08 // PWM available D1 = PA02 D2 = PA09 // PWM available D3 = PA07 // PWM available / UART0 RX D4 = PA06 // PWM available / UART0 TX D13 = PA10 // LED ) // Analog pins const ( A0 = D1 A1 = D2 A2 = D0 A3 = D3 A4 = D4 ) const ( LED = D13 ) // UART0 aka USBCDC pins const ( USBCDC_DM_PIN = PA24 USBCDC_DP_PIN = PA25 ) // UART1 pins const ( UART_TX_PIN = D4 UART_RX_PIN = D3 ) // UART1 on the Trinket M0. var ( UART1 = UART{Bus: sam.SERCOM1_USART, Buffer: NewRingBuffer(), Mode: PinSERCOM, IRQVal: sam.IRQ_SERCOM1, } ) //go:export SERCOM1_IRQHandler func handleUART1() { defaultUART1Handler() } // SPI pins const ( SPI0_SCK_PIN = D3 SPI0_MOSI_PIN = D4 SPI0_MISO_PIN = D2 ) // SPI on the Trinket M0. var ( SPI0 = SPI{ Bus: sam.SERCOM0_SPI, SERCOM: 0, } ) // I2C pins const ( SDA_PIN = D0 // SDA SCL_PIN = D2 // SCL ) // I2C on the Trinket M0. var ( I2C0 = I2C{Bus: sam.SERCOM2_I2CM, SDA: SDA_PIN, SCL: SCL_PIN, PinMode: PinSERCOMAlt} ) // I2S pins const ( I2S_SCK_PIN = PA10 I2S_SD_PIN = PA08 I2S_WS_PIN = NoPin // TODO: figure out what this is on Trinket M0. )
1
7821
It appears that using SERCOM1 was a bug: the only SERCOM that is supported on the TX/RX pins is SERCOM0. Previous UART support wouldn't have worked on the Trinket M0 TX/RX pins anyway (`PA06`/`PA07`). This PR fixes that, and should thereby add support for UART on the Trinket M0.
tinygo-org-tinygo
go
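For context on the record above: a short TinyGo sketch, assuming the usual machine.UART Configure/Write API, showing how UART1 would be exercised on the Trinket M0 once it is backed by SERCOM0 and routed to the board's TX/RX pins (D4/PA06 and D3/PA07, per the board file in the record). The baud rate and message are arbitrary.

package main

import (
	"machine"
	"time"
)

func main() {
	// UART1 maps to the hardware UART on the Trinket M0 TX/RX pins after the fix;
	// the pin constants come from the board file shown above.
	machine.UART1.Configure(machine.UARTConfig{
		BaudRate: 115200,
		TX:       machine.UART_TX_PIN, // D4 / PA06
		RX:       machine.UART_RX_PIN, // D3 / PA07
	})

	for {
		machine.UART1.Write([]byte("hello from trinket m0\r\n"))
		time.Sleep(time.Second)
	}
}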
@@ -179,7 +179,8 @@ class Preference extends Model public function getLocaleOptions() { $localeOptions = [ - 'be' => [Lang::get('system::lang.locale.be'), 'flag-by'], + 'ar' => [Lang::get('system::lang.locale.ar'), 'flag-sa'], + 'be' => [Lang::get('system::lang.locale.be'), 'flag-by'], 'cs' => [Lang::get('system::lang.locale.cs'), 'flag-cz'], 'da' => [Lang::get('system::lang.locale.da'), 'flag-dk'], 'en' => [Lang::get('system::lang.locale.en'), 'flag-us'],
1
<?php namespace Backend\Models; use App; use Lang; use Model; use Config; use Session; use BackendAuth; use DirectoryIterator; use DateTime; use DateTimeZone; use Carbon\Carbon; /** * Backend preferences for the backend user * * @package october\backend * @author Alexey Bobkov, Samuel Georges */ class Preference extends Model { use \October\Rain\Database\Traits\Validation; const DEFAULT_THEME = 'twilight'; /** * @var array Behaviors implemented by this model. */ public $implement = [ \Backend\Behaviors\UserPreferencesModel::class ]; /** * @var string Unique code */ public $settingsCode = 'backend::backend.preferences'; /** * @var mixed Settings form field defitions */ public $settingsFields = 'fields.yaml'; /** * @var array Validation rules */ public $rules = []; /** * Initialize the seed data for this model. This only executes when the * model is first created or reset to default. * @return void */ public function initSettingsData() { $config = App::make('config'); $this->locale = $config->get('app.locale', 'en'); $this->fallback_locale = $this->getFallbackLocale($this->locale); $this->timezone = $config->get('cms.backendTimezone', $config->get('app.timezone')); $this->editor_font_size = $config->get('editor.font_size', 12); $this->editor_word_wrap = $config->get('editor.word_wrap', 'fluid'); $this->editor_code_folding = $config->get('editor.code_folding', 'manual'); $this->editor_tab_size = $config->get('editor.tab_size', 4); $this->editor_theme = $config->get('editor.theme', static::DEFAULT_THEME); $this->editor_show_invisibles = $config->get('editor.show_invisibles', false); $this->editor_highlight_active_line = $config->get('editor.highlight_active_line', true); $this->editor_use_hard_tabs = $config->get('editor.use_hard_tabs', false); $this->editor_show_gutter = $config->get('editor.show_gutter', true); $this->editor_auto_closing = $config->get('editor.auto_closing', false); $this->editor_autocompletion = $config->get('editor.editor_autocompletion', 'manual'); $this->editor_enable_snippets = $config->get('editor.enable_snippets', false); $this->editor_display_indent_guides = $config->get('editor.display_indent_guides', false); $this->editor_show_print_margin = $config->get('editor.show_print_margin', false); } /** * Set the application's locale based on the user preference. * @return void */ public static function setAppLocale() { if (Session::has('locale')) { App::setLocale(Session::get('locale')); } elseif ( ($user = BackendAuth::getUser()) && ($locale = static::get('locale')) ) { Session::put('locale', $locale); App::setLocale($locale); } } /** * Same as setAppLocale except for the fallback definition. * @return void */ public static function setAppFallbackLocale() { if (Session::has('fallback_locale')) { Lang::setFallback(Session::get('fallback_locale')); } elseif ( ($user = BackendAuth::getUser()) && ($locale = static::get('fallback_locale')) ) { Session::put('fallback_locale', $locale); Lang::setFallback($locale); } } // // Events // public function beforeValidate() { $this->fallback_locale = $this->getFallbackLocale($this->locale); } public function afterSave() { Session::put('locale', $this->locale); Session::put('fallback_locale', $this->fallback_locale); } // // Utils // /** * Called when this model is reset to default by the user. * @return void */ public function resetDefault() { parent::resetDefault(); Session::forget('locale'); Session::forget('fallback_locale'); } /** * Overrides the config with the user's preference. 
* @return void */ public static function applyConfigValues() { $settings = self::instance(); Config::set('app.locale', $settings->locale); Config::set('app.fallback_locale', $settings->fallback_locale); } // // Getters // /** * Attempt to extract the language from the locale, * otherwise use the configuration. * @return string */ protected function getFallbackLocale($locale) { if ($position = strpos($locale, '-')) { $target = substr($locale, 0, $position); $available = $this->getLocaleOptions(); if (isset($available[$target])) { return $target; } } return Config::get('app.fallback_locale'); } /** * Returns available options for the "locale" attribute. * @return array */ public function getLocaleOptions() { $localeOptions = [ 'be' => [Lang::get('system::lang.locale.be'), 'flag-by'], 'cs' => [Lang::get('system::lang.locale.cs'), 'flag-cz'], 'da' => [Lang::get('system::lang.locale.da'), 'flag-dk'], 'en' => [Lang::get('system::lang.locale.en'), 'flag-us'], 'en-au' => [Lang::get('system::lang.locale.en-au'), 'flag-au'], 'en-ca' => [Lang::get('system::lang.locale.en-ca'), 'flag-ca'], 'en-gb' => [Lang::get('system::lang.locale.en-gb'), 'flag-gb'], 'et' => [Lang::get('system::lang.locale.et'), 'flag-ee'], 'de' => [Lang::get('system::lang.locale.de'), 'flag-de'], 'es' => [Lang::get('system::lang.locale.es'), 'flag-es'], 'es-ar' => [Lang::get('system::lang.locale.es-ar'), 'flag-ar'], 'fa' => [Lang::get('system::lang.locale.fa'), 'flag-ir'], 'fr' => [Lang::get('system::lang.locale.fr'), 'flag-fr'], 'fr-ca' => [Lang::get('system::lang.locale.fr-ca'), 'flag-ca'], 'hu' => [Lang::get('system::lang.locale.hu'), 'flag-hu'], 'id' => [Lang::get('system::lang.locale.id'), 'flag-id'], 'it' => [Lang::get('system::lang.locale.it'), 'flag-it'], 'ja' => [Lang::get('system::lang.locale.ja'), 'flag-jp'], 'kr' => [Lang::get('system::lang.locale.kr'), 'flag-kr'], 'lt' => [Lang::get('system::lang.locale.lt'), 'flag-lt'], 'lv' => [Lang::get('system::lang.locale.lv'), 'flag-lv'], 'nl' => [Lang::get('system::lang.locale.nl'), 'flag-nl'], 'pt-br' => [Lang::get('system::lang.locale.pt-br'), 'flag-br'], 'pt-pt' => [Lang::get('system::lang.locale.pt-pt'), 'flag-pt'], 'ro' => [Lang::get('system::lang.locale.ro'), 'flag-ro'], 'ru' => [Lang::get('system::lang.locale.ru'), 'flag-ru'], 'fi' => [Lang::get('system::lang.locale.fi'), 'flag-fi'], 'sv' => [Lang::get('system::lang.locale.sv'), 'flag-se'], 'tr' => [Lang::get('system::lang.locale.tr'), 'flag-tr'], 'uk' => [Lang::get('system::lang.locale.uk'), 'flag-ua'], 'pl' => [Lang::get('system::lang.locale.pl'), 'flag-pl'], 'sk' => [Lang::get('system::lang.locale.sk'), 'flag-sk'], 'zh-cn' => [Lang::get('system::lang.locale.zh-cn'), 'flag-cn'], 'zh-tw' => [Lang::get('system::lang.locale.zh-tw'), 'flag-tw'], 'nb-no' => [Lang::get('system::lang.locale.nb-no'), 'flag-no'], 'el' => [Lang::get('system::lang.locale.el'), 'flag-gr'], ]; $locales = Config::get('app.localeOptions', $localeOptions); // Sort locales alphabetically asort($locales); return $locales; } /** * Returns all available timezone options. 
* @return array */ public function getTimezoneOptions() { $timezoneIdentifiers = DateTimeZone::listIdentifiers(); $utcTime = new DateTime('now', new DateTimeZone('UTC')); $tempTimezones = []; foreach ($timezoneIdentifiers as $timezoneIdentifier) { $currentTimezone = new DateTimeZone($timezoneIdentifier); $tempTimezones[] = [ 'offset' => (int) $currentTimezone->getOffset($utcTime), 'identifier' => $timezoneIdentifier ]; } // Sort the array by offset, identifier ascending usort($tempTimezones, function ($a, $b) { return $a['offset'] === $b['offset'] ? strcmp($a['identifier'], $b['identifier']) : $a['offset'] - $b['offset']; }); $timezoneList = []; foreach ($tempTimezones as $tz) { $sign = $tz['offset'] > 0 ? '+' : '-'; $offset = gmdate('H:i', abs($tz['offset'])); $timezoneList[$tz['identifier']] = '(UTC ' . $sign . $offset . ') ' . $tz['identifier']; } return $timezoneList; } /** * Returns the theme options for the backend editor. * @return array */ public function getEditorThemeOptions() { $themeDir = new DirectoryIterator("modules/backend/formwidgets/codeeditor/assets/vendor/ace/"); $themes = []; // Iterate through the themes foreach ($themeDir as $node) { // If this file is a theme (starting by "theme-") if (!$node->isDir() && substr($node->getFileName(), 0, 6) == 'theme-') { // Remove the theme- prefix and the .js suffix, create an user friendly and capitalized name $themeId = substr($node->getFileName(), 6, -3); $themeName = ucwords(str_replace("_", " ", $themeId)); // Add the values to the themes array if ($themeId != static::DEFAULT_THEME) { $themes[$themeId] = $themeName; } } } // Sort the theme alphabetically, and push the default theme asort($themes); return [static::DEFAULT_THEME => ucwords(static::DEFAULT_THEME)] + $themes; } }
1
12,889
Spaces for indentation, not tabs
octobercms-october
php
@@ -117,15 +117,11 @@ cvdescriptorset::DescriptorSetLayoutDef::DescriptorSetLayoutDef(const VkDescript assert(bindings_.size() == binding_count_); assert(binding_flags_.size() == binding_count_); uint32_t global_index = 0; - binding_to_global_index_range_map_.reserve(binding_count_); - // Vector order is finalized so create maps of bindings to descriptors and descriptors to indices + global_index_range_.reserve(binding_count_); + // Vector order is finalized so build vectors of descriptors and dynamic offsets by binding index for (uint32_t i = 0; i < binding_count_; ++i) { - auto binding_num = bindings_[i].binding; auto final_index = global_index + bindings_[i].descriptorCount; - binding_to_global_index_range_map_[binding_num] = IndexRange(global_index, final_index); - if (final_index != global_index) { - global_start_to_index_map_[global_index] = i; - } + global_index_range_.emplace_back(global_index, final_index); global_index = final_index; }
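The hunk above replaces the per-binding `binding_to_global_index_range_map_` (keyed by sparse binding number) and `global_start_to_index_map_` with a dense `global_index_range_` vector built in binding-index order. A minimal standalone sketch of the lookup pattern this moves to is below; it is not the validation-layer code itself, and the rewritten accessor is an assumption (this record only shows the constructor change), but it mirrors the members visible here (`binding_to_index_map_`, `global_index_range_`, `IndexRange`) and the existing miss behavior of returning `binding_count_` / an invalid range.

```cpp
// Sketch only: dense vector of ranges indexed by binding *index*, with the sparse binding
// *number* translated through a map, as the diff's constructor loop now builds it.
#include <cstdint>
#include <unordered_map>
#include <vector>

struct IndexRange {
    uint32_t start;
    uint32_t end;
};

struct LayoutDefSketch {
    std::unordered_map<uint32_t, uint32_t> binding_to_index_map_;  // binding number -> binding index
    std::vector<IndexRange> global_index_range_;                   // one entry per binding index

    uint32_t GetIndexFromBinding(uint32_t binding) const {
        auto it = binding_to_index_map_.find(binding);
        // Miss returns one-past-the-end, matching the "return GetBindingCount()" convention.
        return it != binding_to_index_map_.end() ? it->second
                                                 : static_cast<uint32_t>(global_index_range_.size());
    }

    // Assumed shape of the accessor after the refactor; kInvalidRange keeps the old error behavior.
    const IndexRange &GetGlobalIndexRangeFromBinding(uint32_t binding) const {
        static const IndexRange kInvalidRange = {0xFFFFFFFF, 0xFFFFFFFF};
        const uint32_t index = GetIndexFromBinding(binding);
        return index < global_index_range_.size() ? global_index_range_[index] : kInvalidRange;
    }
};
```

The design point, per the diff's own comment that the vector order is finalized by this stage, is that one hash lookup plus a map of ranges collapses into a single indexed read of a contiguous vector.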
1
/* Copyright (c) 2015-2019 The Khronos Group Inc. * Copyright (c) 2015-2019 Valve Corporation * Copyright (c) 2015-2019 LunarG, Inc. * Copyright (C) 2015-2019 Google Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Author: Tobin Ehlis <[email protected]> * John Zulauf <[email protected]> */ // Allow use of STL min and max functions in Windows #define NOMINMAX #include "chassis.h" #include "core_validation_error_enums.h" #include "core_validation.h" #include "descriptor_sets.h" #include "hash_vk_types.h" #include "vk_enum_string_helper.h" #include "vk_safe_struct.h" #include "vk_typemap_helper.h" #include "buffer_validation.h" #include <sstream> #include <algorithm> #include <array> #include <memory> // ExtendedBinding collects a VkDescriptorSetLayoutBinding and any extended // state that comes from a different array/structure so they can stay together // while being sorted by binding number. struct ExtendedBinding { ExtendedBinding(const VkDescriptorSetLayoutBinding *l, VkDescriptorBindingFlagsEXT f) : layout_binding(l), binding_flags(f) {} const VkDescriptorSetLayoutBinding *layout_binding; VkDescriptorBindingFlagsEXT binding_flags; }; struct BindingNumCmp { bool operator()(const ExtendedBinding &a, const ExtendedBinding &b) const { return a.layout_binding->binding < b.layout_binding->binding; } }; using DescriptorSet = cvdescriptorset::DescriptorSet; using DescriptorSetLayout = cvdescriptorset::DescriptorSetLayout; using DescriptorSetLayoutDef = cvdescriptorset::DescriptorSetLayoutDef; using DescriptorSetLayoutId = cvdescriptorset::DescriptorSetLayoutId; // Canonical dictionary of DescriptorSetLayoutDef (without any handle/device specific information) cvdescriptorset::DescriptorSetLayoutDict descriptor_set_layout_dict; DescriptorSetLayoutId GetCanonicalId(const VkDescriptorSetLayoutCreateInfo *p_create_info) { return descriptor_set_layout_dict.look_up(DescriptorSetLayoutDef(p_create_info)); } // Construct DescriptorSetLayout instance from given create info // Proactively reserve and resize as possible, as the reallocation was visible in profiling cvdescriptorset::DescriptorSetLayoutDef::DescriptorSetLayoutDef(const VkDescriptorSetLayoutCreateInfo *p_create_info) : flags_(p_create_info->flags), binding_count_(0), descriptor_count_(0), dynamic_descriptor_count_(0) { const auto *flags_create_info = lvl_find_in_chain<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(p_create_info->pNext); binding_type_stats_ = {0, 0, 0}; std::set<ExtendedBinding, BindingNumCmp> sorted_bindings; const uint32_t input_bindings_count = p_create_info->bindingCount; // Sort the input bindings in binding number order, eliminating duplicates for (uint32_t i = 0; i < input_bindings_count; i++) { VkDescriptorBindingFlagsEXT flags = 0; if (flags_create_info && flags_create_info->bindingCount == p_create_info->bindingCount) { flags = flags_create_info->pBindingFlags[i]; } sorted_bindings.insert(ExtendedBinding(p_create_info->pBindings + i, flags)); } // Store the create info in the sorted order from above std::map<uint32_t, 
uint32_t> binding_to_dyn_count; uint32_t index = 0; binding_count_ = static_cast<uint32_t>(sorted_bindings.size()); bindings_.reserve(binding_count_); binding_flags_.reserve(binding_count_); binding_to_index_map_.reserve(binding_count_); for (auto input_binding : sorted_bindings) { // Add to binding and map, s.t. it is robust to invalid duplication of binding_num const auto binding_num = input_binding.layout_binding->binding; binding_to_index_map_[binding_num] = index++; bindings_.emplace_back(input_binding.layout_binding); auto &binding_info = bindings_.back(); binding_flags_.emplace_back(input_binding.binding_flags); descriptor_count_ += binding_info.descriptorCount; if (binding_info.descriptorCount > 0) { non_empty_bindings_.insert(binding_num); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) { binding_to_dyn_count[binding_num] = binding_info.descriptorCount; dynamic_descriptor_count_ += binding_info.descriptorCount; binding_type_stats_.dynamic_buffer_count++; } else if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)) { binding_type_stats_.non_dynamic_buffer_count++; } else { binding_type_stats_.image_sampler_count++; } } assert(bindings_.size() == binding_count_); assert(binding_flags_.size() == binding_count_); uint32_t global_index = 0; binding_to_global_index_range_map_.reserve(binding_count_); // Vector order is finalized so create maps of bindings to descriptors and descriptors to indices for (uint32_t i = 0; i < binding_count_; ++i) { auto binding_num = bindings_[i].binding; auto final_index = global_index + bindings_[i].descriptorCount; binding_to_global_index_range_map_[binding_num] = IndexRange(global_index, final_index); if (final_index != global_index) { global_start_to_index_map_[global_index] = i; } global_index = final_index; } // Now create dyn offset array mapping for any dynamic descriptors uint32_t dyn_array_idx = 0; binding_to_dynamic_array_idx_map_.reserve(binding_to_dyn_count.size()); for (const auto &bc_pair : binding_to_dyn_count) { binding_to_dynamic_array_idx_map_[bc_pair.first] = dyn_array_idx; dyn_array_idx += bc_pair.second; } } size_t cvdescriptorset::DescriptorSetLayoutDef::hash() const { hash_util::HashCombiner hc; hc << flags_; hc.Combine(bindings_); hc.Combine(binding_flags_); return hc.Value(); } // // Return valid index or "end" i.e. binding_count_; // The asserts in "Get" are reduced to the set where no valid answer(like null or 0) could be given // Common code for all binding lookups. 
uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetIndexFromBinding(uint32_t binding) const { const auto &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.cend()) return bi_itr->second; return GetBindingCount(); } VkDescriptorSetLayoutBinding const *cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorSetLayoutBindingPtrFromIndex( const uint32_t index) const { if (index >= bindings_.size()) return nullptr; return bindings_[index].ptr(); } // Return descriptorCount for given index, 0 if index is unavailable uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorCountFromIndex(const uint32_t index) const { if (index >= bindings_.size()) return 0; return bindings_[index].descriptorCount; } // For the given index, return descriptorType VkDescriptorType cvdescriptorset::DescriptorSetLayoutDef::GetTypeFromIndex(const uint32_t index) const { assert(index < bindings_.size()); if (index < bindings_.size()) return bindings_[index].descriptorType; return VK_DESCRIPTOR_TYPE_MAX_ENUM; } // For the given index, return stageFlags VkShaderStageFlags cvdescriptorset::DescriptorSetLayoutDef::GetStageFlagsFromIndex(const uint32_t index) const { assert(index < bindings_.size()); if (index < bindings_.size()) return bindings_[index].stageFlags; return VkShaderStageFlags(0); } // Return binding flags for given index, 0 if index is unavailable VkDescriptorBindingFlagsEXT cvdescriptorset::DescriptorSetLayoutDef::GetDescriptorBindingFlagsFromIndex( const uint32_t index) const { if (index >= binding_flags_.size()) return 0; return binding_flags_[index]; } // For the given global index, return index uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetIndexFromGlobalIndex(const uint32_t global_index) const { auto start_it = global_start_to_index_map_.upper_bound(global_index); uint32_t index = binding_count_; assert(start_it != global_start_to_index_map_.cbegin()); if (start_it != global_start_to_index_map_.cbegin()) { --start_it; index = start_it->second; #ifndef NDEBUG const auto &range = GetGlobalIndexRangeFromBinding(bindings_[index].binding); assert(range.start <= global_index && global_index < range.end); #endif } return index; } // For the given binding, return the global index range // As start and end are often needed in pairs, get both with a single hash lookup. 
const cvdescriptorset::IndexRange &cvdescriptorset::DescriptorSetLayoutDef::GetGlobalIndexRangeFromBinding( const uint32_t binding) const { assert(binding_to_global_index_range_map_.count(binding)); // In error case max uint32_t so index is out of bounds to break ASAP const static IndexRange kInvalidRange = {0xFFFFFFFF, 0xFFFFFFFF}; const auto &range_it = binding_to_global_index_range_map_.find(binding); if (range_it != binding_to_global_index_range_map_.end()) { return range_it->second; } return kInvalidRange; } // For given binding, return ptr to ImmutableSampler array VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromBinding(const uint32_t binding) const { const auto &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.end()) { return bindings_[bi_itr->second].pImmutableSamplers; } return nullptr; } // Move to next valid binding having a non-zero binding count uint32_t cvdescriptorset::DescriptorSetLayoutDef::GetNextValidBinding(const uint32_t binding) const { auto it = non_empty_bindings_.upper_bound(binding); assert(it != non_empty_bindings_.cend()); if (it != non_empty_bindings_.cend()) return *it; return GetMaxBinding() + 1; } // For given index, return ptr to ImmutableSampler array VkSampler const *cvdescriptorset::DescriptorSetLayoutDef::GetImmutableSamplerPtrFromIndex(const uint32_t index) const { if (index < bindings_.size()) { return bindings_[index].pImmutableSamplers; } return nullptr; } // If our layout is compatible with rh_ds_layout, return true. bool cvdescriptorset::DescriptorSetLayout::IsCompatible(DescriptorSetLayout const *rh_ds_layout) const { bool compatible = (this == rh_ds_layout) || (GetLayoutDef() == rh_ds_layout->GetLayoutDef()); return compatible; } // If our layout is compatible with rh_ds_layout, return true, // else return false and fill in error_msg will description of what causes incompatibility bool cvdescriptorset::VerifySetLayoutCompatibility(DescriptorSetLayout const *lh_ds_layout, DescriptorSetLayout const *rh_ds_layout, std::string *error_msg) { // Short circuit the detailed check. if (lh_ds_layout->IsCompatible(rh_ds_layout)) return true; // Do a detailed compatibility check of this lhs def (referenced by lh_ds_layout), vs. the rhs (layout and def) // Should only be run if trivial accept has failed, and in that context should return false. VkDescriptorSetLayout lh_dsl_handle = lh_ds_layout->GetDescriptorSetLayout(); VkDescriptorSetLayout rh_dsl_handle = rh_ds_layout->GetDescriptorSetLayout(); DescriptorSetLayoutDef const *lh_ds_layout_def = lh_ds_layout->GetLayoutDef(); DescriptorSetLayoutDef const *rh_ds_layout_def = rh_ds_layout->GetLayoutDef(); // Check descriptor counts if (lh_ds_layout_def->GetTotalDescriptorCount() != rh_ds_layout_def->GetTotalDescriptorCount()) { std::stringstream error_str; error_str << "DescriptorSetLayout " << lh_dsl_handle << " has " << lh_ds_layout_def->GetTotalDescriptorCount() << " descriptors, but DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has " << rh_ds_layout_def->GetTotalDescriptorCount() << " descriptors."; *error_msg = error_str.str(); return false; // trivial fail case } // Descriptor counts match so need to go through bindings one-by-one // and verify that type and stageFlags match for (const auto &binding : lh_ds_layout_def->GetBindings()) { // TODO : Do we also need to check immutable samplers? 
// VkDescriptorSetLayoutBinding *rh_binding; if (binding.descriptorCount != rh_ds_layout_def->GetDescriptorCountFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " has a descriptorCount of " << binding.descriptorCount << " but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has a descriptorCount of " << rh_ds_layout_def->GetDescriptorCountFromBinding(binding.binding); *error_msg = error_str.str(); return false; } else if (binding.descriptorType != rh_ds_layout_def->GetTypeFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " is type '" << string_VkDescriptorType(binding.descriptorType) << "' but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, is type '" << string_VkDescriptorType(rh_ds_layout_def->GetTypeFromBinding(binding.binding)) << "'"; *error_msg = error_str.str(); return false; } else if (binding.stageFlags != rh_ds_layout_def->GetStageFlagsFromBinding(binding.binding)) { std::stringstream error_str; error_str << "Binding " << binding.binding << " for DescriptorSetLayout " << lh_dsl_handle << " has stageFlags " << binding.stageFlags << " but binding " << binding.binding << " for DescriptorSetLayout " << rh_dsl_handle << ", which comes from pipelineLayout, has stageFlags " << rh_ds_layout_def->GetStageFlagsFromBinding(binding.binding); *error_msg = error_str.str(); return false; } } // No detailed check should succeed if the trivial check failed -- or the dictionary has failed somehow. bool compatible = true; assert(!compatible); return compatible; } bool cvdescriptorset::DescriptorSetLayoutDef::IsNextBindingConsistent(const uint32_t binding) const { if (!binding_to_index_map_.count(binding + 1)) return false; auto const &bi_itr = binding_to_index_map_.find(binding); if (bi_itr != binding_to_index_map_.end()) { const auto &next_bi_itr = binding_to_index_map_.find(binding + 1); if (next_bi_itr != binding_to_index_map_.end()) { auto type = bindings_[bi_itr->second].descriptorType; auto stage_flags = bindings_[bi_itr->second].stageFlags; auto immut_samp = bindings_[bi_itr->second].pImmutableSamplers ? true : false; auto flags = binding_flags_[bi_itr->second]; if ((type != bindings_[next_bi_itr->second].descriptorType) || (stage_flags != bindings_[next_bi_itr->second].stageFlags) || (immut_samp != (bindings_[next_bi_itr->second].pImmutableSamplers ? 
true : false)) || (flags != binding_flags_[next_bi_itr->second])) { return false; } return true; } } return false; } // The DescriptorSetLayout stores the per handle data for a descriptor set layout, and references the common defintion for the // handle invariant portion cvdescriptorset::DescriptorSetLayout::DescriptorSetLayout(const VkDescriptorSetLayoutCreateInfo *p_create_info, const VkDescriptorSetLayout layout) : layout_(layout), layout_destroyed_(false), layout_id_(GetCanonicalId(p_create_info)) {} // Validate descriptor set layout create info bool cvdescriptorset::ValidateDescriptorSetLayoutCreateInfo( const debug_report_data *report_data, const VkDescriptorSetLayoutCreateInfo *create_info, const bool push_descriptor_ext, const uint32_t max_push_descriptors, const bool descriptor_indexing_ext, const VkPhysicalDeviceDescriptorIndexingFeaturesEXT *descriptor_indexing_features, const VkPhysicalDeviceInlineUniformBlockFeaturesEXT *inline_uniform_block_features, const VkPhysicalDeviceInlineUniformBlockPropertiesEXT *inline_uniform_block_props) { bool skip = false; std::unordered_set<uint32_t> bindings; uint64_t total_descriptors = 0; const auto *flags_create_info = lvl_find_in_chain<VkDescriptorSetLayoutBindingFlagsCreateInfoEXT>(create_info->pNext); const bool push_descriptor_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); if (push_descriptor_set && !push_descriptor_ext) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ExtensionNotEnabled, "Attempted to use %s in %s but its required extension %s has not been enabled.\n", "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR", "VkDescriptorSetLayoutCreateInfo::flags", VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } const bool update_after_bind_set = !!(create_info->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT); if (update_after_bind_set && !descriptor_indexing_ext) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, kVUID_Core_DrawState_ExtensionNotEnabled, "Attemped to use %s in %s but its required extension %s has not been enabled.\n", "VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT", "VkDescriptorSetLayoutCreateInfo::flags", VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); } auto valid_type = [push_descriptor_set](const VkDescriptorType type) { return !push_descriptor_set || ((type != VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) && (type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)); }; uint32_t max_binding = 0; for (uint32_t i = 0; i < create_info->bindingCount; ++i) { const auto &binding_info = create_info->pBindings[i]; max_binding = std::max(max_binding, binding_info.binding); if (!bindings.insert(binding_info.binding).second) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-binding-00279", "duplicated binding number in VkDescriptorSetLayoutBinding."); } if (!valid_type(binding_info.descriptorType)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) ? 
"VUID-VkDescriptorSetLayoutCreateInfo-flags-02208" : "VUID-VkDescriptorSetLayoutCreateInfo-flags-00280", "invalid type %s ,for push descriptors in VkDescriptorSetLayoutBinding entry %" PRIu32 ".", string_VkDescriptorType(binding_info.descriptorType), i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((binding_info.descriptorCount % 4) != 0) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02209", "descriptorCount =(%" PRIu32 ") must be a multiple of 4", binding_info.descriptorCount); } if (binding_info.descriptorCount > inline_uniform_block_props->maxInlineUniformBlockSize) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBinding-descriptorType-02210", "descriptorCount =(%" PRIu32 ") must be less than or equal to maxInlineUniformBlockSize", binding_info.descriptorCount); } } total_descriptors += binding_info.descriptorCount; } if (flags_create_info) { if (flags_create_info->bindingCount != 0 && flags_create_info->bindingCount != create_info->bindingCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-bindingCount-03002", "VkDescriptorSetLayoutCreateInfo::bindingCount (%d) != " "VkDescriptorSetLayoutBindingFlagsCreateInfoEXT::bindingCount (%d)", create_info->bindingCount, flags_create_info->bindingCount); } if (flags_create_info->bindingCount == create_info->bindingCount) { for (uint32_t i = 0; i < create_info->bindingCount; ++i) { const auto &binding_info = create_info->pBindings[i]; if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) { if (!update_after_bind_set) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-flags-03000", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER && !descriptor_indexing_features->descriptorBindingUniformBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingUniformBufferUpdateAfterBind-03005", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) && !descriptor_indexing_features->descriptorBindingSampledImageUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingSampledImageUpdateAfterBind-03006", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && !descriptor_indexing_features->descriptorBindingStorageImageUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageImageUpdateAfterBind-03007", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if 
(binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER && !descriptor_indexing_features->descriptorBindingStorageBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageBufferUpdateAfterBind-03008", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER && !descriptor_indexing_features->descriptorBindingUniformTexelBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingUniformTexelBufferUpdateAfterBind-03009", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER && !descriptor_indexing_features->descriptorBindingStorageTexelBufferUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingStorageTexelBufferUpdateAfterBind-03010", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-None-03011", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (binding_info.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT && !inline_uniform_block_features->descriptorBindingInlineUniformBlockUpdateAfterBind) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-" "descriptorBindingInlineUniformBlockUpdateAfterBind-02211", "Invalid flags (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT) for " "VkDescriptorSetLayoutBinding entry %" PRIu32 " with descriptorBindingInlineUniformBlockUpdateAfterBind not enabled", i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT) { if (!descriptor_indexing_features->descriptorBindingUpdateUnusedWhilePending) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingUpdateUnusedWhilePending-03012", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT) { if (!descriptor_indexing_features->descriptorBindingPartiallyBound) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingPartiallyBound-03013", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (flags_create_info->pBindingFlags[i] & VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT) { if (binding_info.binding != max_binding) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, 
"VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-pBindingFlags-03004", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if (!descriptor_indexing_features->descriptorBindingVariableDescriptorCount) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-descriptorBindingVariableDescriptorCount-03014", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } if ((binding_info.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || binding_info.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-pBindingFlags-03015", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } if (push_descriptor_set && (flags_create_info->pBindingFlags[i] & (VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_VARIABLE_DESCRIPTOR_COUNT_BIT_EXT))) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutBindingFlagsCreateInfoEXT-flags-03003", "Invalid flags for VkDescriptorSetLayoutBinding entry %" PRIu32, i); } } } } if ((push_descriptor_set) && (total_descriptors > max_push_descriptors)) { const char *undefined = push_descriptor_ext ? "" : " -- undefined"; skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, "VUID-VkDescriptorSetLayoutCreateInfo-flags-00281", "for push descriptor, total descriptor count in layout (%" PRIu64 ") must not be greater than VkPhysicalDevicePushDescriptorPropertiesKHR::maxPushDescriptors (%" PRIu32 "%s).", total_descriptors, max_push_descriptors, undefined); } return skip; } cvdescriptorset::AllocateDescriptorSetsData::AllocateDescriptorSetsData(uint32_t count) : required_descriptors_by_type{}, layout_nodes(count, nullptr) {} cvdescriptorset::DescriptorSet::DescriptorSet(const VkDescriptorSet set, const VkDescriptorPool pool, const std::shared_ptr<DescriptorSetLayout const> &layout, uint32_t variable_count, CoreChecks *dev_data) : some_update_(false), set_(set), pool_state_(nullptr), p_layout_(layout), device_data_(dev_data), variable_count_(variable_count) { pool_state_ = dev_data->GetDescriptorPoolState(pool); // Foreach binding, create default descriptors of given type descriptors_.reserve(p_layout_->GetTotalDescriptorCount()); for (uint32_t i = 0; i < p_layout_->GetBindingCount(); ++i) { auto type = p_layout_->GetTypeFromIndex(i); switch (type) { case VK_DESCRIPTOR_TYPE_SAMPLER: { auto immut_sampler = p_layout_->GetImmutableSamplerPtrFromIndex(i); for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) { if (immut_sampler) { descriptors_.emplace_back(new SamplerDescriptor(immut_sampler + di)); some_update_ = true; // Immutable samplers are updated at creation } else descriptors_.emplace_back(new SamplerDescriptor(nullptr)); } break; } case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { auto immut = p_layout_->GetImmutableSamplerPtrFromIndex(i); for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) { if (immut) { descriptors_.emplace_back(new ImageSamplerDescriptor(immut + di)); some_update_ = true; // Immutable samplers are updated at creation } else descriptors_.emplace_back(new 
ImageSamplerDescriptor(nullptr)); } break; } // ImageDescriptors case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new ImageDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new TexelDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new BufferDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new InlineUniformDescriptor(type)); break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: for (uint32_t di = 0; di < p_layout_->GetDescriptorCountFromIndex(i); ++di) descriptors_.emplace_back(new AccelerationStructureDescriptor(type)); break; default: assert(0); // Bad descriptor type specified break; } } } cvdescriptorset::DescriptorSet::~DescriptorSet() { InvalidateBoundCmdBuffers(); } static std::string StringDescriptorReqViewType(descriptor_req req) { std::string result(""); for (unsigned i = 0; i <= VK_IMAGE_VIEW_TYPE_END_RANGE; i++) { if (req & (1 << i)) { if (result.size()) result += ", "; result += string_VkImageViewType(VkImageViewType(i)); } } if (!result.size()) result = "(none)"; return result; } static char const *StringDescriptorReqComponentType(descriptor_req req) { if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_SINT) return "SINT"; if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_UINT) return "UINT"; if (req & DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT) return "FLOAT"; return "(none)"; } static unsigned DescriptorRequirementsBitsFromFormat(VkFormat fmt) { if (FormatIsSInt(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_SINT; if (FormatIsUInt(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_UINT; if (FormatIsDepthAndStencil(fmt)) return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT | DESCRIPTOR_REQ_COMPONENT_TYPE_UINT; if (fmt == VK_FORMAT_UNDEFINED) return 0; // everything else -- UNORM/SNORM/FLOAT/USCALED/SSCALED is all float in the shader. return DESCRIPTOR_REQ_COMPONENT_TYPE_FLOAT; } // Validate that the state of this set is appropriate for the given bindings and dynamic_offsets at Draw time // This includes validating that all descriptors in the given bindings are updated, // that any update buffers are valid, and that any dynamic offsets are within the bounds of their buffers. 
// Return true if state is acceptable, or false and write an error message into error string bool cvdescriptorset::DescriptorSet::ValidateDrawState(const std::map<uint32_t, descriptor_req> &bindings, const std::vector<uint32_t> &dynamic_offsets, CMD_BUFFER_STATE *cb_node, const char *caller, std::string *error) const { for (auto binding_pair : bindings) { auto binding = binding_pair.first; if (!p_layout_->HasBinding(binding)) { std::stringstream error_str; error_str << "Attempting to validate DrawState for binding #" << binding << " which is an invalid binding for this descriptor set."; *error = error_str.str(); return false; } IndexRange index_range = p_layout_->GetGlobalIndexRangeFromBinding(binding); auto array_idx = 0; // Track array idx if we're dealing with array descriptors if (IsVariableDescriptorCount(binding)) { // Only validate the first N descriptors if it uses variable_count index_range.end = index_range.start + GetVariableDescriptorCount(); } for (uint32_t i = index_range.start; i < index_range.end; ++i, ++array_idx) { uint32_t index = i - index_range.start; if ((p_layout_->GetDescriptorBindingFlagsFromBinding(binding) & (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT)) || descriptors_[i]->GetClass() == InlineUniform) { // Can't validate the descriptor because it may not have been updated, // or the view could have been destroyed continue; } else if (!descriptors_[i]->updated) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is being used in draw but has not been updated."; *error = error_str.str(); return false; } else { auto descriptor_class = descriptors_[i]->GetClass(); if (descriptor_class == GeneralBuffer) { // Verify that buffers are valid auto buffer = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetBuffer(); auto buffer_node = device_data_->GetBufferState(buffer); if (!buffer_node) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " references invalid buffer " << buffer << "."; *error = error_str.str(); return false; } else if (!buffer_node->sparse) { for (auto mem_binding : buffer_node->GetBoundMemory()) { if (!device_data_->GetDevMemState(mem_binding)) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " uses buffer " << buffer << " that references invalid memory " << mem_binding << "."; *error = error_str.str(); return false; } } } if (descriptors_[i]->IsDynamic()) { // Validate that dynamic offsets are within the buffer auto buffer_size = buffer_node->createInfo.size; auto range = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetRange(); auto desc_offset = static_cast<BufferDescriptor *>(descriptors_[i].get())->GetOffset(); auto dyn_offset = dynamic_offsets[GetDynamicOffsetIndexFromBinding(binding) + array_idx]; if (VK_WHOLE_SIZE == range) { if ((dyn_offset + desc_offset) > buffer_size) { std::stringstream error_str; error_str << "Dynamic descriptor in binding #" << binding << " index " << index << " uses buffer " << buffer << " with update range of VK_WHOLE_SIZE has dynamic offset " << dyn_offset << " combined with offset " << desc_offset << " that oversteps the buffer size of " << buffer_size << "."; *error = error_str.str(); return false; } } else { if ((dyn_offset + desc_offset + range) > buffer_size) { std::stringstream error_str; error_str << "Dynamic descriptor in binding #" << binding << " index " << index << " uses 
buffer " << buffer << " with dynamic offset " << dyn_offset << " combined with offset " << desc_offset << " and range " << range << " that oversteps the buffer size of " << buffer_size << "."; *error = error_str.str(); return false; } } } } else if (descriptor_class == ImageSampler || descriptor_class == Image) { VkImageView image_view; VkImageLayout image_layout; if (descriptor_class == ImageSampler) { image_view = static_cast<ImageSamplerDescriptor *>(descriptors_[i].get())->GetImageView(); image_layout = static_cast<ImageSamplerDescriptor *>(descriptors_[i].get())->GetImageLayout(); } else { image_view = static_cast<ImageDescriptor *>(descriptors_[i].get())->GetImageView(); image_layout = static_cast<ImageDescriptor *>(descriptors_[i].get())->GetImageLayout(); } auto reqs = binding_pair.second; auto image_view_state = device_data_->GetImageViewState(image_view); if (nullptr == image_view_state) { // Image view must have been destroyed since initial update. Could potentially flag the descriptor // as "invalid" (updated = false) at DestroyImageView() time and detect this error at bind time std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using imageView " << image_view << " that has been destroyed."; *error = error_str.str(); return false; } auto image_view_ci = image_view_state->create_info; if ((reqs & DESCRIPTOR_REQ_ALL_VIEW_TYPE_BITS) && (~reqs & (1 << image_view_ci.viewType))) { // bad view type std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires an image view of type " << StringDescriptorReqViewType(reqs) << " but got " << string_VkImageViewType(image_view_ci.viewType) << "."; *error = error_str.str(); return false; } auto format_bits = DescriptorRequirementsBitsFromFormat(image_view_ci.format); if (!(reqs & format_bits)) { // bad component type std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires " << StringDescriptorReqComponentType(reqs) << " component type, but bound descriptor format is " << string_VkFormat(image_view_ci.format) << "."; *error = error_str.str(); return false; } auto image_node = device_data_->GetImageState(image_view_ci.image); assert(image_node); // Verify Image Layout // No "invalid layout" VUID required for this call, since the optimal_layout parameter is UNDEFINED. bool hit_error = false; device_data_->VerifyImageLayout(cb_node, image_node, image_view_state->normalized_subresource_range, image_view_ci.subresourceRange.aspectMask, image_layout, VK_IMAGE_LAYOUT_UNDEFINED, caller, kVUIDUndefined, "VUID-VkDescriptorImageInfo-imageLayout-00344", &hit_error); if (hit_error) { *error = "Image layout specified at vkUpdateDescriptorSet* or vkCmdPushDescriptorSet* time " "doesn't match actual image layout at time descriptor is used. 
See previous error callback for " "specific details."; return false; } // Verify Sample counts if ((reqs & DESCRIPTOR_REQ_SINGLE_SAMPLE) && image_node->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires bound image to have VK_SAMPLE_COUNT_1_BIT but got " << string_VkSampleCountFlagBits(image_node->createInfo.samples) << "."; *error = error_str.str(); return false; } if ((reqs & DESCRIPTOR_REQ_MULTI_SAMPLE) && image_node->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires bound image to have multiple samples, but got VK_SAMPLE_COUNT_1_BIT."; *error = error_str.str(); return false; } } else if (descriptor_class == TexelBuffer) { auto texel_buffer = static_cast<TexelDescriptor *>(descriptors_[i].get()); auto buffer_view = device_data_->GetBufferViewState(texel_buffer->GetBufferView()); if (nullptr == buffer_view) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using bufferView " << buffer_view << " that has been destroyed."; *error = error_str.str(); return false; } auto buffer = buffer_view->create_info.buffer; auto buffer_state = device_data_->GetBufferState(buffer); if (!buffer_state) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using buffer " << buffer_state << " that has been destroyed."; *error = error_str.str(); return false; } auto reqs = binding_pair.second; auto format_bits = DescriptorRequirementsBitsFromFormat(buffer_view->create_info.format); if (!(reqs & format_bits)) { // bad component type std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " requires " << StringDescriptorReqComponentType(reqs) << " component type, but bound descriptor format is " << string_VkFormat(buffer_view->create_info.format) << "."; *error = error_str.str(); return false; } } if (descriptor_class == ImageSampler || descriptor_class == PlainSampler) { // Verify Sampler still valid VkSampler sampler; if (descriptor_class == ImageSampler) { sampler = static_cast<ImageSamplerDescriptor *>(descriptors_[i].get())->GetSampler(); } else { sampler = static_cast<SamplerDescriptor *>(descriptors_[i].get())->GetSampler(); } if (!ValidateSampler(sampler, device_data_)) { std::stringstream error_str; error_str << "Descriptor in binding #" << binding << " index " << index << " is using sampler " << sampler << " that has been destroyed."; *error = error_str.str(); return false; } else { SAMPLER_STATE *sampler_state = device_data_->GetSamplerState(sampler); if (sampler_state->samplerConversion && !descriptors_[i].get()->IsImmutableSampler()) { std::stringstream error_str; error_str << "sampler (" << sampler << ") in the descriptor set (" << set_ << ") contains a YCBCR conversion (" << sampler_state->samplerConversion << ") , then the sampler MUST also exists as an immutable sampler."; *error = error_str.str(); } } } } } } return true; } // For given bindings, place any update buffers or images into the passed-in unordered_sets uint32_t cvdescriptorset::DescriptorSet::GetStorageUpdates(const std::map<uint32_t, descriptor_req> &bindings, std::unordered_set<VkBuffer> *buffer_set, std::unordered_set<VkImageView> *image_set) const { auto num_updates = 0; for (auto binding_pair : bindings) { auto binding = binding_pair.first; // 
If a binding doesn't exist, skip it if (!p_layout_->HasBinding(binding)) { continue; } uint32_t start_idx = p_layout_->GetGlobalIndexRangeFromBinding(binding).start; if (descriptors_[start_idx]->IsStorage()) { if (Image == descriptors_[start_idx]->descriptor_class) { for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) { if (descriptors_[start_idx + i]->updated) { image_set->insert(static_cast<ImageDescriptor *>(descriptors_[start_idx + i].get())->GetImageView()); num_updates++; } } } else if (TexelBuffer == descriptors_[start_idx]->descriptor_class) { for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) { if (descriptors_[start_idx + i]->updated) { auto bufferview = static_cast<TexelDescriptor *>(descriptors_[start_idx + i].get())->GetBufferView(); auto bv_state = device_data_->GetBufferViewState(bufferview); if (bv_state) { buffer_set->insert(bv_state->create_info.buffer); num_updates++; } } } } else if (GeneralBuffer == descriptors_[start_idx]->descriptor_class) { for (uint32_t i = 0; i < p_layout_->GetDescriptorCountFromBinding(binding); ++i) { if (descriptors_[start_idx + i]->updated) { buffer_set->insert(static_cast<BufferDescriptor *>(descriptors_[start_idx + i].get())->GetBuffer()); num_updates++; } } } } } return num_updates; } // Set is being deleted or updates so invalidate all bound cmd buffers void cvdescriptorset::DescriptorSet::InvalidateBoundCmdBuffers() { device_data_->InvalidateCommandBuffers(cb_bindings, VulkanTypedHandle(set_, kVulkanObjectTypeDescriptorSet)); } // Loop through the write updates to do for a push descriptor set, ignoring dstSet void cvdescriptorset::DescriptorSet::PerformPushDescriptorsUpdate(uint32_t write_count, const VkWriteDescriptorSet *p_wds) { assert(IsPushDescriptor()); for (uint32_t i = 0; i < write_count; i++) { PerformWriteUpdate(&p_wds[i]); } } // Perform write update in given update struct void cvdescriptorset::DescriptorSet::PerformWriteUpdate(const VkWriteDescriptorSet *update) { // Perform update on a per-binding basis as consecutive updates roll over to next binding auto descriptors_remaining = update->descriptorCount; auto binding_being_updated = update->dstBinding; auto offset = update->dstArrayElement; uint32_t update_index = 0; while (descriptors_remaining) { uint32_t update_count = std::min(descriptors_remaining, GetDescriptorCountFromBinding(binding_being_updated)); auto global_idx = p_layout_->GetGlobalIndexRangeFromBinding(binding_being_updated).start + offset; // Loop over the updates for a single binding at a time for (uint32_t di = 0; di < update_count; ++di, ++update_index) { descriptors_[global_idx + di]->WriteUpdate(update, update_index); } // Roll over to next binding in case of consecutive update descriptors_remaining -= update_count; offset = 0; binding_being_updated++; } if (update->descriptorCount) some_update_ = true; if (!(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { InvalidateBoundCmdBuffers(); } } // Validate Copy update bool cvdescriptorset::ValidateCopyUpdate(const debug_report_data *report_data, const VkCopyDescriptorSet *update, const DescriptorSet *dst_set, const DescriptorSet *src_set, const char *func_name, std::string *error_code, std::string *error_msg) { auto dst_layout = dst_set->GetLayout(); auto src_layout = src_set->GetLayout(); // Verify dst layout still valid if (dst_layout->IsDestroyed()) { *error_code = 
"VUID-VkCopyDescriptorSet-dstSet-parameter"; string_sprintf(error_msg, "Cannot call %s to perform copy update on descriptor set dstSet %s" " created with destroyed VkDescriptorSetLayout %s.", func_name, report_data->FormatHandle(dst_set->GetSet()).c_str(), report_data->FormatHandle(dst_layout->GetDescriptorSetLayout()).c_str()); return false; } // Verify src layout still valid if (src_layout->IsDestroyed()) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-parameter"; string_sprintf(error_msg, "Cannot call %s to perform copy update of dstSet %s" " from descriptor set srcSet %s" " created with destroyed VkDescriptorSetLayout %s.", func_name, report_data->FormatHandle(dst_set->GetSet()).c_str(), report_data->FormatHandle(src_set->GetSet()).c_str(), report_data->FormatHandle(src_layout->GetDescriptorSetLayout()).c_str()); return false; } if (!dst_layout->HasBinding(update->dstBinding)) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-00347"; std::stringstream error_str; error_str << "DescriptorSet " << dst_set->GetSet() << " does not have copy update dest binding of " << update->dstBinding; *error_msg = error_str.str(); return false; } if (!src_set->HasBinding(update->srcBinding)) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-00345"; std::stringstream error_str; error_str << "DescriptorSet " << dst_set->GetSet() << " does not have copy update src binding of " << update->srcBinding; *error_msg = error_str.str(); return false; } // Verify idle ds if (dst_set->in_use.load() && !(dst_layout->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { // TODO : Re-using Free Idle error code, need copy update idle error code *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309"; std::stringstream error_str; error_str << "Cannot call " << func_name << " to perform copy update on descriptor set " << dst_set->GetSet() << " that is in use by a command buffer"; *error_msg = error_str.str(); return false; } // src & dst set bindings are valid // Check bounds of src & dst auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement; if ((src_start_idx + update->descriptorCount) > src_set->GetTotalDescriptorCount()) { // SRC update out of bounds *error_code = "VUID-VkCopyDescriptorSet-srcArrayElement-00346"; std::stringstream error_str; error_str << "Attempting copy update from descriptorSet " << update->srcSet << " binding#" << update->srcBinding << " with offset index of " << src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start << " plus update array offset of " << update->srcArrayElement << " and update of " << update->descriptorCount << " descriptors oversteps total number of descriptors in set: " << src_set->GetTotalDescriptorCount(); *error_msg = error_str.str(); return false; } auto dst_start_idx = dst_layout->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement; if ((dst_start_idx + update->descriptorCount) > dst_layout->GetTotalDescriptorCount()) { // DST update out of bounds *error_code = "VUID-VkCopyDescriptorSet-dstArrayElement-00348"; std::stringstream error_str; error_str << "Attempting copy update to descriptorSet " << dst_set->GetSet() << " binding#" << update->dstBinding << " with offset index of " << dst_layout->GetGlobalIndexRangeFromBinding(update->dstBinding).start << " plus update array offset of " << update->dstArrayElement << " and update of " << 
update->descriptorCount << " descriptors oversteps total number of descriptors in set: " << dst_layout->GetTotalDescriptorCount(); *error_msg = error_str.str(); return false; } // Check that types match // TODO : Base default error case going from here is "VUID-VkAcquireNextImageInfoKHR-semaphore-parameter"2ba which covers all // consistency issues, need more fine-grained error codes *error_code = "VUID-VkCopyDescriptorSet-srcSet-00349"; auto src_type = src_set->GetTypeFromBinding(update->srcBinding); auto dst_type = dst_layout->GetTypeFromBinding(update->dstBinding); if (src_type != dst_type) { std::stringstream error_str; error_str << "Attempting copy update to descriptorSet " << dst_set->GetSet() << " binding #" << update->dstBinding << " with type " << string_VkDescriptorType(dst_type) << " from descriptorSet " << src_set->GetSet() << " binding #" << update->srcBinding << " with type " << string_VkDescriptorType(src_type) << ". Types do not match"; *error_msg = error_str.str(); return false; } // Verify consistency of src & dst bindings if update crosses binding boundaries if ((!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(src_layout.get(), update->srcBinding), update->srcArrayElement, update->descriptorCount, "copy update from", src_set->GetSet(), error_msg)) || (!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(dst_layout.get(), update->dstBinding), update->dstArrayElement, update->descriptorCount, "copy update to", dst_set->GetSet(), error_msg))) { return false; } if ((src_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT) && !(dst_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01918"; std::stringstream error_str; error_str << "If pname:srcSet's (" << update->srcSet << ") layout was created with the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag " "set, then pname:dstSet's (" << update->dstSet << ") layout must: also have been created with the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if (!(src_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT) && (dst_layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01919"; std::stringstream error_str; error_str << "If pname:srcSet's (" << update->srcSet << ") layout was created without the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag " "set, then pname:dstSet's (" << update->dstSet << ") layout must: also have been created without the " "ename:VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if ((src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT) && !(dst_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01920"; std::stringstream error_str; error_str << "If the descriptor pool from which pname:srcSet (" << update->srcSet << ") was allocated was created " "with the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag " "set, then the descriptor pool from which pname:dstSet (" << update->dstSet << ") was allocated must: " "also have been created with the 
ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if (!(src_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT) && (dst_set->GetPoolState()->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) { *error_code = "VUID-VkCopyDescriptorSet-srcSet-01921"; std::stringstream error_str; error_str << "If the descriptor pool from which pname:srcSet (" << update->srcSet << ") was allocated was created " "without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag " "set, then the descriptor pool from which pname:dstSet (" << update->dstSet << ") was allocated must: " "also have been created without the ename:VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT flag set"; *error_msg = error_str.str(); return false; } if (src_type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((update->srcArrayElement % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02223"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "srcArrayElement " << update->srcArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->dstArrayElement % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-dstBinding-02224"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->descriptorCount % 4) != 0) { *error_code = "VUID-VkCopyDescriptorSet-srcBinding-02225"; std::stringstream error_str; error_str << "Attempting copy update to VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT binding with " << "descriptorCount " << update->descriptorCount << " not a multiple of 4"; *error_msg = error_str.str(); return false; } } // Update parameters all look good and descriptor updated so verify update contents if (!VerifyCopyUpdateContents(dst_set->GetDeviceData(), update, src_set, src_type, src_start_idx, func_name, error_code, error_msg)) return false; // All checks passed so update is good return true; } // Perform Copy update void cvdescriptorset::DescriptorSet::PerformCopyUpdate(const VkCopyDescriptorSet *update, const DescriptorSet *src_set) { auto src_start_idx = src_set->GetGlobalIndexRangeFromBinding(update->srcBinding).start + update->srcArrayElement; auto dst_start_idx = p_layout_->GetGlobalIndexRangeFromBinding(update->dstBinding).start + update->dstArrayElement; // Update parameters all look good so perform update for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto src = src_set->descriptors_[src_start_idx + di].get(); auto dst = descriptors_[dst_start_idx + di].get(); if (src->updated) { dst->CopyUpdate(src); some_update_ = true; } else { dst->updated = false; } } if (!(p_layout_->GetDescriptorBindingFlagsFromBinding(update->dstBinding) & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { InvalidateBoundCmdBuffers(); } } // Update the drawing state for the affected descriptors. // Set cb_node to this set and this set to cb_node. 
// Add the bindings of the descriptor // Set the layout based on the current descriptor layout (will mask subsequent layer mismatch errors) // TODO: Modify the UpdateDrawState virtual functions to *only* set initial layout and not change layouts // Prereq: This should be called for a set that has been confirmed to be active for the given cb_node, meaning it's going // to be used in a draw by the given cb_node void cvdescriptorset::DescriptorSet::UpdateDrawState(CoreChecks *device_data, CMD_BUFFER_STATE *cb_node, const std::map<uint32_t, descriptor_req> &binding_req_map) { // bind cb to this descriptor set cb_bindings.insert(cb_node); // Add bindings for descriptor set, the set's pool, and individual objects in the set cb_node->object_bindings.emplace(set_, kVulkanObjectTypeDescriptorSet); pool_state_->cb_bindings.insert(cb_node); cb_node->object_bindings.emplace(pool_state_->pool, kVulkanObjectTypeDescriptorPool); // For the active slots, use set# to look up descriptorSet from boundDescriptorSets, and bind all of that descriptor set's // resources for (auto binding_req_pair : binding_req_map) { auto binding = binding_req_pair.first; // We aren't validating descriptors created with PARTIALLY_BOUND or UPDATE_AFTER_BIND, so don't record state if (p_layout_->GetDescriptorBindingFlagsFromBinding(binding) & (VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT)) { continue; } auto range = p_layout_->GetGlobalIndexRangeFromBinding(binding); for (uint32_t i = range.start; i < range.end; ++i) { descriptors_[i]->UpdateDrawState(device_data, cb_node); } } } void cvdescriptorset::DescriptorSet::FilterAndTrackOneBindingReq(const BindingReqMap::value_type &binding_req_pair, const BindingReqMap &in_req, BindingReqMap *out_req, TrackedBindings *bindings) { assert(out_req); assert(bindings); const auto binding = binding_req_pair.first; // Use insert and look at the boolean ("was inserted") in the returned pair to see if this is a new set member. // Saves one hash lookup vs. find ... compare w/ end ... insert.
const auto it_bool_pair = bindings->insert(binding); if (it_bool_pair.second) { out_req->emplace(binding_req_pair); } } void cvdescriptorset::DescriptorSet::FilterAndTrackOneBindingReq(const BindingReqMap::value_type &binding_req_pair, const BindingReqMap &in_req, BindingReqMap *out_req, TrackedBindings *bindings, uint32_t limit) { if (bindings->size() < limit) FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, bindings); } void cvdescriptorset::DescriptorSet::FilterAndTrackBindingReqs(CMD_BUFFER_STATE *cb_state, const BindingReqMap &in_req, BindingReqMap *out_req) { TrackedBindings &bound = cached_validation_[cb_state].command_binding_and_usage; if (bound.size() == GetBindingCount()) { return; // All bindings are bound, out req is empty } for (const auto &binding_req_pair : in_req) { const auto binding = binding_req_pair.first; // If a binding doesn't exist, or has already been bound, skip it if (p_layout_->HasBinding(binding)) { FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, &bound); } } } void cvdescriptorset::DescriptorSet::FilterAndTrackBindingReqs(CMD_BUFFER_STATE *cb_state, PIPELINE_STATE *pipeline, const BindingReqMap &in_req, BindingReqMap *out_req) { auto &validated = cached_validation_[cb_state]; auto &image_sample_val = validated.image_samplers[pipeline]; auto *const dynamic_buffers = &validated.dynamic_buffers; auto *const non_dynamic_buffers = &validated.non_dynamic_buffers; const auto &stats = p_layout_->GetBindingTypeStats(); for (const auto &binding_req_pair : in_req) { auto binding = binding_req_pair.first; VkDescriptorSetLayoutBinding const *layout_binding = p_layout_->GetDescriptorSetLayoutBindingPtrFromBinding(binding); if (!layout_binding) { continue; } // Caching criteria differs per type. // If image_layout has changed, the image descriptors need to be validated against it. if ((layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) || (layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) { FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, dynamic_buffers, stats.dynamic_buffer_count); } else if ((layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) || (layout_binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)) { FilterAndTrackOneBindingReq(binding_req_pair, in_req, out_req, non_dynamic_buffers, stats.non_dynamic_buffer_count); } else { // This is rather crude, as the changed layouts may not impact the bound descriptors, // but the simple "versioning" is a simple "dirt" test. auto &version = image_sample_val[binding]; // Take advantage of default constructor zero initializing new entries if (version != cb_state->image_layout_change_count) { version = cb_state->image_layout_change_count; out_req->emplace(binding_req_pair); } } } } cvdescriptorset::SamplerDescriptor::SamplerDescriptor(const VkSampler *immut) : sampler_(VK_NULL_HANDLE), immutable_(false) { updated = false; descriptor_class = PlainSampler; if (immut) { sampler_ = *immut; immutable_ = true; updated = true; } } // Validate given sampler.
Currently this only checks to make sure it exists in the samplerMap bool cvdescriptorset::ValidateSampler(const VkSampler sampler, CoreChecks *dev_data) { return (dev_data->GetSamplerState(sampler) != nullptr); } bool cvdescriptorset::ValidateImageUpdate(VkImageView image_view, VkImageLayout image_layout, VkDescriptorType type, CoreChecks *dev_data, const char *func_name, std::string *error_code, std::string *error_msg) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00326"; auto iv_state = dev_data->GetImageViewState(image_view); if (!iv_state) { std::stringstream error_str; error_str << "Invalid VkImageView: " << image_view; *error_msg = error_str.str(); return false; } // Note that when an imageview is created, we validated that memory is bound so no need to re-check here // Validate that imageLayout is compatible with aspect_mask and image format // and validate that image usage bits are correct for given usage VkImageAspectFlags aspect_mask = iv_state->create_info.subresourceRange.aspectMask; VkImage image = iv_state->create_info.image; VkFormat format = VK_FORMAT_MAX_ENUM; VkImageUsageFlags usage = 0; auto image_node = dev_data->GetImageState(image); if (image_node) { format = image_node->createInfo.format; usage = image_node->createInfo.usage; // Validate that memory is bound to image // TODO: This should have its own valid usage id apart from 2524 which is from CreateImageView case. The only // the error here occurs is if memory bound to a created imageView has been freed. if (dev_data->ValidateMemoryIsBoundToImage(image_node, func_name, "VUID-VkImageViewCreateInfo-image-01020")) { *error_code = "VUID-VkImageViewCreateInfo-image-01020"; *error_msg = "No memory bound to image."; return false; } // KHR_maintenance1 allows rendering into 2D or 2DArray views which slice a 3D image, // but not binding them to descriptor sets. if (image_node->createInfo.imageType == VK_IMAGE_TYPE_3D && (iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D || iv_state->create_info.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) { *error_code = "VUID-VkDescriptorImageInfo-imageView-00343"; *error_msg = "ImageView must not be a 2D or 2DArray view of a 3D image"; return false; } } // First validate that format and layout are compatible if (format == VK_FORMAT_MAX_ENUM) { std::stringstream error_str; error_str << "Invalid image (" << image << ") in imageView (" << image_view << ")."; *error_msg = error_str.str(); return false; } // TODO : The various image aspect and format checks here are based on general spec language in 11.5 Image Views section under // vkCreateImageView(). What's the best way to create unique id for these cases? 
bool ds = FormatIsDepthOrStencil(format); switch (image_layout) { case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: // Only Color bit must be set if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) { std::stringstream error_str; error_str << "ImageView (" << image_view << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but does not have VK_IMAGE_ASPECT_COLOR_BIT set."; *error_msg = error_str.str(); return false; } // format must NOT be DS if (ds) { std::stringstream error_str; error_str << "ImageView (" << image_view << ") uses layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL but the image format is " << string_VkFormat(format) << " which is not a color format."; *error_msg = error_str.str(); return false; } break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL: // Depth or stencil bit must be set, but both must NOT be set if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) { if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) { // both must NOT be set std::stringstream error_str; error_str << "ImageView (" << image_view << ") has both STENCIL and DEPTH aspects set"; *error_msg = error_str.str(); return false; } } else if (!(aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT)) { // Neither were set std::stringstream error_str; error_str << "ImageView (" << image_view << ") has layout " << string_VkImageLayout(image_layout) << " but does not have STENCIL or DEPTH aspects set"; *error_msg = error_str.str(); return false; } // format must be DS if (!ds) { std::stringstream error_str; error_str << "ImageView (" << image_view << ") has layout " << string_VkImageLayout(image_layout) << " but the image format is " << string_VkFormat(format) << " which is not a depth/stencil format."; *error_msg = error_str.str(); return false; } break; default: // For other layouts if the source is depth/stencil image, both aspect bits must not be set if (ds) { if (aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) { if (aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) { // both must NOT be set std::stringstream error_str; error_str << "ImageView (" << image_view << ") has layout " << string_VkImageLayout(image_layout) << " and is using depth/stencil image of format " << string_VkFormat(format) << " but it has both STENCIL and DEPTH aspects set, which is illegal. 
When using a depth/stencil " "image in a descriptor set, please only set either VK_IMAGE_ASPECT_DEPTH_BIT or " "VK_IMAGE_ASPECT_STENCIL_BIT depending on whether it will be used for depth reads or stencil " "reads respectively."; *error_msg = error_str.str(); return false; } } } break; } // Now validate that usage flags are correctly set for given type of update // As we're switching per-type, if any type has specific layout requirements, check those here as well // TODO : The various image usage bit requirements are in general spec language for VkImageUsageFlags bit block in 11.3 Images // under vkCreateImage() // TODO : Need to also validate case "VUID-VkWriteDescriptorSet-descriptorType-00336" where STORAGE_IMAGE & INPUT_ATTACH types // must have been created with identify swizzle const char *error_usage_bit = nullptr; switch (type) { case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { if (!(usage & VK_IMAGE_USAGE_SAMPLED_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_SAMPLED_BIT"; } break; } case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { if (!(usage & VK_IMAGE_USAGE_STORAGE_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_STORAGE_BIT"; } else if (VK_IMAGE_LAYOUT_GENERAL != image_layout) { std::stringstream error_str; // TODO : Need to create custom enum error codes for these cases if (image_node->shared_presentable) { if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != image_layout) { error_str << "ImageView (" << image_view << ") of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type with a front-buffered image is being updated with " "layout " << string_VkImageLayout(image_layout) << " but according to spec section 13.1 Descriptor Types, 'Front-buffered images that report " "support for VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT must be in the " "VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR layout.'"; *error_msg = error_str.str(); return false; } } else if (VK_IMAGE_LAYOUT_GENERAL != image_layout) { error_str << "ImageView (" << image_view << ") of VK_DESCRIPTOR_TYPE_STORAGE_IMAGE type is being updated with layout " << string_VkImageLayout(image_layout) << " but according to spec section 13.1 Descriptor Types, 'Load and store operations on storage " "images can only be done on images in VK_IMAGE_LAYOUT_GENERAL layout.'"; *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: { if (!(usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) { error_usage_bit = "VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT"; } break; } default: break; } if (error_usage_bit) { std::stringstream error_str; error_str << "ImageView (" << image_view << ") with usage mask 0x" << usage << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have " << error_usage_bit << " set."; *error_msg = error_str.str(); return false; } if ((type == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)) { // Test that the layout is compatible with the descriptorType for the two sampled image types const static std::array<VkImageLayout, 3> valid_layouts = { {VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL}}; struct ExtensionLayout { VkImageLayout layout; bool DeviceExtensions::*extension; }; const static std::array<ExtensionLayout, 3> extended_layouts{ {// Note double brace req'd for aggregate initialization {VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR, &DeviceExtensions::vk_khr_shared_presentable_image}, {VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL, 
&DeviceExtensions::vk_khr_maintenance2}, {VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL, &DeviceExtensions::vk_khr_maintenance2}}}; auto is_layout = [image_layout, dev_data](const ExtensionLayout &ext_layout) { return dev_data->device_extensions.*(ext_layout.extension) && (ext_layout.layout == image_layout); }; bool valid_layout = (std::find(valid_layouts.cbegin(), valid_layouts.cend(), image_layout) != valid_layouts.cend()) || std::any_of(extended_layouts.cbegin(), extended_layouts.cend(), is_layout); if (!valid_layout) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01403"; std::stringstream error_str; error_str << "Descriptor update with descriptorType " << string_VkDescriptorType(type) << " is being updated with invalid imageLayout " << string_VkImageLayout(image_layout) << ". Allowed layouts are: VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL, " << "VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL"; for (auto &ext_layout : extended_layouts) { if (dev_data->device_extensions.*(ext_layout.extension)) { error_str << ", " << string_VkImageLayout(ext_layout.layout); } } *error_msg = error_str.str(); return false; } } return true; } void cvdescriptorset::SamplerDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { if (!immutable_) { sampler_ = update->pImageInfo[index].sampler; } updated = true; } void cvdescriptorset::SamplerDescriptor::CopyUpdate(const Descriptor *src) { if (!immutable_) { auto update_sampler = static_cast<const SamplerDescriptor *>(src)->sampler_; sampler_ = update_sampler; } updated = true; } void cvdescriptorset::SamplerDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { if (!immutable_) { auto sampler_state = dev_data->GetSamplerState(sampler_); if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state); } } cvdescriptorset::ImageSamplerDescriptor::ImageSamplerDescriptor(const VkSampler *immut) : sampler_(VK_NULL_HANDLE), immutable_(false), image_view_(VK_NULL_HANDLE), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) { updated = false; descriptor_class = ImageSampler; if (immut) { sampler_ = *immut; immutable_ = true; } } void cvdescriptorset::ImageSamplerDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &image_info = update->pImageInfo[index]; if (!immutable_) { sampler_ = image_info.sampler; } image_view_ = image_info.imageView; image_layout_ = image_info.imageLayout; } void cvdescriptorset::ImageSamplerDescriptor::CopyUpdate(const Descriptor *src) { if (!immutable_) { auto update_sampler = static_cast<const ImageSamplerDescriptor *>(src)->sampler_; sampler_ = update_sampler; } auto image_view = static_cast<const ImageSamplerDescriptor *>(src)->image_view_; auto image_layout = static_cast<const ImageSamplerDescriptor *>(src)->image_layout_; updated = true; image_view_ = image_view; image_layout_ = image_layout; } void cvdescriptorset::ImageSamplerDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { // First add binding for any non-immutable sampler if (!immutable_) { auto sampler_state = dev_data->GetSamplerState(sampler_); if (sampler_state) dev_data->AddCommandBufferBindingSampler(cb_node, sampler_state); } // Add binding for image auto iv_state = dev_data->GetImageViewState(image_view_); if (iv_state) { dev_data->AddCommandBufferBindingImageView(cb_node, iv_state); dev_data->SetImageViewInitialLayout(cb_node, *iv_state, image_layout_); } } 
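// Illustrative sketch (not part of the validation layer source): the Sampler/ImageSampler/Image
// descriptor WriteUpdate/CopyUpdate methods above mirror the data an application supplies through
// vkUpdateDescriptorSets(). A minimal combined-image-sampler write, assuming 'device', 'set',
// 'view', and 'sampler' are valid handles created elsewhere, would look like this:
//
//     VkDescriptorImageInfo image_info = {};
//     image_info.sampler = sampler;
//     image_info.imageView = view;
//     image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
//
//     VkWriteDescriptorSet write = {};
//     write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//     write.dstSet = set;
//     write.dstBinding = 0;
//     write.dstArrayElement = 0;
//     write.descriptorCount = 1;
//     write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
//     write.pImageInfo = &image_info;
//     vkUpdateDescriptorSets(device, 1, &write, 0, nullptr);
//
// ImageSamplerDescriptor::WriteUpdate() above records exactly these fields (the sampler unless the
// binding uses an immutable sampler, plus imageView and imageLayout) so that draw-time validation
// can later check the bound image view and its expected layout.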
cvdescriptorset::ImageDescriptor::ImageDescriptor(const VkDescriptorType type) : storage_(false), image_view_(VK_NULL_HANDLE), image_layout_(VK_IMAGE_LAYOUT_UNDEFINED) { updated = false; descriptor_class = Image; if (VK_DESCRIPTOR_TYPE_STORAGE_IMAGE == type) storage_ = true; } void cvdescriptorset::ImageDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &image_info = update->pImageInfo[index]; image_view_ = image_info.imageView; image_layout_ = image_info.imageLayout; } void cvdescriptorset::ImageDescriptor::CopyUpdate(const Descriptor *src) { auto image_view = static_cast<const ImageDescriptor *>(src)->image_view_; auto image_layout = static_cast<const ImageDescriptor *>(src)->image_layout_; updated = true; image_view_ = image_view; image_layout_ = image_layout; } void cvdescriptorset::ImageDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { // Add binding for image auto iv_state = dev_data->GetImageViewState(image_view_); if (iv_state) { dev_data->AddCommandBufferBindingImageView(cb_node, iv_state); dev_data->SetImageViewInitialLayout(cb_node, *iv_state, image_layout_); } } cvdescriptorset::BufferDescriptor::BufferDescriptor(const VkDescriptorType type) : storage_(false), dynamic_(false), buffer_(VK_NULL_HANDLE), offset_(0), range_(0) { updated = false; descriptor_class = GeneralBuffer; if (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == type) { dynamic_ = true; } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == type) { storage_ = true; } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == type) { dynamic_ = true; storage_ = true; } } void cvdescriptorset::BufferDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; const auto &buffer_info = update->pBufferInfo[index]; buffer_ = buffer_info.buffer; offset_ = buffer_info.offset; range_ = buffer_info.range; } void cvdescriptorset::BufferDescriptor::CopyUpdate(const Descriptor *src) { auto buff_desc = static_cast<const BufferDescriptor *>(src); updated = true; buffer_ = buff_desc->buffer_; offset_ = buff_desc->offset_; range_ = buff_desc->range_; } void cvdescriptorset::BufferDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { auto buffer_node = dev_data->GetBufferState(buffer_); if (buffer_node) dev_data->AddCommandBufferBindingBuffer(cb_node, buffer_node); } cvdescriptorset::TexelDescriptor::TexelDescriptor(const VkDescriptorType type) : buffer_view_(VK_NULL_HANDLE), storage_(false) { updated = false; descriptor_class = TexelBuffer; if (VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER == type) storage_ = true; } void cvdescriptorset::TexelDescriptor::WriteUpdate(const VkWriteDescriptorSet *update, const uint32_t index) { updated = true; buffer_view_ = update->pTexelBufferView[index]; } void cvdescriptorset::TexelDescriptor::CopyUpdate(const Descriptor *src) { updated = true; buffer_view_ = static_cast<const TexelDescriptor *>(src)->buffer_view_; } void cvdescriptorset::TexelDescriptor::UpdateDrawState(CoreChecks *dev_data, CMD_BUFFER_STATE *cb_node) { auto bv_state = dev_data->GetBufferViewState(buffer_view_); if (bv_state) { dev_data->AddCommandBufferBindingBufferView(cb_node, bv_state); } } // This is a helper function that iterates over a set of Write and Copy updates, pulls the DescriptorSet* for updated // sets, and then calls their respective Validate[Write|Copy]Update functions. 
// If the update hits an issue for which the callback returns "true", meaning that the call down the chain should // be skipped, then true is returned. // If there is no issue with the update, then false is returned. bool CoreChecks::ValidateUpdateDescriptorSets(uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count, const VkCopyDescriptorSet *p_cds, const char *func_name) { bool skip = false; // Validate Write updates for (uint32_t i = 0; i < write_count; i++) { auto dest_set = p_wds[i].dstSet; auto set_node = GetSetNode(dest_set); if (!set_node) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dest_set), kVUID_Core_DrawState_InvalidDescriptorSet, "Cannot call %s on descriptor set %s that has not been allocated.", func_name, report_data->FormatHandle(dest_set).c_str()); } else { std::string error_code; std::string error_str; if (!ValidateWriteUpdate(set_node, report_data, &p_wds[i], func_name, &error_code, &error_str)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dest_set), error_code, "%s failed write update validation for Descriptor Set %s with error: %s.", func_name, report_data->FormatHandle(dest_set).c_str(), error_str.c_str()); } } } // Now validate copy updates for (uint32_t i = 0; i < copy_count; ++i) { auto dst_set = p_cds[i].dstSet; auto src_set = p_cds[i].srcSet; auto src_node = GetSetNode(src_set); auto dst_node = GetSetNode(dst_set); // Object_tracker verifies that src & dest descriptor set are valid assert(src_node); assert(dst_node); std::string error_code; std::string error_str; if (!ValidateCopyUpdate(report_data, &p_cds[i], dst_node, src_node, func_name, &error_code, &error_str)) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT, HandleToUint64(dst_set), error_code, "%s failed copy update from Descriptor Set %s to Descriptor Set %s with error: %s.", func_name, report_data->FormatHandle(src_set).c_str(), report_data->FormatHandle(dst_set).c_str(), error_str.c_str()); } } return skip; } // This is a helper function that iterates over a set of Write and Copy updates, pulls the DescriptorSet* for updated // sets, and then calls their respective Perform[Write|Copy]Update functions. // Prerequisite : ValidateUpdateDescriptorSets() should be called and return "false" prior to calling PerformUpdateDescriptorSets() // with the same set of updates. // This is split from the validate code to allow validation prior to calling down the chain, and then update after // calling down the chain. 
void cvdescriptorset::PerformUpdateDescriptorSets(CoreChecks *dev_data, uint32_t write_count, const VkWriteDescriptorSet *p_wds, uint32_t copy_count, const VkCopyDescriptorSet *p_cds) { // Write updates first uint32_t i = 0; for (i = 0; i < write_count; ++i) { auto dest_set = p_wds[i].dstSet; auto set_node = dev_data->GetSetNode(dest_set); if (set_node) { set_node->PerformWriteUpdate(&p_wds[i]); } } // Now copy updates for (i = 0; i < copy_count; ++i) { auto dst_set = p_cds[i].dstSet; auto src_set = p_cds[i].srcSet; auto src_node = dev_data->GetSetNode(src_set); auto dst_node = dev_data->GetSetNode(dst_set); if (src_node && dst_node) { dst_node->PerformCopyUpdate(&p_cds[i], src_node); } } } cvdescriptorset::DecodedTemplateUpdate::DecodedTemplateUpdate(CoreChecks *device_data, VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData, VkDescriptorSetLayout push_layout) { auto const &create_info = template_state->create_info; inline_infos.resize(create_info.descriptorUpdateEntryCount); // Make sure we have one if we need it desc_writes.reserve(create_info.descriptorUpdateEntryCount); // emplaced, so reserved without initialization VkDescriptorSetLayout effective_dsl = create_info.templateType == VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET ? create_info.descriptorSetLayout : push_layout; auto layout_obj = GetDescriptorSetLayout(device_data, effective_dsl); // Create a WriteDescriptorSet struct for each template update entry for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) { auto binding_count = layout_obj->GetDescriptorCountFromBinding(create_info.pDescriptorUpdateEntries[i].dstBinding); auto binding_being_updated = create_info.pDescriptorUpdateEntries[i].dstBinding; auto dst_array_element = create_info.pDescriptorUpdateEntries[i].dstArrayElement; desc_writes.reserve(desc_writes.size() + create_info.pDescriptorUpdateEntries[i].descriptorCount); for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) { desc_writes.emplace_back(); auto &write_entry = desc_writes.back(); size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride; char *update_entry = (char *)(pData) + offset; if (dst_array_element >= binding_count) { dst_array_element = 0; binding_being_updated = layout_obj->GetNextValidBinding(binding_being_updated); } write_entry.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; write_entry.pNext = NULL; write_entry.dstSet = descriptorSet; write_entry.dstBinding = binding_being_updated; write_entry.dstArrayElement = dst_array_element; write_entry.descriptorCount = 1; write_entry.descriptorType = create_info.pDescriptorUpdateEntries[i].descriptorType; switch (create_info.pDescriptorUpdateEntries[i].descriptorType) { case VK_DESCRIPTOR_TYPE_SAMPLER: case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: write_entry.pImageInfo = reinterpret_cast<VkDescriptorImageInfo *>(update_entry); break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: write_entry.pBufferInfo = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry); break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: write_entry.pTexelBufferView = reinterpret_cast<VkBufferView *>(update_entry); break; case 
VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: { VkWriteDescriptorSetInlineUniformBlockEXT *inline_info = &inline_infos[i]; inline_info->sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET_INLINE_UNIFORM_BLOCK_EXT; inline_info->pNext = nullptr; inline_info->dataSize = create_info.pDescriptorUpdateEntries[i].descriptorCount; inline_info->pData = update_entry; write_entry.pNext = inline_info; // skip the rest of the array, they just represent bytes in the update j = create_info.pDescriptorUpdateEntries[i].descriptorCount; break; } default: assert(0); break; } dst_array_element++; } } } // These helper functions carry out the validate and record descriptor updates peformed via update templates. They decode // the templatized data and leverage the non-template UpdateDescriptor helper functions. bool CoreChecks::ValidateUpdateDescriptorSetsWithTemplateKHR(VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData) { // Translate the templated update into a normal update for validation... cvdescriptorset::DecodedTemplateUpdate decoded_update(this, descriptorSet, template_state, pData); return ValidateUpdateDescriptorSets(static_cast<uint32_t>(decoded_update.desc_writes.size()), decoded_update.desc_writes.data(), 0, NULL, "vkUpdateDescriptorSetWithTemplate()"); } void CoreChecks::PerformUpdateDescriptorSetsWithTemplateKHR(VkDescriptorSet descriptorSet, const TEMPLATE_STATE *template_state, const void *pData) { // Translate the templated update into a normal update for validation... cvdescriptorset::DecodedTemplateUpdate decoded_update(this, descriptorSet, template_state, pData); cvdescriptorset::PerformUpdateDescriptorSets(this, static_cast<uint32_t>(decoded_update.desc_writes.size()), decoded_update.desc_writes.data(), 0, NULL); } std::string cvdescriptorset::DescriptorSet::StringifySetAndLayout() const { std::string out; uint64_t layout_handle = HandleToUint64(p_layout_->GetDescriptorSetLayout()); if (IsPushDescriptor()) { string_sprintf(&out, "Push Descriptors defined with VkDescriptorSetLayout 0x%" PRIxLEAST64, layout_handle); } else { string_sprintf(&out, "VkDescriptorSet 0x%" PRIxLEAST64 " allocated with VkDescriptorSetLayout 0x%" PRIxLEAST64, HandleToUint64(set_), layout_handle); } return out; }; // Loop through the write updates to validate for a push descriptor set, ignoring dstSet bool cvdescriptorset::ValidatePushDescriptorsUpdate(const DescriptorSet *push_set, const debug_report_data *report_data, uint32_t write_count, const VkWriteDescriptorSet *p_wds, const char *func_name) { assert(push_set->IsPushDescriptor()); bool skip = false; for (uint32_t i = 0; i < write_count; i++) { std::string error_code; std::string error_str; if (!ValidateWriteUpdate(push_set, report_data, &p_wds[i], func_name, &error_code, &error_str)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, HandleToUint64(push_set->GetDescriptorSetLayout()), error_code, "%s failed update validation: %s.", func_name, error_str.c_str()); } } return skip; } // For the given buffer, verify that its creation parameters are appropriate for the given type // If there's an error, update the error_msg string with details and return false, else return true bool cvdescriptorset::ValidateBufferUsage(BUFFER_STATE const *buffer_node, VkDescriptorType type, std::string *error_code, std::string *error_msg) { // Verify that usage bits set correctly for given type auto usage = buffer_node->createInfo.usage; const char *error_usage_bit = nullptr; switch 
(type) { case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: if (!(usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00334"; error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: if (!(usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00335"; error_usage_bit = "VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: if (!(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00330"; error_usage_bit = "VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT"; } break; case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: if (!(usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00331"; error_usage_bit = "VK_BUFFER_USAGE_STORAGE_BUFFER_BIT"; } break; default: break; } if (error_usage_bit) { std::stringstream error_str; error_str << "Buffer (" << buffer_node->buffer << ") with usage mask 0x" << usage << " being used for a descriptor update of type " << string_VkDescriptorType(type) << " does not have " << error_usage_bit << " set."; *error_msg = error_str.str(); return false; } return true; } // For buffer descriptor updates, verify the buffer usage and VkDescriptorBufferInfo struct which includes: // 1. buffer is valid // 2. buffer was created with correct usage flags // 3. offset is less than buffer size // 4. range is either VK_WHOLE_SIZE or falls in (0, (buffer size - offset)] // 5. range and offset are within the device's limits // If there's an error, update the error_msg string with details and return false, else return true bool cvdescriptorset::ValidateBufferUpdate(CoreChecks *device_data, VkDescriptorBufferInfo const *buffer_info, VkDescriptorType type, const char *func_name, std::string *error_code, std::string *error_msg) { // First make sure that buffer is valid auto buffer_node = device_data->GetBufferState(buffer_info->buffer); // Any invalid buffer should already be caught by object_tracker assert(buffer_node); if (device_data->ValidateMemoryIsBoundToBuffer(buffer_node, func_name, "VUID-VkWriteDescriptorSet-descriptorType-00329")) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00329"; *error_msg = "No memory bound to buffer."; return false; } // Verify usage bits if (!ValidateBufferUsage(buffer_node, type, error_code, error_msg)) { // error_msg will have been updated by ValidateBufferUsage() return false; } // offset must be less than buffer size if (buffer_info->offset >= buffer_node->createInfo.size) { *error_code = "VUID-VkDescriptorBufferInfo-offset-00340"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo offset of " << buffer_info->offset << " is greater than or equal to buffer " << buffer_node->buffer << " size of " << buffer_node->createInfo.size; *error_msg = error_str.str(); return false; } if (buffer_info->range != VK_WHOLE_SIZE) { // Range must be VK_WHOLE_SIZE or > 0 if (!buffer_info->range) { *error_code = "VUID-VkDescriptorBufferInfo-range-00341"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is not VK_WHOLE_SIZE and is zero, which is not allowed."; *error_msg = error_str.str(); return false; } // Range must be VK_WHOLE_SIZE or <= (buffer size - offset) if (buffer_info->range > (buffer_node->createInfo.size - buffer_info->offset)) { *error_code 
= "VUID-VkDescriptorBufferInfo-range-00342"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than buffer size (" << buffer_node->createInfo.size << ") minus requested offset of " << buffer_info->offset; *error_msg = error_str.str(); return false; } } // Check buffer update sizes against device limits const auto &limits = device_data->phys_dev_props.limits; if (VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type || VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC == type) { auto max_ub_range = limits.maxUniformBufferRange; if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_ub_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than this device's maxUniformBufferRange (" << max_ub_range << ")"; *error_msg = error_str.str(); return false; } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_ub_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00332"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range " << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's " << "maxUniformBufferRange (" << max_ub_range << ")"; *error_msg = error_str.str(); return false; } } else if (VK_DESCRIPTOR_TYPE_STORAGE_BUFFER == type || VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC == type) { auto max_sb_range = limits.maxStorageBufferRange; if (buffer_info->range != VK_WHOLE_SIZE && buffer_info->range > max_sb_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is " << buffer_info->range << " which is greater than this device's maxStorageBufferRange (" << max_sb_range << ")"; *error_msg = error_str.str(); return false; } else if (buffer_info->range == VK_WHOLE_SIZE && (buffer_node->createInfo.size - buffer_info->offset) > max_sb_range) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00333"; std::stringstream error_str; error_str << "VkDescriptorBufferInfo range is VK_WHOLE_SIZE but effective range " << "(" << (buffer_node->createInfo.size - buffer_info->offset) << ") is greater than this device's " << "maxStorageBufferRange (" << max_sb_range << ")"; *error_msg = error_str.str(); return false; } } return true; } // Verify that the contents of the update are ok, but don't perform actual update bool cvdescriptorset::VerifyCopyUpdateContents(CoreChecks *device_data, const VkCopyDescriptorSet *update, const DescriptorSet *src_set, VkDescriptorType type, uint32_t index, const char *func_name, std::string *error_code, std::string *error_msg) { // Note : Repurposing some Write update error codes here as specific details aren't called out for copy updates like they are // for write updates switch (src_set->GetDescriptorFromGlobalIndex(index)->descriptor_class) { case PlainSampler: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; if (!src_desc->IsImmutableSampler()) { auto update_sampler = static_cast<const SamplerDescriptor *>(src_desc)->GetSampler(); if (!ValidateSampler(update_sampler, device_data)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted copy update 
to sampler descriptor with invalid sampler: " << update_sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } } break; } case ImageSampler: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto img_samp_desc = static_cast<const ImageSamplerDescriptor *>(src_desc); // First validate sampler if (!img_samp_desc->IsImmutableSampler()) { auto update_sampler = img_samp_desc->GetSampler(); if (!ValidateSampler(update_sampler, device_data)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted copy update to sampler descriptor with invalid sampler: " << update_sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } // Validate image auto image_view = img_samp_desc->GetImageView(); auto image_layout = img_samp_desc->GetImageLayout(); if (!ValidateImageUpdate(image_view, image_layout, type, device_data, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to combined image sampler descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case Image: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto img_desc = static_cast<const ImageDescriptor *>(src_desc); auto image_view = img_desc->GetImageView(); auto image_layout = img_desc->GetImageLayout(); if (!ValidateImageUpdate(image_view, image_layout, type, device_data, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to image descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case TexelBuffer: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto buffer_view = static_cast<const TexelDescriptor *>(src_desc)->GetBufferView(); auto bv_state = device_data->GetBufferViewState(buffer_view); if (!bv_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted copy update to texel buffer descriptor with invalid buffer view: " << buffer_view; *error_msg = error_str.str(); return false; } auto buffer = bv_state->create_info.buffer; if (!ValidateBufferUsage(device_data->GetBufferState(buffer), type, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to texel buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case GeneralBuffer: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { const auto src_desc = src_set->GetDescriptorFromGlobalIndex(index + di); if (!src_desc->updated) continue; auto buffer = static_cast<const BufferDescriptor *>(src_desc)->GetBuffer(); if (!ValidateBufferUsage(device_data->GetBufferState(buffer), type, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted copy update to buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case InlineUniform: case AccelerationStructure: break; default: assert(0); // We've already verified update type so should never get here break; } // All checks 
passed so update contents are good return true; } // Update the common AllocateDescriptorSetsData void CoreChecks::UpdateAllocateDescriptorSetsData(const VkDescriptorSetAllocateInfo *p_alloc_info, cvdescriptorset::AllocateDescriptorSetsData *ds_data) { for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (layout) { ds_data->layout_nodes[i] = layout; // Count total descriptors required per type for (uint32_t j = 0; j < layout->GetBindingCount(); ++j) { const auto &binding_layout = layout->GetDescriptorSetLayoutBindingPtrFromIndex(j); uint32_t typeIndex = static_cast<uint32_t>(binding_layout->descriptorType); ds_data->required_descriptors_by_type[typeIndex] += binding_layout->descriptorCount; } } // Any unknown layouts will be flagged as errors during ValidateAllocateDescriptorSets() call } } // Verify that the state at allocate time is correct, but don't actually allocate the sets yet bool CoreChecks::ValidateAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info, const cvdescriptorset::AllocateDescriptorSetsData *ds_data) { bool skip = false; auto pool_state = GetDescriptorPoolState(p_alloc_info->descriptorPool); for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (layout) { // nullptr layout indicates no valid layout handle for this device, validated/logged in object_tracker if (layout->IsPushDescriptor()) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, HandleToUint64(p_alloc_info->pSetLayouts[i]), "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-00308", "Layout %s specified at pSetLayouts[%" PRIu32 "] in vkAllocateDescriptorSets() was created with invalid flag %s set.", report_data->FormatHandle(p_alloc_info->pSetLayouts[i]).c_str(), i, "VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR"); } if (layout->GetCreateFlags() & VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT_EXT && !(pool_state->createInfo.flags & VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT_EXT)) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-03044", "Descriptor set layout create flags and pool create flags mismatch for index (%d)", i); } } } if (!device_extensions.vk_khr_maintenance1) { // Track number of descriptorSets allowable in this pool if (pool_state->availableSets < p_alloc_info->descriptorSetCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool_state->pool), "VUID-VkDescriptorSetAllocateInfo-descriptorSetCount-00306", "Unable to allocate %u descriptorSets from pool %s" ". 
This pool only has %d descriptorSets remaining.", p_alloc_info->descriptorSetCount, report_data->FormatHandle(pool_state->pool).c_str(), pool_state->availableSets); } // Determine whether descriptor counts are satisfiable for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) { if (ds_data->required_descriptors_by_type.at(it->first) > pool_state->availableDescriptorTypeCount[it->first]) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, HandleToUint64(pool_state->pool), "VUID-VkDescriptorSetAllocateInfo-descriptorPool-00307", "Unable to allocate %u descriptors of type %s from pool %s" ". This pool only has %d descriptors of this type remaining.", ds_data->required_descriptors_by_type.at(it->first), string_VkDescriptorType(VkDescriptorType(it->first)), report_data->FormatHandle(pool_state->pool).c_str(), pool_state->availableDescriptorTypeCount[it->first]); } } } const auto *count_allocate_info = lvl_find_in_chain<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(p_alloc_info->pNext); if (count_allocate_info) { if (count_allocate_info->descriptorSetCount != 0 && count_allocate_info->descriptorSetCount != p_alloc_info->descriptorSetCount) { skip |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-descriptorSetCount-03045", "VkDescriptorSetAllocateInfo::descriptorSetCount (%d) != " "VkDescriptorSetVariableDescriptorCountAllocateInfoEXT::descriptorSetCount (%d)", p_alloc_info->descriptorSetCount, count_allocate_info->descriptorSetCount); } if (count_allocate_info->descriptorSetCount == p_alloc_info->descriptorSetCount) { for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { auto layout = GetDescriptorSetLayout(this, p_alloc_info->pSetLayouts[i]); if (count_allocate_info->pDescriptorCounts[i] > layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())) { skip |= log_msg( report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, 0, "VUID-VkDescriptorSetVariableDescriptorCountAllocateInfoEXT-pSetLayouts-03046", "pDescriptorCounts[%d] = (%d), binding's descriptorCount = (%d)", i, count_allocate_info->pDescriptorCounts[i], layout->GetDescriptorCountFromBinding(layout->GetMaxBinding())); } } } } return skip; } // Decrement allocated sets from the pool and insert new sets into set_map void CoreChecks::PerformAllocateDescriptorSets(const VkDescriptorSetAllocateInfo *p_alloc_info, const VkDescriptorSet *descriptor_sets, const cvdescriptorset::AllocateDescriptorSetsData *ds_data) { auto pool_state = descriptorPoolMap[p_alloc_info->descriptorPool].get(); // Account for sets and individual descriptors allocated from pool pool_state->availableSets -= p_alloc_info->descriptorSetCount; for (auto it = ds_data->required_descriptors_by_type.begin(); it != ds_data->required_descriptors_by_type.end(); ++it) { pool_state->availableDescriptorTypeCount[it->first] -= ds_data->required_descriptors_by_type.at(it->first); } const auto *variable_count_info = lvl_find_in_chain<VkDescriptorSetVariableDescriptorCountAllocateInfoEXT>(p_alloc_info->pNext); bool variable_count_valid = variable_count_info && variable_count_info->descriptorSetCount == p_alloc_info->descriptorSetCount; // Create tracking object for each descriptor set; insert into global map and the pool's set. 
for (uint32_t i = 0; i < p_alloc_info->descriptorSetCount; i++) { uint32_t variable_count = variable_count_valid ? variable_count_info->pDescriptorCounts[i] : 0; std::unique_ptr<cvdescriptorset::DescriptorSet> new_ds(new cvdescriptorset::DescriptorSet( descriptor_sets[i], p_alloc_info->descriptorPool, ds_data->layout_nodes[i], variable_count, this)); pool_state->sets.insert(new_ds.get()); new_ds->in_use.store(0); setMap[descriptor_sets[i]] = std::move(new_ds); } } cvdescriptorset::PrefilterBindRequestMap::PrefilterBindRequestMap(cvdescriptorset::DescriptorSet &ds, const BindingReqMap &in_map, CMD_BUFFER_STATE *cb_state) : filtered_map_(), orig_map_(in_map) { if (ds.GetTotalDescriptorCount() > kManyDescriptors_) { filtered_map_.reset(new std::map<uint32_t, descriptor_req>()); ds.FilterAndTrackBindingReqs(cb_state, orig_map_, filtered_map_.get()); } } cvdescriptorset::PrefilterBindRequestMap::PrefilterBindRequestMap(cvdescriptorset::DescriptorSet &ds, const BindingReqMap &in_map, CMD_BUFFER_STATE *cb_state, PIPELINE_STATE *pipeline) : filtered_map_(), orig_map_(in_map) { if (ds.GetTotalDescriptorCount() > kManyDescriptors_) { filtered_map_.reset(new std::map<uint32_t, descriptor_req>()); ds.FilterAndTrackBindingReqs(cb_state, pipeline, orig_map_, filtered_map_.get()); } } // Starting at offset descriptor of given binding, parse over update_count // descriptor updates and verify that for any binding boundaries that are crossed, the next binding(s) are all consistent // Consistency means that their type, stage flags, and whether or not they use immutable samplers matches // If so, return true. If not, fill in error_msg and return false bool cvdescriptorset::VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator current_binding, uint32_t offset, uint32_t update_count, const char *type, const VkDescriptorSet set, std::string *error_msg) { // Verify consecutive bindings match (if needed) auto orig_binding = current_binding; // Track count of descriptors in the current_bindings that are remaining to be updated auto binding_remaining = current_binding.GetDescriptorCount(); // First, it's legal to offset beyond your own binding so handle that case // Really this is just searching for the binding in which the update begins and adjusting offset accordingly while (offset >= binding_remaining && !current_binding.AtEnd()) { // Advance to next binding, decrement offset by binding size offset -= binding_remaining; ++current_binding; binding_remaining = current_binding.GetDescriptorCount(); // Accessors are safe if AtEnd } assert(!current_binding.AtEnd()); // As written assumes range check has been made before calling binding_remaining -= offset; while (update_count > binding_remaining) { // While our updates overstep current binding // Verify next consecutive binding matches type, stage flags & immutable sampler use auto next_binding = current_binding.Next(); if (!current_binding.IsConsistent(next_binding)) { std::stringstream error_str; error_str << "Attempting " << type; if (current_binding.Layout()->IsPushDescriptor()) { error_str << " push descriptors"; } else { error_str << " descriptor set " << set; } error_str << " binding #" << orig_binding.Binding() << " with #" << update_count << " descriptors being updated but this update oversteps the bounds of this binding and the next binding is " "not consistent with current binding so this update is invalid."; *error_msg = error_str.str(); return false; } current_binding = next_binding; // For sake of this check consider the bindings updated and 
grab count for next binding update_count -= binding_remaining; binding_remaining = current_binding.GetDescriptorCount(); } return true; } // Validate the state for a given write update but don't actually perform the update // If an error would occur for this update, return false and fill in details in error_msg string bool cvdescriptorset::ValidateWriteUpdate(const DescriptorSet *dest_set, const debug_report_data *report_data, const VkWriteDescriptorSet *update, const char *func_name, std::string *error_code, std::string *error_msg) { const auto dest_layout = dest_set->GetLayout(); // Verify dst layout still valid if (dest_layout->IsDestroyed()) { *error_code = "VUID-VkWriteDescriptorSet-dstSet-00320"; string_sprintf(error_msg, "Cannot call %s to perform write update on %s which has been destroyed", func_name, dest_set->StringifySetAndLayout().c_str()); return false; } // Verify dst binding exists if (!dest_layout->HasBinding(update->dstBinding)) { *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00315"; std::stringstream error_str; error_str << dest_set->StringifySetAndLayout() << " does not have binding " << update->dstBinding; *error_msg = error_str.str(); return false; } DescriptorSetLayout::ConstBindingIterator dest(dest_layout.get(), update->dstBinding); // Make sure binding isn't empty if (0 == dest.GetDescriptorCount()) { *error_code = "VUID-VkWriteDescriptorSet-dstBinding-00316"; std::stringstream error_str; error_str << dest_set->StringifySetAndLayout() << " cannot update binding " << update->dstBinding << " that has 0 descriptors"; *error_msg = error_str.str(); return false; } // Verify idle ds if (dest_set->in_use.load() && !(dest.GetDescriptorBindingFlags() & (VK_DESCRIPTOR_BINDING_UPDATE_UNUSED_WHILE_PENDING_BIT_EXT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT_EXT))) { // TODO : Re-using Free Idle error code, need write update idle error code *error_code = "VUID-vkFreeDescriptorSets-pDescriptorSets-00309"; std::stringstream error_str; error_str << "Cannot call " << func_name << " to perform write update on " << dest_set->StringifySetAndLayout() << " that is in use by a command buffer"; *error_msg = error_str.str(); return false; } // We know that binding is valid, verify update and do update on each descriptor auto start_idx = dest.GetGlobalIndexRange().start + update->dstArrayElement; auto type = dest.GetType(); if (type != update->descriptorType) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00319"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with type " << string_VkDescriptorType(type) << " but update type is " << string_VkDescriptorType(update->descriptorType); *error_msg = error_str.str(); return false; } auto total_descriptors = dest_layout->GetTotalDescriptorCount(); if (update->descriptorCount > (total_descriptors - start_idx)) { *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << total_descriptors - start_idx << " descriptors in that binding and all successive bindings of the set, but update of " << update->descriptorCount << " descriptors combined with update array element offset of " << update->dstArrayElement << " oversteps the available number of consecutive descriptors"; *error_msg = error_str.str(); return false; } if (type ==
VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) { if ((update->dstArrayElement % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02219"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "dstArrayElement " << update->dstArrayElement << " not a multiple of 4"; *error_msg = error_str.str(); return false; } if ((update->descriptorCount % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02220"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "descriptorCount " << update->descriptorCount << " not a multiple of 4"; *error_msg = error_str.str(); return false; } const auto *write_inline_info = lvl_find_in_chain<VkWriteDescriptorSetInlineUniformBlockEXT>(update->pNext); if (!write_inline_info || write_inline_info->dataSize != update->descriptorCount) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-02221"; std::stringstream error_str; if (!write_inline_info) { error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT missing"; } else { error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize << " not equal to " << "VkWriteDescriptorSet descriptorCount " << update->descriptorCount; } *error_msg = error_str.str(); return false; } // This error is probably unreachable due to the previous two errors if (write_inline_info && (write_inline_info->dataSize % 4) != 0) { *error_code = "VUID-VkWriteDescriptorSetInlineUniformBlockEXT-dataSize-02222"; std::stringstream error_str; error_str << "Attempting write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " with " << "VkWriteDescriptorSetInlineUniformBlockEXT dataSize " << write_inline_info->dataSize << " not a multiple of 4"; *error_msg = error_str.str(); return false; } } // Verify consecutive bindings match (if needed) if (!VerifyUpdateConsistency(DescriptorSetLayout::ConstBindingIterator(dest_layout.get(), update->dstBinding), update->dstArrayElement, update->descriptorCount, "write update to", dest_set->GetSet(), error_msg)) { // TODO : Should break out "consecutive binding updates" language into valid usage statements *error_code = "VUID-VkWriteDescriptorSet-dstArrayElement-00321"; return false; } // Update is within bounds and consistent so last step is to validate update contents if (!VerifyWriteUpdateContents(dest_set, update, start_idx, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Write update to " << dest_set->StringifySetAndLayout() << " binding #" << update->dstBinding << " failed with error message: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } // All checks passed, update is clean return true; } // Verify that the contents of the update are ok, but don't perform actual update bool cvdescriptorset::VerifyWriteUpdateContents(const DescriptorSet *dest_set, const VkWriteDescriptorSet *update, const uint32_t index, const char *func_name, std::string *error_code, std::string *error_msg) { auto *device_data = dest_set->GetDeviceData(); switch (update->descriptorType) { case 
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { // Validate image auto image_view = update->pImageInfo[di].imageView; auto image_layout = update->pImageInfo[di].imageLayout; if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, device_data, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to combined image sampler descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } if (device_data->device_extensions.vk_khr_sampler_ycbcr_conversion) { ImageSamplerDescriptor *desc = (ImageSamplerDescriptor *)dest_set->GetDescriptorFromGlobalIndex(index + di); if (desc->IsImmutableSampler()) { auto sampler_state = device_data->GetSamplerState(desc->GetSampler()); auto iv_state = device_data->GetImageViewState(image_view); if (iv_state && sampler_state) { if (iv_state->samplerConversion != sampler_state->samplerConversion) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01948"; std::stringstream error_str; error_str << "Attempted write update to combined image sampler and image view and sampler ycbcr " "conversions are not identical, sampler: " << desc->GetSampler() << " image view: " << iv_state->image_view << "."; *error_msg = error_str.str(); return false; } } } else { auto iv_state = device_data->GetImageViewState(image_view); if (iv_state && (iv_state->samplerConversion != VK_NULL_HANDLE)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-01947"; std::stringstream error_str; error_str << "Because dstSet (" << update->dstSet << ") is bound to image view (" << iv_state->image_view << ") that includes a YCBCR conversion, it must have been allocated with a layout that " "includes an immutable sampler."; *error_msg = error_str.str(); return false; } } } } } // fall through case VK_DESCRIPTOR_TYPE_SAMPLER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { SamplerDescriptor *desc = (SamplerDescriptor *)dest_set->GetDescriptorFromGlobalIndex(index + di); if (!desc->IsImmutableSampler()) { if (!ValidateSampler(update->pImageInfo[di].sampler, device_data)) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00325"; std::stringstream error_str; error_str << "Attempted write update to sampler descriptor with invalid sampler: " << update->pImageInfo[di].sampler << "."; *error_msg = error_str.str(); return false; } } else { // TODO : Warn here } } break; } case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto image_view = update->pImageInfo[di].imageView; auto image_layout = update->pImageInfo[di].imageLayout; if (!ValidateImageUpdate(image_view, image_layout, update->descriptorType, device_data, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to image descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { auto buffer_view = update->pTexelBufferView[di]; auto bv_state = device_data->GetBufferViewState(buffer_view); if (!bv_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor with invalid buffer view: " << 
buffer_view; *error_msg = error_str.str(); return false; } auto buffer = bv_state->create_info.buffer; auto buffer_state = device_data->GetBufferState(buffer); // Verify that buffer underlying the view hasn't been destroyed prematurely if (!buffer_state) { *error_code = "VUID-VkWriteDescriptorSet-descriptorType-00323"; std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor failed because underlying buffer (" << buffer << ") has been destroyed: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } else if (!ValidateBufferUsage(buffer_state, update->descriptorType, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to texel buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { for (uint32_t di = 0; di < update->descriptorCount; ++di) { if (!ValidateBufferUpdate(device_data, update->pBufferInfo + di, update->descriptorType, func_name, error_code, error_msg)) { std::stringstream error_str; error_str << "Attempted write update to buffer descriptor failed due to: " << error_msg->c_str(); *error_msg = error_str.str(); return false; } } break; } case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT: break; case VK_DESCRIPTOR_TYPE_ACCELERATION_STRUCTURE_NV: // XXX TODO break; default: assert(0); // We've already verified update type so should never get here break; } // All checks passed so update contents are good return true; }
1
10,966
Doing this as a binding map was insane, but with the ConstBindingIterator to hide the binding->index gunk and avoid the hashes during access, it sticks out.
KhronosGroup-Vulkan-ValidationLayers
cpp
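The review above refers to the consistency walk in VerifyUpdateConsistency. Purely as an illustration, and not the validation layer's actual code, the following Python sketch shows the same idea of stepping through consecutive bindings without per-descriptor hash lookups; the Binding class, its fields, and the function name are hypothetical stand-ins for what DescriptorSetLayout::ConstBindingIterator provides.

from dataclasses import dataclass
from typing import List

@dataclass
class Binding:
    descriptor_count: int
    descriptor_type: str
    stage_flags: int
    uses_immutable_samplers: bool

    def is_consistent(self, other: "Binding") -> bool:
        # An update may cross into the next binding only if these match.
        return (self.descriptor_type == other.descriptor_type
                and self.stage_flags == other.stage_flags
                and self.uses_immutable_samplers == other.uses_immutable_samplers)

def verify_update_consistency(bindings: List[Binding], start: int,
                              offset: int, update_count: int) -> bool:
    """Return True if an update of update_count descriptors, beginning at
    offset into bindings[start], only crosses mutually consistent bindings.
    Assumes the overall range was already bounds-checked, as in the layer."""
    i = start
    remaining = bindings[i].descriptor_count
    # It is legal to offset past the starting binding, so first find the
    # binding in which the update actually begins.
    while offset >= remaining and i + 1 < len(bindings):
        offset -= remaining
        i += 1
        remaining = bindings[i].descriptor_count
    remaining -= offset
    # Every binding boundary the update itself crosses must be consistent.
    while update_count > remaining:
        if i + 1 >= len(bindings) or not bindings[i].is_consistent(bindings[i + 1]):
            return False
        update_count -= remaining
        i += 1
        remaining = bindings[i].descriptor_count
    return True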
@@ -27,6 +27,8 @@ TEST_EVENT_PATTERN = { 'Detail': [EVENT_DETAIL] } +TEST_EVENT_PATTERN_1 = {'detail': {'EventType': ['0', '1']}} + class EventsTest(unittest.TestCase): def setUp(self):
1
# -*- coding: utf-8 -*- import os import json import uuid import unittest from localstack import config from localstack.utils import testutil from localstack.utils.aws import aws_stack from localstack.utils.common import ( load_file, retry, short_uid, get_free_tcp_port, wait_for_port_open, to_str, get_service_protocol ) from localstack.services.infra import start_proxy from localstack.utils.testutil import check_expected_lambda_log_events_length from localstack.services.generic_proxy import ProxyListener from localstack.services.events.events_listener import EVENTS_TMP_DIR from localstack.services.awslambda.lambda_utils import LAMBDA_RUNTIME_PYTHON36 THIS_FOLDER = os.path.dirname(os.path.realpath(__file__)) TEST_EVENT_BUS_NAME = 'command-bus-dev' EVENT_DETAIL = '{\"command\":\"update-account\",\"payload\":{\"acc_id\":\"0a787ecb-4015\",\"sf_id\":\"baz\"}}' TEST_EVENT_PATTERN = { 'Source': ['core.update-account-command'], 'detail-type': ['core.update-account-command'], 'Detail': [EVENT_DETAIL] } class EventsTest(unittest.TestCase): def setUp(self): self.events_client = aws_stack.connect_to_service('events') self.iam_client = aws_stack.connect_to_service('iam') self.sns_client = aws_stack.connect_to_service('sns') self.sfn_client = aws_stack.connect_to_service('stepfunctions') self.sqs_client = aws_stack.connect_to_service('sqs') def assertIsValidEvent(self, event): expected_fields = ('version', 'id', 'detail-type', 'source', 'account', 'time', 'region', 'resources', 'detail') for field in expected_fields: self.assertIn(field, event) def test_put_rule(self): rule_name = 'rule-{}'.format(short_uid()) self.events_client.put_rule(Name=rule_name, EventPattern=json.dumps(TEST_EVENT_PATTERN)) rules = self.events_client.list_rules(NamePrefix=rule_name)['Rules'] self.assertEqual(1, len(rules)) self.assertEqual(TEST_EVENT_PATTERN, json.loads(rules[0]['EventPattern'])) # clean up self.events_client.delete_rule(Name=rule_name, Force=True) def test_events_written_to_disk_are_timestamp_prefixed_for_chronological_ordering(self): event_type = str(uuid.uuid4()) event_details_to_publish = list(map(lambda n: 'event %s' % n, range(10))) for detail in event_details_to_publish: self.events_client.put_events(Entries=[{ 'Source': 'unittest', 'Resources': [], 'DetailType': event_type, 'Detail': json.dumps(detail) }]) sorted_events_written_to_disk = map( lambda filename: json.loads(str(load_file(os.path.join(EVENTS_TMP_DIR, filename)))), sorted(os.listdir(EVENTS_TMP_DIR)) ) sorted_events = list(filter(lambda event: event['DetailType'] == event_type, sorted_events_written_to_disk)) self.assertListEqual(event_details_to_publish, list(map(lambda event: json.loads(event['Detail']), sorted_events))) def test_list_tags_for_resource(self): rule_name = 'rule-{}'.format(short_uid()) rule = self.events_client.put_rule( Name=rule_name, EventPattern=json.dumps(TEST_EVENT_PATTERN) ) rule_arn = rule['RuleArn'] expected = [{'Key': 'key1', 'Value': 'value1'}, {'Key': 'key2', 'Value': 'value2'}] # insert two tags, verify both are visible self.events_client.tag_resource(ResourceARN=rule_arn, Tags=expected) actual = self.events_client.list_tags_for_resource(ResourceARN=rule_arn)['Tags'] self.assertEqual(expected, actual) # remove 'key2', verify only 'key1' remains expected = [{'Key': 'key1', 'Value': 'value1'}] self.events_client.untag_resource(ResourceARN=rule_arn, TagKeys=['key2']) actual = self.events_client.list_tags_for_resource(ResourceARN=rule_arn)['Tags'] self.assertEqual(expected, actual) # clean up 
self.events_client.delete_rule(Name=rule_name, Force=True) def test_put_events_with_target_sqs(self): queue_name = 'queue-{}'.format(short_uid()) rule_name = 'rule-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) bus_name = 'bus-{}'.format(short_uid()) sqs_client = aws_stack.connect_to_service('sqs') queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] queue_arn = aws_stack.sqs_queue_arn(queue_name) self.events_client.create_event_bus(Name=bus_name) self.events_client.put_rule( Name=rule_name, EventBusName=bus_name, EventPattern=json.dumps(TEST_EVENT_PATTERN) ) rs = self.events_client.put_targets( Rule=rule_name, EventBusName=bus_name, Targets=[{ 'Id': target_id, 'Arn': queue_arn }] ) self.assertIn('FailedEntryCount', rs) self.assertIn('FailedEntries', rs) self.assertEqual(rs['FailedEntryCount'], 0) self.assertEqual(rs['FailedEntries'], []) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': TEST_EVENT_PATTERN['Source'][0], 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) def get_message(queue_url): resp = sqs_client.receive_message(QueueUrl=queue_url) return resp['Messages'] messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(len(messages), 1) actual_event = json.loads(messages[0]['Body']) self.assertIsValidEvent(actual_event) self.assertEqual(actual_event['detail'], TEST_EVENT_PATTERN['Detail'][0]) # clean up self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url) def test_put_events_with_target_sns(self): queue_name = 'test-%s' % short_uid() rule_name = 'rule-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) bus_name = 'bus-{}'.format(short_uid()) sns_client = aws_stack.connect_to_service('sns') sqs_client = aws_stack.connect_to_service('sqs') topic_name = 'topic-{}'.format(short_uid()) topic_arn = sns_client.create_topic(Name=topic_name)['TopicArn'] queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] queue_arn = aws_stack.sqs_queue_arn(queue_name) sns_client.subscribe(TopicArn=topic_arn, Protocol='sqs', Endpoint=queue_arn) self.events_client.create_event_bus(Name=bus_name) self.events_client.put_rule( Name=rule_name, EventBusName=bus_name, EventPattern=json.dumps(TEST_EVENT_PATTERN) ) rs = self.events_client.put_targets( Rule=rule_name, EventBusName=bus_name, Targets=[ { 'Id': target_id, 'Arn': topic_arn } ] ) self.assertIn('FailedEntryCount', rs) self.assertIn('FailedEntries', rs) self.assertEqual(rs['FailedEntryCount'], 0) self.assertEqual(rs['FailedEntries'], []) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': TEST_EVENT_PATTERN['Source'][0], 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) def get_message(queue_url): resp = sqs_client.receive_message(QueueUrl=queue_url) return resp['Messages'] messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(len(messages), 1) actual_event = json.loads(messages[0]['Body']).get('Message') self.assertIsValidEvent(actual_event) self.assertEqual(json.loads(actual_event).get('detail'), TEST_EVENT_PATTERN['Detail'][0]) # clean up sns_client.delete_topic(TopicArn=topic_arn) self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url) def test_put_events_into_event_bus(self): queue_name = 'queue-{}'.format(short_uid()) rule_name = 'rule-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) bus_name_1 = 
'bus1-{}'.format(short_uid()) bus_name_2 = 'bus2-{}'.format(short_uid()) sqs_client = aws_stack.connect_to_service('sqs') queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] queue_arn = aws_stack.sqs_queue_arn(queue_name) self.events_client.create_event_bus(Name=bus_name_1) resp = self.events_client.create_event_bus(Name=bus_name_2) self.events_client.put_rule(Name=rule_name, EventBusName=bus_name_1,) self.events_client.put_targets( Rule=rule_name, EventBusName=bus_name_1, Targets=[{ 'Id': target_id, 'Arn': resp.get('EventBusArn') }] ) self.events_client.put_targets( Rule=rule_name, EventBusName=bus_name_2, Targets=[{ 'Id': target_id, 'Arn': queue_arn }] ) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name_1, 'Source': TEST_EVENT_PATTERN['Source'][0], 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) def get_message(queue_url): resp = sqs_client.receive_message(QueueUrl=queue_url) return resp['Messages'] messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(len(messages), 1) actual_event = json.loads(messages[0]['Body']) self.assertIsValidEvent(actual_event) self.assertEqual(actual_event['detail'], TEST_EVENT_PATTERN['Detail'][0]) # clean up self.cleanup(bus_name_1, rule_name, target_id) self.cleanup(bus_name_2) sqs_client.delete_queue(QueueUrl=queue_url) def test_put_events_with_target_lambda(self): rule_name = 'rule-{}'.format(short_uid()) function_name = 'lambda-func-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) bus_name = 'bus-{}'.format(short_uid()) handler_file = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_echo.py') rs = testutil.create_lambda_function( handler_file=handler_file, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36) func_arn = rs['CreateFunctionResponse']['FunctionArn'] self.events_client.create_event_bus(Name=bus_name) self.events_client.put_rule( Name=rule_name, EventBusName=bus_name, EventPattern=json.dumps(TEST_EVENT_PATTERN) ) rs = self.events_client.put_targets( Rule=rule_name, EventBusName=bus_name, Targets=[{ 'Id': target_id, 'Arn': func_arn }] ) self.assertIn('FailedEntryCount', rs) self.assertIn('FailedEntries', rs) self.assertEqual(rs['FailedEntryCount'], 0) self.assertEqual(rs['FailedEntries'], []) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': TEST_EVENT_PATTERN['Source'][0], 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) # Get lambda's log events events = retry(check_expected_lambda_log_events_length, retries=3, sleep=1, function_name=function_name, expected_length=1) actual_event = events[0] self.assertIsValidEvent(actual_event) self.assertDictEqual(json.loads(actual_event['detail']), json.loads(TEST_EVENT_PATTERN['Detail'][0])) # clean up testutil.delete_lambda_function(function_name) self.cleanup(bus_name, rule_name, target_id) def test_rule_disable(self): rule_name = 'rule-{}'.format(short_uid()) self.events_client.put_rule(Name=rule_name, ScheduleExpression='rate(1 minutes)') response = self.events_client.list_rules() self.assertEqual(response['Rules'][0]['State'], 'ENABLED') response = self.events_client.disable_rule(Name=rule_name) response = self.events_client.list_rules(NamePrefix=rule_name) self.assertEqual(response['Rules'][0]['State'], 'DISABLED') # clean up self.events_client.delete_rule(Name=rule_name, Force=True) def test_scheduled_expression_events(self): class 
HttpEndpointListener(ProxyListener): def forward_request(self, method, path, data, headers): event = json.loads(to_str(data)) events.append(event) return 200 local_port = get_free_tcp_port() proxy = start_proxy(local_port, update_listener=HttpEndpointListener()) wait_for_port_open(local_port) topic_name = 'topic-{}'.format(short_uid()) queue_name = 'queue-{}'.format(short_uid()) fifo_queue_name = 'queue-{}.fifo'.format(short_uid()) rule_name = 'rule-{}'.format(short_uid()) endpoint = '{}://{}:{}'.format(get_service_protocol(), config.LOCALSTACK_HOSTNAME, local_port) sm_role_arn = aws_stack.role_arn('sfn_role') sm_name = 'state-machine-{}'.format(short_uid()) topic_target_id = 'target-{}'.format(short_uid()) sm_target_id = 'target-{}'.format(short_uid()) queue_target_id = 'target-{}'.format(short_uid()) fifo_queue_target_id = 'target-{}'.format(short_uid()) events = [] state_machine_definition = """ { "StartAt": "Hello", "States": { "Hello": { "Type": "Pass", "Result": "World", "End": true } } } """ state_machine_arn = self.sfn_client.create_state_machine( name=sm_name, definition=state_machine_definition, roleArn=sm_role_arn)['stateMachineArn'] topic_arn = self.sns_client.create_topic(Name=topic_name)['TopicArn'] self.sns_client.subscribe(TopicArn=topic_arn, Protocol='http', Endpoint=endpoint) queue_url = self.sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] fifo_queue_url = self.sqs_client.create_queue( QueueName=fifo_queue_name, Attributes={'FifoQueue': 'true'})['QueueUrl'] queue_arn = aws_stack.sqs_queue_arn(queue_name) fifo_queue_arn = aws_stack.sqs_queue_arn(fifo_queue_name) event = { 'env': 'testing' } self.events_client.put_rule(Name=rule_name, ScheduleExpression='rate(1 minutes)') self.events_client.put_targets( Rule=rule_name, Targets=[{ 'Id': topic_target_id, 'Arn': topic_arn, 'Input': json.dumps(event) }, { 'Id': sm_target_id, 'Arn': state_machine_arn, 'Input': json.dumps(event) }, { 'Id': queue_target_id, 'Arn': queue_arn, 'Input': json.dumps(event) }, { 'Id': fifo_queue_target_id, 'Arn': fifo_queue_arn, 'Input': json.dumps(event), 'SqsParameters': { 'MessageGroupId': '123' } }] ) def received(q_urls): # state machine got executed executions = self.sfn_client.list_executions(stateMachineArn=state_machine_arn)['executions'] self.assertGreaterEqual(len(executions), 1) # http endpoint got events self.assertGreaterEqual(len(events), 2) notifications = [event['Message'] for event in events if event['Type'] == 'Notification'] self.assertGreaterEqual(len(notifications), 1) # get state machine execution detail execution_arn = executions[0]['executionArn'] execution_input = self.sfn_client.describe_execution(executionArn=execution_arn)['input'] all_msgs = [] # get message from queue for url in q_urls: msgs = self.sqs_client.receive_message(QueueUrl=url).get('Messages', []) self.assertGreaterEqual(len(msgs), 1) all_msgs.append(msgs[0]) return execution_input, notifications[0], all_msgs execution_input, notification, msgs_received = retry( received, retries=5, sleep=15, q_urls=[queue_url, fifo_queue_url] ) self.assertEqual(json.loads(notification), event) self.assertEqual(json.loads(execution_input), event) for msg_received in msgs_received: self.assertEqual(json.loads(msg_received['Body']), event) # clean up proxy.stop() self.cleanup(None, rule_name, target_ids=[topic_target_id, sm_target_id], queue_url=queue_url) self.sns_client.delete_topic(TopicArn=topic_arn) self.sfn_client.delete_state_machine(stateMachineArn=state_machine_arn) def 
test_put_events_with_target_firehose(self): s3_bucket = 's3-{}'.format(short_uid()) s3_prefix = 'testeventdata' stream_name = 'firehose-{}'.format(short_uid()) rule_name = 'rule-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) bus_name = 'bus-{}'.format(short_uid()) # create firehose target bucket s3_client = aws_stack.connect_to_service('s3') s3_client.create_bucket(Bucket=s3_bucket) # create firehose delivery stream to s3 firehose_client = aws_stack.connect_to_service('firehose') stream = firehose_client.create_delivery_stream( DeliveryStreamName=stream_name, S3DestinationConfiguration={ 'RoleARN': aws_stack.iam_resource_arn('firehose'), 'BucketARN': aws_stack.s3_bucket_arn(s3_bucket), 'Prefix': s3_prefix } ) stream_arn = stream['DeliveryStreamARN'] self.events_client.create_event_bus(Name=bus_name) self.events_client.put_rule( Name=rule_name, EventBusName=bus_name, EventPattern=json.dumps(TEST_EVENT_PATTERN) ) rs = self.events_client.put_targets( Rule=rule_name, EventBusName=bus_name, Targets=[{ 'Id': target_id, 'Arn': stream_arn }] ) self.assertIn('FailedEntryCount', rs) self.assertIn('FailedEntries', rs) self.assertEqual(rs['FailedEntryCount'], 0) self.assertEqual(rs['FailedEntries'], []) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': TEST_EVENT_PATTERN['Source'][0], 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) # run tests bucket_contents = s3_client.list_objects(Bucket=s3_bucket)['Contents'] self.assertEqual(len(bucket_contents), 1) key = bucket_contents[0]['Key'] s3_object = s3_client.get_object(Bucket=s3_bucket, Key=key) actual_event = json.loads(s3_object['Body'].read().decode()) self.assertIsValidEvent(actual_event) self.assertEqual(actual_event['detail'], TEST_EVENT_PATTERN['Detail'][0]) # clean up firehose_client.delete_delivery_stream(DeliveryStreamName=stream_name) # empty and delete bucket s3_client.delete_object(Bucket=s3_bucket, Key=key) s3_client.delete_bucket(Bucket=s3_bucket) self.cleanup(bus_name, rule_name, target_id) def test_put_events_with_target_sqs_new_region(self): self.events_client = aws_stack.connect_to_service('events', region_name='eu-west-1') queue_name = 'queue-{}'.format(short_uid()) rule_name = 'rule-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) sqs_client = aws_stack.connect_to_service('sqs', region_name='eu-west-1') sqs_client.create_queue(QueueName=queue_name) queue_arn = aws_stack.sqs_queue_arn(queue_name) self.events_client.put_rule(Name=rule_name) self.events_client.put_targets( Rule=rule_name, Targets=[{ 'Id': target_id, 'Arn': queue_arn }] ) response = self.events_client.put_events( Entries=[{ 'Source': 'com.mycompany.myapp', 'Detail': '{ "key1": "value1", "key": "value2" }', 'Resources': [], 'DetailType': 'myDetailType' }] ) self.assertIn('Entries', response) self.assertEqual(len(response.get('Entries')), 1) self.assertIn('EventId', response.get('Entries')[0]) def test_put_events_with_input_path(self): queue_name = 'queue-{}'.format(short_uid()) rule_name = 'rule-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) bus_name = 'bus-{}'.format(short_uid()) sqs_client = aws_stack.connect_to_service('sqs') queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] queue_arn = aws_stack.sqs_queue_arn(queue_name) self.events_client.create_event_bus(Name=bus_name) self.events_client.put_rule( Name=rule_name, EventBusName=bus_name, EventPattern=json.dumps(TEST_EVENT_PATTERN) ) 
self.events_client.put_targets( Rule=rule_name, EventBusName=bus_name, Targets=[ { 'Id': target_id, 'Arn': queue_arn, 'InputPath': '$.detail' } ] ) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': TEST_EVENT_PATTERN['Source'][0], 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) def get_message(queue_url): resp = sqs_client.receive_message(QueueUrl=queue_url) return resp.get('Messages') messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(len(messages), 1) self.assertEqual(json.loads(messages[0].get('Body')), EVENT_DETAIL) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': 'dummySource', 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(messages, None) # clean up self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url) def test_put_events_with_input_path_multiple(self): queue_name = 'queue-{}'.format(short_uid()) queue_name_1 = 'queue-{}'.format(short_uid()) rule_name = 'rule-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) target_id_1 = 'target-{}'.format(short_uid()) bus_name = 'bus-{}'.format(short_uid()) sqs_client = aws_stack.connect_to_service('sqs') queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] queue_arn = aws_stack.sqs_queue_arn(queue_name) queue_url_1 = sqs_client.create_queue(QueueName=queue_name_1)['QueueUrl'] queue_arn_1 = aws_stack.sqs_queue_arn(queue_name_1) self.events_client.create_event_bus(Name=bus_name) self.events_client.put_rule( Name=rule_name, EventBusName=bus_name, EventPattern=json.dumps(TEST_EVENT_PATTERN) ) self.events_client.put_targets( Rule=rule_name, EventBusName=bus_name, Targets=[ { 'Id': target_id, 'Arn': queue_arn, 'InputPath': '$.detail' }, { 'Id': target_id_1, 'Arn': queue_arn_1, } ] ) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': TEST_EVENT_PATTERN['Source'][0], 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) def get_message(queue_url): resp = sqs_client.receive_message(QueueUrl=queue_url) return resp.get('Messages') messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(len(messages), 1) self.assertEqual(json.loads(messages[0].get('Body')), EVENT_DETAIL) messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url_1) self.assertEqual(len(messages), 1) self.assertEqual(json.loads(messages[0].get('Body')).get('detail'), EVENT_DETAIL) self.events_client.put_events( Entries=[{ 'EventBusName': bus_name, 'Source': 'dummySource', 'DetailType': TEST_EVENT_PATTERN['detail-type'][0], 'Detail': json.dumps(TEST_EVENT_PATTERN['Detail'][0]) }] ) messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(messages, None) # clean up self.cleanup(bus_name, rule_name, target_id, queue_url=queue_url) def test_put_event_without_source(self): self.events_client = aws_stack.connect_to_service('events', region_name='eu-west-1') response = self.events_client.put_events(Entries=[{'DetailType': 'Test', 'Detail': '{}'}]) self.assertIn('Entries', response) def test_put_event_without_detail(self): self.events_client = aws_stack.connect_to_service('events', region_name='eu-west-1') response = self.events_client.put_events( Entries=[ { 'DetailType': 'Test', } ] ) 
self.assertIn('Entries', response) def test_put_event_with_content_base_rule_in_pattern(self): queue_name = 'queue-{}'.format(short_uid()) rule_name = 'rule-{}'.format(short_uid()) target_id = 'target-{}'.format(short_uid()) sqs_client = aws_stack.connect_to_service('sqs') queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] queue_arn = aws_stack.sqs_queue_arn(queue_name) pattern = { 'Source': [{'exists': True}], 'detail-type': [{'prefix': 'core.app'}], 'Detail': { 'decription': ['this-is-event-details'], 'amount': [200], 'salary': [2000, 4000], 'env': ['dev', 'prod'], 'user': ['user1', 'user2', 'user3'], 'admins': ['skyli', {'prefix': 'hey'}, {'prefix': 'ad'}], 'test1': [{'anything-but': 200}], 'test2': [{'anything-but': 'test2'}], 'test3': [{'anything-but': ['test3', 'test33']}], 'test4': [{'anything-but': {'prefix': 'test4'}}], 'ip': [{'cidr': '10.102.1.0/24'}], 'num-test1': [{'numeric': ['<', 200]}], 'num-test2': [{'numeric': ['<=', 200]}], 'num-test3': [{'numeric': ['>', 200]}], 'num-test4': [{'numeric': ['>=', 200]}], 'num-test5': [{'numeric': ['>=', 200, '<=', 500]}], 'num-test6': [{'numeric': ['>', 200, '<', 500]}], 'num-test7': [{'numeric': ['>=', 200, '<', 500]}] } } event = { 'EventBusName': TEST_EVENT_BUS_NAME, 'Source': 'core.update-account-command', 'DetailType': 'core.app.backend', 'Detail': json.dumps({ 'decription': 'this-is-event-details', 'amount': 200, 'salary': 2000, 'env': 'prod', 'user': ['user4', 'user3'], 'admins': 'admin', 'test1': 300, 'test2': 'test22', 'test3': 'test333', 'test4': 'this test4', 'ip': '10.102.1.100', 'num-test1': 100, 'num-test2': 200, 'num-test3': 300, 'num-test4': 200, 'num-test5': 500, 'num-test6': 300, 'num-test7': 300 }) } self.events_client.create_event_bus(Name=TEST_EVENT_BUS_NAME) self.events_client.put_rule( Name=rule_name, EventBusName=TEST_EVENT_BUS_NAME, EventPattern=json.dumps(pattern) ) self.events_client.put_targets( Rule=rule_name, EventBusName=TEST_EVENT_BUS_NAME, Targets=[ { 'Id': target_id, 'Arn': queue_arn, 'InputPath': '$.detail' } ] ) self.events_client.put_events(Entries=[event]) def get_message(queue_url): resp = sqs_client.receive_message(QueueUrl=queue_url) return resp.get('Messages') messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(len(messages), 1) self.assertEqual(json.loads(messages[0].get('Body')), json.loads(event['Detail'])) event_details = json.loads(event['Detail']) event_details['admins'] = 'not_admin' event['Detail'] = json.dumps(event_details) self.events_client.put_events(Entries=[event]) messages = retry(get_message, retries=3, sleep=1, queue_url=queue_url) self.assertEqual(messages, None) # clean up self.cleanup(TEST_EVENT_BUS_NAME, rule_name, target_id, queue_url=queue_url) def cleanup(self, bus_name, rule_name=None, target_ids=None, queue_url=None): kwargs = {'EventBusName': bus_name} if bus_name else {} if target_ids: target_ids = target_ids if isinstance(target_ids, list) else [target_ids] self.events_client.remove_targets(Rule=rule_name, Ids=target_ids, Force=True, **kwargs) if rule_name: self.events_client.delete_rule(Name=rule_name, Force=True, **kwargs) if bus_name: self.events_client.delete_event_bus(Name=bus_name) if queue_url: sqs_client = aws_stack.connect_to_service('sqs') sqs_client.delete_queue(QueueUrl=queue_url)
1
12,413
nitpick: I'd rather move this into the test method directly (we should avoid variables at the root scope if they are not reused in multiple places). (not critical, though - shouldn't hold back the merge..)
localstack-localstack
py
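The nitpick above suggests keeping a pattern that only one test uses inside that test rather than at module scope. A minimal sketch of that shape, with made-up class and method names rather than the actual localstack test, might look like this:

import json
import unittest

class EventsPatternScopeTest(unittest.TestCase):
    def test_single_use_pattern_defined_inside_the_test(self):
        # Defined locally because no other test reuses it.
        event_pattern = {'detail': {'EventType': ['0', '1']}}
        serialized = json.dumps(event_pattern)
        # The real test would pass `serialized` as the EventPattern argument to
        # put_rule(); here we only check that the pattern round-trips.
        self.assertEqual(event_pattern, json.loads(serialized))

if __name__ == '__main__':
    unittest.main()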
@@ -212,7 +212,12 @@ namespace TestPerf // Note: tests in the "TestPerf" namespace only run when the { RunUI(() => importPeptideSearchDlg.ClickNextButtonNoCheck()); } - // Modifications are already set up, so that page should get skipped. + // Skip Match Modifications page. + RunUI(() => + { + AssertEx.IsTrue(importPeptideSearchDlg.CurrentPage == ImportPeptideSearchDlg.Pages.match_modifications_page); + AssertEx.IsTrue(importPeptideSearchDlg.ClickNextButton()); + }); RunUI(() => importPeptideSearchDlg.FullScanSettingsControl.PrecursorCharges = new []{2,3,4,5}); RunUI(() => importPeptideSearchDlg.FullScanSettingsControl.PrecursorMassAnalyzer = FullScanMassAnalyzerType.tof); RunUI(() => importPeptideSearchDlg.FullScanSettingsControl.IonMobilityFiltering.IsUseSpectralLibraryIonMobilities = useDriftTimes);
1
/* * Original author: Brian Pratt <bspratt .at. u.washington.edu>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2016 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.Linq; using Microsoft.VisualStudio.TestTools.UnitTesting; using pwiz.Common.Chemistry; using pwiz.Common.SystemUtil; using pwiz.ProteowizardWrapper; using pwiz.Skyline.Alerts; using pwiz.Skyline.FileUI; using pwiz.Skyline.FileUI.PeptideSearch; using pwiz.Skyline.Model.DocSettings; using pwiz.Skyline.Model.IonMobility; using pwiz.Skyline.Model.Lib; using pwiz.Skyline.Model.Results; using pwiz.Skyline.Properties; using pwiz.Skyline.SettingsUI; using pwiz.Skyline.SettingsUI.IonMobility; using pwiz.Skyline.Util; using pwiz.SkylineTestUtil; namespace TestPerf // Note: tests in the "TestPerf" namespace only run when the global RunPerfTests flag is set { /// <summary> /// Verify consistent import of Agilent IMS ramped CE data in concert with SpectrumMill. /// </summary> [TestClass] public class PerfImportAgilentSpectrumMillRampedIMSTest : AbstractFunctionalTestEx { private int _testCase; [TestMethod] [Timeout(6000000)] // Initial download can take a long time public void AgilentSpectrumMillSpectralLibTest() { AgilentSpectrumMillTest(2); } [TestMethod] [Timeout(6000000)] // Initial download can take a long time public void AgilentSpectrumMillRampedIMSImportTest() { AgilentSpectrumMillTest(1); } private void AgilentSpectrumMillTest(int testCase) { // RunPerfTests = true; // Uncomment this to force test to run in UI Log.AddMemoryAppender(); _testCase = testCase; TestFilesZip = _testCase ==1 ? GetPerfTestDataURL(@"PerfImportAgilentSpectrumMillRampedIMS2.zip") : GetPerfTestDataURL(@"PerfImportAgilentSpectrumMillLibTest.zip"); TestFilesPersistent = new[] { ".d" }; // List of file basenames that we'd like to unzip alongside parent zipFile, and (re)use in place MsDataFileImpl.PerfUtilFactory.IssueDummyPerfUtils = false; // Turn on performance measurement RunFunctionalTest(); var logs = Log.GetMemoryAppendedLogEvents(); var stats = PerfUtilFactory.SummarizeLogs(logs, TestFilesPersistent); // Show summary var log = new Log("Summary"); if (TestFilesDirs != null) log.Info(stats.Replace(TestFilesDir.PersistentFilesDir, "")); // Remove tempfile info from log } private string GetTestPath(string relativePath) { return TestFilesDirs[0].GetTestPath(relativePath); } private string CheckDelta(double expected, double actual, double delta, string what, string key) { return (Math.Abs(actual - expected) > delta) ? what + " " + string.Format("{0:F02}", actual) + " differs from expected " + string.Format("{0:F02}", expected) + " by " + string.Format("{0:F02}", Math.Abs(actual - expected)) + " for " + key + "\n" : string.Empty; } private string CheckDeltaPct(double expected, double actual, double delta, string what, string key) { double pctDiff = (actual == 0) ? ((expected == 0) ? 
0 : 100) : (100*Math.Abs(actual - expected)/actual); return (pctDiff > delta) ? what + " " + string.Format("{0:F02}", actual) + " differs from expected " + string.Format("{0:F02}", expected) + " by " + string.Format("{0:F02}", pctDiff) + "% for " + key + "\n" : string.Empty; } protected override void DoTest() { LibraryIonMobilityInfo driftInfoExplicitDT= null; Testit(true, ref driftInfoExplicitDT); // Read both CCS and DT if (_testCase == 1) { Testit(true, ref driftInfoExplicitDT); // Force conversion from CCS to DT, compare to previously read DT Testit(false, ref driftInfoExplicitDT); // Compare our ability to locate drift peaks, and derive CCS from those, with explicitly provided values } // ReSharper restore ConditionIsAlwaysTrueOrFalse } private void Testit( bool useDriftTimes, // If false, don't use any drift information in chromatogram extraction ref LibraryIonMobilityInfo driftInfoExplicitDT ) { bool CCSonly = driftInfoExplicitDT != null; // If true, force conversion from CCS to DT var ext = useDriftTimes ? (CCSonly ? "CCS" : "DT") : "train"; string skyfile = TestFilesDir.GetTestPath("test_" + ext + ".sky"); RunUI(() => { SkylineWindow.NewDocument(true); SkylineWindow.SaveDocument(skyfile); }); Stopwatch loadStopwatch = new Stopwatch(); loadStopwatch.Start(); // Launch import peptide search wizard WaitForDocumentLoaded(); var basename = _testCase == 1 ? "40minG_WBP_wide_z2-3_mid_BSA_5pmol_01" : "09_BSAtrypticdigest_5uL_IMQTOF_AltFramesdtramp_dAJS009"; var searchResults = GetTestPath(basename+".pep.xml"); if (CCSonly || !useDriftTimes) { // Hide the drift time info provided by SpectrumMill, so we have to convert from CCS var mzxmlFile = searchResults.Replace("pep.xml", "mzXML"); var fileContents = File.ReadAllText(mzxmlFile); fileContents = fileContents.Replace(" DT=", " xx="); if (!useDriftTimes) fileContents = fileContents.Replace(" CCS=", " xxx="); File.WriteAllText(mzxmlFile, fileContents); // Disable use of any existing ion mobility libraries var transitionSettingsDlg = ShowDialog<TransitionSettingsUI>( () => SkylineWindow.ShowTransitionSettingsUI(TransitionSettingsUI.TABS.IonMobility)); RunUI(() => { transitionSettingsDlg.IonMobilityControl.SetUseSpectralLibraryIonMobilities(false); transitionSettingsDlg.IonMobilityControl.SelectedIonMobilityLibrary = Resources.SettingsList_ELEMENT_NONE_None; transitionSettingsDlg.OkDialog(); }); WaitForClosedForm(transitionSettingsDlg); } var importPeptideSearchDlg = ShowDialog<ImportPeptideSearchDlg>(SkylineWindow.ShowImportPeptideSearchDlg); var nextFile = _testCase == 1 ? 
null : "10_BSAtrypticdigest_5uL_IMQTOF_AltFramesdtramp_dAJS010.d"; var searchResultsList = new[] {searchResults}; RunUI(() => { AssertEx.IsTrue(importPeptideSearchDlg.CurrentPage == ImportPeptideSearchDlg.Pages.spectra_page); importPeptideSearchDlg.BuildPepSearchLibControl.AddSearchFiles(searchResultsList); importPeptideSearchDlg.BuildPepSearchLibControl.CutOffScore = 0.95; importPeptideSearchDlg.BuildPepSearchLibControl.FilterForDocumentPeptides = false; }); var doc = SkylineWindow.Document; RunUI(() => AssertEx.IsTrue(importPeptideSearchDlg.ClickNextButton())); doc = WaitForDocumentChange(doc); // Verify document library was built string docLibPath = BiblioSpecLiteSpec.GetLibraryFileName(skyfile); string redundantDocLibPath = BiblioSpecLiteSpec.GetRedundantName(docLibPath); AssertEx.IsTrue(File.Exists(docLibPath) && File.Exists(redundantDocLibPath)); var librarySettings = SkylineWindow.Document.Settings.PeptideSettings.Libraries; AssertEx.IsTrue(librarySettings.HasDocumentLibrary); // We're on the "Extract Chromatograms" page of the wizard. // All the files should be found, and we should // just be able to move to the next page. RunUI(() => AssertEx.IsTrue(importPeptideSearchDlg.CurrentPage == ImportPeptideSearchDlg.Pages.chromatograms_page)); RunUI(() => { var importResultsControl = (ImportResultsControl) importPeptideSearchDlg.ImportResultsControl; importResultsControl.ExcludeSpectrumSourceFiles = true; importResultsControl.UpdateResultsFiles(new []{TestFilesDirs[0].PersistentFilesDir}, true); // Go look in the persistent files dir }); if (searchResultsList.Length > 1) { // Deal with the common name start dialog var importResultsNameDlg = ShowDialog<ImportResultsNameDlg>(importPeptideSearchDlg.ClickNextButtonNoCheck); RunUI(() => { importResultsNameDlg.NoDialog(); }); WaitForClosedForm(importResultsNameDlg); } else { RunUI(() => importPeptideSearchDlg.ClickNextButtonNoCheck()); } // Modifications are already set up, so that page should get skipped. RunUI(() => importPeptideSearchDlg.FullScanSettingsControl.PrecursorCharges = new []{2,3,4,5}); RunUI(() => importPeptideSearchDlg.FullScanSettingsControl.PrecursorMassAnalyzer = FullScanMassAnalyzerType.tof); RunUI(() => importPeptideSearchDlg.FullScanSettingsControl.IonMobilityFiltering.IsUseSpectralLibraryIonMobilities = useDriftTimes); RunUI(() => importPeptideSearchDlg.FullScanSettingsControl.IonMobilityFiltering.IonMobilityFilterResolvingPower = 50); RunUI(() => importPeptideSearchDlg.ClickNextButton()); // Accept the full scan settings // We're on the "Import FASTA" page of the wizard. 
RunUI(() => { AssertEx.IsTrue(importPeptideSearchDlg.CurrentPage == ImportPeptideSearchDlg.Pages.import_fasta_page); importPeptideSearchDlg.ImportFastaControl.SetFastaContent(GetTestPath("SwissProt.bsa-mature")); }); var peptidesPerProteinDlg = ShowDialog<PeptidesPerProteinDlg>(importPeptideSearchDlg.ClickNextButtonNoCheck); WaitForCondition(() => peptidesPerProteinDlg.DocumentFinalCalculated); OkDialog(peptidesPerProteinDlg, peptidesPerProteinDlg.OkDialog); WaitForClosedForm(importPeptideSearchDlg); var doc1 = WaitForDocumentChangeLoaded(doc, 15 * 60 * 1000); // 15 minutes if (_testCase == 1) { AssertEx.IsDocumentState(doc1, null, 1, 34, 45, 135); } else { AssertEx.IsDocumentState(doc1, null, 1, 36, 43, 129); } loadStopwatch.Stop(); DebugLog.Info("load time = {0}", loadStopwatch.ElapsedMilliseconds); var errmsg = ""; if (!useDriftTimes) { // Inspect the loaded data directly to derive DT and CCS // Verify ability to extract predictions from raw data var transitionSettingsDlg = ShowDialog<TransitionSettingsUI>( () => SkylineWindow.ShowTransitionSettingsUI(TransitionSettingsUI.TABS.IonMobility)); RunUI(() => transitionSettingsDlg.IonMobilityControl.WindowWidthType = IonMobilityWindowWidthCalculator.IonMobilityWindowWidthType.resolving_power); RunUI(() => transitionSettingsDlg.IonMobilityControl.SetResolvingPower(50)); // Simulate user picking Edit Current from the Ion Mobility Library combo control var editIonMobilityLibraryDlg = ShowDialog<EditIonMobilityLibraryDlg>(transitionSettingsDlg.IonMobilityControl.AddIonMobilityLibrary); RunUI(() => { editIonMobilityLibraryDlg.LibraryName = "test"; editIonMobilityLibraryDlg.CreateDatabaseFile(TestFilesDir.GetTestPath(editIonMobilityLibraryDlg.LibraryName + IonMobilityDb.EXT)); // Simulate user clicking Create button editIonMobilityLibraryDlg.GetIonMobilitiesFromResults(); editIonMobilityLibraryDlg.OkDialog(); }); WaitForClosedForm(editIonMobilityLibraryDlg); RunUI(() => { transitionSettingsDlg.OkDialog(); }); WaitForClosedForm(transitionSettingsDlg); var document = SkylineWindow.Document; var measuredDTs = document.Settings.TransitionSettings.IonMobilityFiltering.IonMobilityLibrary; AssertEx.IsNotNull(driftInfoExplicitDT, "driftInfoExplicitDT != null"); // ReSharper disable once PossibleNullReferenceException var explicitDTs = driftInfoExplicitDT.GetIonMobilityDict(); string errMsgAll = string.Empty; // A handful of peptides that really should have been trained on a clean sample // CONSIDER: or are they multiple conformers? They have multiple hits with distinct IM in the pepXML var expectedDiffs = LibKeyMap<double>.FromDictionary(new Dictionary<LibKey, double> { {new PeptideLibraryKey("LC[+57.0]VLHEK", 2), 18.09 }, {new PeptideLibraryKey("EC[+57.0]C[+57.0]DKPLLEK", 3), 7.0}, {new PeptideLibraryKey("SHC[+57.0]IAEVEK", 3), 6.0}, {new PeptideLibraryKey("DDPHAC[+57.0]YSTVFDK", 2), 24.0} }).AsDictionary(); foreach (var pair in doc1.PeptidePrecursorPairs) { string errMsg = string.Empty; var key = new LibKey(pair.NodePep.ModifiedSequence, pair.NodeGroup.PrecursorAdduct); double tolerCCS = 5; if (expectedDiffs.ContainsKey(key)) { tolerCCS = expectedDiffs[key] + .1; } if (!explicitDTs.ContainsKey(key)) { errMsg += "Could not locate explicit IMS info for " + key +"\n"; } var given = explicitDTs[key][0]; var measured = measuredDTs.GetIonMobilityInfo(key).First(); var msg = CheckDeltaPct(given.CollisionalCrossSectionSqA ?? 0, measured.CollisionalCrossSectionSqA ?? 
0, tolerCCS, "measured CCS", key.ToString()); if (!string.IsNullOrEmpty(msg)) { errMsg += msg + CheckDeltaPct(given.IonMobility.Mobility.Value, measured.IonMobility.Mobility.Value, -1, "measured drift time", key.ToString()); } else { errMsg += CheckDelta(given.IonMobility.Mobility.Value, measured.IonMobility.Mobility.Value, 10.0, "measured drift time", key.ToString()); } errMsg += CheckDelta(given.HighEnergyIonMobilityValueOffset.Value, measured.HighEnergyIonMobilityValueOffset.Value, 2.0, "measured drift time high energy offset", key.ToString()); if (!string.IsNullOrEmpty(errMsg)) errMsgAll += "\n" + errMsg; } if (!string.IsNullOrEmpty(errMsgAll)) AssertEx.Fail(errMsgAll); return; } LibraryIonMobilityInfo libraryIonMobilityInfo; doc1.Settings.PeptideSettings.Libraries.Libraries.First().TryGetIonMobilityInfos(doc1.MoleculeLibKeys.ToArray(), 0, out libraryIonMobilityInfo); if (driftInfoExplicitDT == null) { driftInfoExplicitDT = libraryIonMobilityInfo; } else { var instrumentInfo = new DataFileInstrumentInfo(new MsDataFileImpl(GetTestPath(basename+".d"))); var dictExplicitDT = driftInfoExplicitDT.GetIonMobilityDict(); foreach (var pep in doc1.Peptides) { foreach (var nodeGroup in pep.TransitionGroups) { var calculatedDriftTime = doc1.Settings.GetIonMobilityFilter( pep, nodeGroup, null, libraryIonMobilityInfo, instrumentInfo, 0); var libKey = new LibKey(pep.ModifiedSequence, nodeGroup.PrecursorAdduct); IonMobilityAndCCS[] infoValueExplicitDT; if (!dictExplicitDT.TryGetValue(libKey, out infoValueExplicitDT)) { errmsg += "No driftinfo value found for " + libKey + "\n"; } else { var ionMobilityInfo = infoValueExplicitDT[0]; var delta = Math.Abs(ionMobilityInfo.IonMobility.Mobility.Value -calculatedDriftTime.IonMobilityAndCCS.IonMobility.Mobility.Value); var acceptableDelta = (libKey.Sequence.StartsWith("DDPHAC") || libKey.Sequence.EndsWith("VLHEK")) ? 3: 1; // These were ambiguous matches if (delta > acceptableDelta) { errmsg += String.Format("calculated DT ({0}) and explicit DT ({1}, CCS={4}) do not agree (abs delta = {2}) for {3}\n", calculatedDriftTime.IonMobilityAndCCS.IonMobility, ionMobilityInfo.IonMobility, delta, libKey, ionMobilityInfo.CollisionalCrossSectionSqA??0); } } } } } float tolerance = (float)doc1.Settings.TransitionSettings.Instrument.MzMatchTolerance; double maxHeight = 0; var results = doc1.Settings.MeasuredResults; var numPeaks = _testCase == 1 ? new[] { 8, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 10, 7, 10, 10, 10, 10, 8, 10, 10, 10, 10, 10, 10, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 9, 10, 10, 8, 10, 10, 10, 10, 10 } : new[] { 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 8, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10 }; int npIndex = 0; foreach (var pair in doc1.PeptidePrecursorPairs) { ChromatogramGroupInfo[] chromGroupInfo; AssertEx.IsTrue(results.TryLoadChromatogram(0, pair.NodePep, pair.NodeGroup, tolerance, true, out chromGroupInfo)); foreach (var chromGroup in chromGroupInfo) { if (numPeaks[npIndex] != chromGroup.NumPeaks) errmsg += String.Format("unexpected peak count {0} instead of {1} in chromatogram {2}\r\n", chromGroup.NumPeaks, numPeaks[npIndex], npIndex); npIndex++; foreach (var tranInfo in chromGroup.TransitionPointSets) { maxHeight = Math.Max(maxHeight, tranInfo.MaxIntensity); } } } AssertEx.IsTrue(errmsg.Length == 0, errmsg); AssertEx.AreEqual(_testCase == 1 ? 2265204 : 1326442, maxHeight, 1); // Does CCS show up in reports? 
var expectedDtWindow = _testCase == 1 ? 0.74 : 0.94; TestReports( 0, expectedDtWindow); if (nextFile != null) { // Verify that we can use library generated for one file as the default for another without its own library ImportResults(nextFile); TestReports( 1, expectedDtWindow); } // And verify roundtrip of ion mobility AssertEx.RoundTrip(SkylineWindow.Document); RunUI(() => { SkylineWindow.SaveDocument(skyfile); SkylineWindow.NewDocument(true); SkylineWindow.OpenFile(skyfile); }); TestReports(1, expectedDtWindow); // Watch for problem with reimport after changed DT window var docResolvingPower = SkylineWindow.Document; var transitionSettingsUI2 = ShowDialog<TransitionSettingsUI>(SkylineWindow.ShowTransitionSettingsUI); RunUI(() => { // ReSharper disable once ConditionIsAlwaysTrueOrFalse transitionSettingsUI2.IonMobilityControl.IsUseSpectralLibraryIonMobilities = useDriftTimes; transitionSettingsUI2.IonMobilityControl.IonMobilityFilterResolvingPower = 40; }); OkDialog(transitionSettingsUI2, transitionSettingsUI2.OkDialog); var docReimport = WaitForDocumentChangeLoaded(docResolvingPower); // Reimport data for a replicate RunDlg<ManageResultsDlg>(SkylineWindow.ManageResults, dlg => { var chromatograms = docReimport.Settings.MeasuredResults.Chromatograms; dlg.SelectedChromatograms = new[] { chromatograms[0] }; dlg.ReimportResults(); dlg.OkDialog(); }); WaitForDocumentChangeLoaded(docReimport); var expectedDtWindow0 = _testCase == 2 ? 1.175 : 0.92; var expectedDtWindow1 = _testCase == 2 ? 0.94 : 0.92; TestReports(0, expectedDtWindow0, string.Format(" row {0} case {1} ccsOnly {2}", 0, _testCase, CCSonly)); TestReports(1, expectedDtWindow1, string.Format(" row {0} case {1} ccsOnly {2}", 1, _testCase, CCSonly)); } private void TestReports(int row, double expectedDtWindow, string msg = null) { // Verify reports working for CCS var documentGrid = EnableDocumentGridIonMobilityResultsColumns(); var imPrecursor = _testCase == 1 ? 18.43 : 23.50; CheckDocumentResultsGridFieldByName(documentGrid, "PrecursorResult.IonMobilityMS1", row, imPrecursor, msg); CheckDocumentResultsGridFieldByName(documentGrid, "TransitionResult.IonMobilityFragment", row, imPrecursor, msg); // Document is all precursor CheckDocumentResultsGridFieldByName(documentGrid, "PrecursorResult.IonMobilityUnits", row, IonMobilityFilter.IonMobilityUnitsL10NString(eIonMobilityUnits.drift_time_msec), msg); CheckDocumentResultsGridFieldByName(documentGrid, "PrecursorResult.IonMobilityWindow", row, expectedDtWindow, msg); CheckDocumentResultsGridFieldByName(documentGrid, "PrecursorResult.CollisionalCrossSection", row, _testCase == 1 ? 292.4 : 333.34, msg); // And clean up after ourselves RunUI(() => documentGrid.Close()); } } }
1
14,200
You should do: AssertEx.AreEqual(importPeptideSearchDlg.CurrentPage, ImportPeptideSearchDlg.Pages.match_modifications_page);
ProteoWizard-pwiz
.cs
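The suggestion above is to prefer an equality assertion over a bare boolean one so that a failure reports both values. Restated in Python purely for illustration (the real test is C# and uses AssertEx), with hypothetical names:

import unittest

class WizardPageAssertionStyleTest(unittest.TestCase):
    def test_equality_assertion_reports_both_values(self):
        expected_page = 'match_modifications_page'
        current_page = 'match_modifications_page'
        # Preferred: on failure the message shows expected and actual values.
        self.assertEqual(expected_page, current_page)
        # Discouraged: on failure the message only says the condition was false.
        self.assertTrue(current_page == expected_page)

if __name__ == '__main__':
    unittest.main()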
@@ -145,7 +145,7 @@ public class MicroserviceRegisterTask extends AbstractRegisterTask { String schemaId = entry.getKey(); String content = entry.getValue(); GetSchemaResponse existSchema = extractSchema(schemaId, existSchemas); - boolean exists = existSchema != null; + boolean exists = existSchema != null && existSchema.getSummary() != null; LOGGER.info("schemaId [{}] exists {}", schemaId, exists); if (!exists) { if (!srClient.registerSchema(microservice.getServiceId(), schemaId, content)) {
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.serviceregistry.task; import java.util.HashSet; import java.util.List; import java.util.Map.Entry; import java.util.Set; import org.apache.servicecomb.serviceregistry.api.registry.Microservice; import org.apache.servicecomb.serviceregistry.api.response.GetSchemaResponse; import org.apache.servicecomb.serviceregistry.client.ServiceRegistryClient; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.util.StringUtils; import com.google.common.base.Charsets; import com.google.common.eventbus.EventBus; import com.google.common.eventbus.Subscribe; import com.google.common.hash.Hashing; public class MicroserviceRegisterTask extends AbstractRegisterTask { private static final Logger LOGGER = LoggerFactory.getLogger(MicroserviceRegisterTask.class); private boolean schemaIdSetMatch; public MicroserviceRegisterTask(EventBus eventBus, ServiceRegistryClient srClient, Microservice microservice) { super(eventBus, srClient, microservice); this.taskStatus = TaskStatus.READY; } public boolean isSchemaIdSetMatch() { return schemaIdSetMatch; } @Subscribe public void onMicroserviceInstanceHeartbeatTask(MicroserviceInstanceHeartbeatTask task) { if (task.getHeartbeatResult() != HeartbeatResult.SUCCESS && isSameMicroservice(task.getMicroservice())) { LOGGER.info("read MicroserviceInstanceHeartbeatTask status is {}", task.taskStatus); this.taskStatus = TaskStatus.READY; this.registered = false; } } @Override protected boolean doRegister() { LOGGER.info("running microservice register task."); String serviceId = srClient.getMicroserviceId(microservice.getAppId(), microservice.getServiceName(), microservice.getVersion(), microservice.getEnvironment()); if (!StringUtils.isEmpty(serviceId)) { // 已经注册过了,不需要重新注册 microservice.setServiceId(serviceId); LOGGER.info( "Microservice exists in service center, no need to register. id={} appId={}, name={}, version={}", serviceId, microservice.getAppId(), microservice.getServiceName(), microservice.getVersion()); if (!checkSchemaIdSet()) { return false; } } else { serviceId = srClient.registerMicroservice(microservice); if (StringUtils.isEmpty(serviceId)) { LOGGER.error( "Registry microservice failed. appId={}, name={}, version={}", microservice.getAppId(), microservice.getServiceName(), microservice.getVersion()); return false; } schemaIdSetMatch = true; // 重新注册服务场景下,instanceId不应该缓存 microservice.getInstance().setInstanceId(null); LOGGER.info( "Registry Microservice successfully. 
id={} appId={}, name={}, version={}, schemaIds={}", serviceId, microservice.getAppId(), microservice.getServiceName(), microservice.getVersion(), microservice.getSchemas()); } microservice.setServiceId(serviceId); microservice.getInstance().setServiceId(microservice.getServiceId()); return registerSchemas(); } private boolean checkSchemaIdSet() { Microservice existMicroservice = srClient.getMicroservice(microservice.getServiceId()); if (existMicroservice == null) { LOGGER.error("Error to get microservice from service center when check schema set"); return false; } Set<String> existSchemas = new HashSet<>(existMicroservice.getSchemas()); Set<String> localSchemas = new HashSet<>(microservice.getSchemas()); schemaIdSetMatch = existSchemas.equals(localSchemas); if (!schemaIdSetMatch) { LOGGER.error( "SchemaIds is different between local and service center. Please change microservice version. " + "id={} appId={}, name={}, version={}, local schemaIds={}, service center schemaIds={}", microservice.getServiceId(), microservice.getAppId(), microservice.getServiceName(), microservice.getVersion(), localSchemas, existSchemas); return true; } LOGGER.info( "SchemaIds is equals to service center. id={} appId={}, name={}, version={}, schemaIds={}", microservice.getServiceId(), microservice.getAppId(), microservice.getServiceName(), microservice.getVersion(), localSchemas); return true; } private boolean registerSchemas() { List<GetSchemaResponse> existSchemas = srClient.getSchemas(microservice.getServiceId()); for (Entry<String, String> entry : microservice.getSchemaMap().entrySet()) { String schemaId = entry.getKey(); String content = entry.getValue(); GetSchemaResponse existSchema = extractSchema(schemaId, existSchemas); boolean exists = existSchema != null; LOGGER.info("schemaId [{}] exists {}", schemaId, exists); if (!exists) { if (!srClient.registerSchema(microservice.getServiceId(), schemaId, content)) { return false; } } else { String curSchemaSumary = existSchema.getSummary(); String schemaSummary = Hashing.sha256().newHasher().putString(content, Charsets.UTF_8).hash().toString(); if (!schemaSummary.equals(curSchemaSumary)) { if (microservice.getInstance().getEnvironment().equalsIgnoreCase("development")) { LOGGER.info( "schemaId [{}]'s content changes and the current enviroment is development, so re-register it!", schemaId); if (!srClient.registerSchema(microservice.getServiceId(), schemaId, content)) { return false; } } else { throw new IllegalStateException("schemaId [" + schemaId + "] exists in service center, but the content does not match the local content that means there are interface change " + "and you need to increment microservice version before deploying. " + "Or you can configure instance_description.environment=development to work in development enviroment and ignore this error"); } } } } return true; } private GetSchemaResponse extractSchema(String schemaId, List<GetSchemaResponse> schemas) { if (schemas == null || schemas.isEmpty()) { return null; } GetSchemaResponse schema = null; for (GetSchemaResponse tempSchema : schemas) { if (tempSchema.getSchemaId().equals(schemaId)) { schema = tempSchema; break; } } return schema; } }
1
9,557
SC supports registering schemas in batches; we should switch to the new API. Be careful that the request may become too large; we need to split it into multiple requests based on the maximum size limit.
apache-servicecomb-java-chassis
java
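The review above suggests moving to a batch schema-registration API while guarding against oversized requests. Below is a minimal sketch of the splitting step only, assuming a hypothetical maxBatchBytes limit; the actual service-center batch call is not shown in this row and is not reproduced here.

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class SchemaBatcher {
  // Groups schemaId -> content entries into batches whose combined content size
  // stays under maxBatchBytes, so each batch can be sent as one request.
  static List<Map<String, String>> splitSchemas(Map<String, String> schemas, int maxBatchBytes) {
    List<Map<String, String>> batches = new ArrayList<>();
    Map<String, String> current = new LinkedHashMap<>();
    int currentBytes = 0;
    for (Map.Entry<String, String> e : schemas.entrySet()) {
      int bytes = e.getValue().getBytes(StandardCharsets.UTF_8).length;
      if (!current.isEmpty() && currentBytes + bytes > maxBatchBytes) {
        batches.add(current);
        current = new LinkedHashMap<>();
        currentBytes = 0;
      }
      current.put(e.getKey(), e.getValue());
      currentBytes += bytes;
    }
    if (!current.isEmpty()) {
      batches.add(current);
    }
    return batches;
  }
}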
@@ -25,8 +25,8 @@ func NewConfig(options *compileopts.Options) (*compileopts.Config, error) { if err != nil { return nil, fmt.Errorf("could not read version from GOROOT (%v): %v", goroot, err) } - if major != 1 || minor < 11 || minor > 14 { - return nil, fmt.Errorf("requires go version 1.11, 1.12, 1.13, or 1.14, got go%d.%d", major, minor) + if major != 1 || minor < 11 || minor > 15 { + return nil, fmt.Errorf("requires go version 1.11, 1.12, 1.13, 1.14, or 1.15, got go%d.%d", major, minor) } clangHeaderPath := getClangHeaderPath(goenv.Get("TINYGOROOT")) return &compileopts.Config{
1
package builder import ( "errors" "fmt" "github.com/tinygo-org/tinygo/compileopts" "github.com/tinygo-org/tinygo/goenv" ) // NewConfig builds a new Config object from a set of compiler options. It also // loads some information from the environment while doing that. For example, it // uses the currently active GOPATH (from the goenv package) to determine the Go // version to use. func NewConfig(options *compileopts.Options) (*compileopts.Config, error) { spec, err := compileopts.LoadTarget(options.Target) if err != nil { return nil, err } goroot := goenv.Get("GOROOT") if goroot == "" { return nil, errors.New("cannot locate $GOROOT, please set it manually") } major, minor, err := goenv.GetGorootVersion(goroot) if err != nil { return nil, fmt.Errorf("could not read version from GOROOT (%v): %v", goroot, err) } if major != 1 || minor < 11 || minor > 14 { return nil, fmt.Errorf("requires go version 1.11, 1.12, 1.13, or 1.14, got go%d.%d", major, minor) } clangHeaderPath := getClangHeaderPath(goenv.Get("TINYGOROOT")) return &compileopts.Config{ Options: options, Target: spec, GoMinorVersion: minor, ClangHeaders: clangHeaderPath, TestConfig: options.TestConfig, }, nil }
1
10,426
At this point, maybe it would make more sense to write this as a range rather than listing specific versions?
tinygo-org-tinygo
go
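The range check the reviewer has in mind could look like the sketch below; the bounds mirror the 1.11 through 1.15 values already present in the patch, and only the wording of the error changes.

package builder

import "fmt"

// checkGoVersion phrases the supported versions as a range instead of listing
// each minor release, so bumping the upper bound is a one-line change.
func checkGoVersion(major, minor int) error {
	const minMinor, maxMinor = 11, 15
	if major != 1 || minor < minMinor || minor > maxMinor {
		return fmt.Errorf("requires go version 1.%d through 1.%d, got go%d.%d", minMinor, maxMinor, major, minor)
	}
	return nil
}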
@@ -255,6 +255,11 @@ func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { return http.StatusRequestEntityTooLarge, backendErr } + // TODO: write an 499 HTTP status as constant in someplace (and also change in proxy_test.go on line: expectedStatus, expectErr := 499, context.Canceled) + if backendErr == context.Canceled { + return 499, backendErr + } + // failover; remember this failure for some time if // request failure counting is enabled timeout := host.FailTimeout
1
// Copyright 2015 Light Code Labs, LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package proxy is middleware that proxies HTTP requests. package proxy import ( "context" "errors" "net" "net/http" "net/url" "strings" "sync/atomic" "time" "github.com/mholt/caddy/caddyhttp/httpserver" ) // Proxy represents a middleware instance that can proxy requests. type Proxy struct { Next httpserver.Handler Upstreams []Upstream } // Upstream manages a pool of proxy upstream hosts. type Upstream interface { // The path this upstream host should be routed on From() string // Selects an upstream host to be routed to. It // should return a suitable upstream host, or nil // if no such hosts are available. Select(*http.Request) *UpstreamHost // Checks if subpath is not an ignored path AllowedPath(string) bool // Gets how long to try selecting upstream hosts // in the case of cascading failures. GetTryDuration() time.Duration // Gets how long to wait between selecting upstream // hosts in the case of cascading failures. GetTryInterval() time.Duration // Gets the number of upstream hosts. GetHostCount() int // Gets how long to wait before timing out // the request GetTimeout() time.Duration // Stops the upstream from proxying requests to shutdown goroutines cleanly. Stop() error } // UpstreamHostDownFunc can be used to customize how Down behaves. type UpstreamHostDownFunc func(*UpstreamHost) bool // UpstreamHost represents a single proxy upstream type UpstreamHost struct { // This field is read & written to concurrently, so all access must use // atomic operations. Conns int64 // must be first field to be 64-bit aligned on 32-bit systems MaxConns int64 Name string // hostname of this upstream host UpstreamHeaders http.Header DownstreamHeaders http.Header FailTimeout time.Duration CheckDown UpstreamHostDownFunc WithoutPathPrefix string ReverseProxy *ReverseProxy Fails int32 // This is an int32 so that we can use atomic operations to do concurrent // reads & writes to this value. The default value of 0 indicates that it // is healthy and any non-zero value indicates unhealthy. Unhealthy int32 HealthCheckResult atomic.Value } // Down checks whether the upstream host is down or not. // Down will try to use uh.CheckDown first, and will fall // back to some default criteria if necessary. func (uh *UpstreamHost) Down() bool { if uh.CheckDown == nil { // Default settings return atomic.LoadInt32(&uh.Unhealthy) != 0 || atomic.LoadInt32(&uh.Fails) > 0 } return uh.CheckDown(uh) } // Full checks whether the upstream host has reached its maximum connections func (uh *UpstreamHost) Full() bool { return uh.MaxConns > 0 && atomic.LoadInt64(&uh.Conns) >= uh.MaxConns } // Available checks whether the upstream host is available for proxying to func (uh *UpstreamHost) Available() bool { return !uh.Down() && !uh.Full() } // ServeHTTP satisfies the httpserver.Handler interface. 
func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { // start by selecting most specific matching upstream config upstream := p.match(r) if upstream == nil { return p.Next.ServeHTTP(w, r) } // this replacer is used to fill in header field values replacer := httpserver.NewReplacer(r, nil, "") // outreq is the request that makes a roundtrip to the backend outreq, cancel := createUpstreamRequest(w, r) defer cancel() // If we have more than one upstream host defined and if retrying is enabled // by setting try_duration to a non-zero value, caddy will try to // retry the request at a different host if the first one failed. // // This requires us to possibly rewind and replay the request body though, // which in turn requires us to buffer the request body first. // // An unbuffered request is usually preferrable, because it reduces latency // as well as memory usage. Furthermore it enables different kinds of // HTTP streaming applications like gRPC for instance. requiresBuffering := upstream.GetHostCount() > 1 && upstream.GetTryDuration() != 0 if requiresBuffering { body, err := newBufferedBody(outreq.Body) if err != nil { return http.StatusBadRequest, errors.New("failed to read downstream request body") } if body != nil { outreq.Body = body } } // The keepRetrying function will return true if we should // loop and try to select another host, or false if we // should break and stop retrying. start := time.Now() keepRetrying := func(backendErr error) bool { // if downstream has canceled the request, break if backendErr == context.Canceled { return false } // if we've tried long enough, break if time.Since(start) >= upstream.GetTryDuration() { return false } // otherwise, wait and try the next available host time.Sleep(upstream.GetTryInterval()) return true } var backendErr error for { // since Select() should give us "up" hosts, keep retrying // hosts until timeout (or until we get a nil host). host := upstream.Select(r) if host == nil { if backendErr == nil { backendErr = errors.New("no hosts available upstream") } if !keepRetrying(backendErr) { break } continue } if rr, ok := w.(*httpserver.ResponseRecorder); ok && rr.Replacer != nil { rr.Replacer.Set("upstream", host.Name) } proxy := host.ReverseProxy // a backend's name may contain more than just the host, // so we parse it as a URL to try to isolate the host. 
if nameURL, err := url.Parse(host.Name); err == nil { outreq.Host = nameURL.Host if proxy == nil { proxy = NewSingleHostReverseProxy(nameURL, host.WithoutPathPrefix, http.DefaultMaxIdleConnsPerHost, upstream.GetTimeout(), ) } // use upstream credentials by default if outreq.Header.Get("Authorization") == "" && nameURL.User != nil { pwd, _ := nameURL.User.Password() outreq.SetBasicAuth(nameURL.User.Username(), pwd) } } else { outreq.Host = host.Name } if proxy == nil { return http.StatusInternalServerError, errors.New("proxy for host '" + host.Name + "' is nil") } // set headers for request going upstream if host.UpstreamHeaders != nil { // modify headers for request that will be sent to the upstream host mutateHeadersByRules(outreq.Header, host.UpstreamHeaders, replacer) if hostHeaders, ok := outreq.Header["Host"]; ok && len(hostHeaders) > 0 { outreq.Host = hostHeaders[len(hostHeaders)-1] } } // prepare a function that will update response // headers coming back downstream var downHeaderUpdateFn respUpdateFn if host.DownstreamHeaders != nil { downHeaderUpdateFn = createRespHeaderUpdateFn(host.DownstreamHeaders, replacer) } // Before we retry the request we have to make sure // that the body is rewound to it's beginning. if bb, ok := outreq.Body.(*bufferedBody); ok { if err := bb.rewind(); err != nil { return http.StatusInternalServerError, errors.New("unable to rewind downstream request body") } } // tell the proxy to serve the request // // NOTE: // The call to proxy.ServeHTTP can theoretically panic. // To prevent host.Conns from getting out-of-sync we thus have to // make sure that it's _always_ correctly decremented afterwards. func() { atomic.AddInt64(&host.Conns, 1) defer atomic.AddInt64(&host.Conns, -1) backendErr = proxy.ServeHTTP(w, outreq, downHeaderUpdateFn) }() // if no errors, we're done here if backendErr == nil { return 0, nil } if backendErr == httpserver.ErrMaxBytesExceeded { return http.StatusRequestEntityTooLarge, backendErr } // failover; remember this failure for some time if // request failure counting is enabled timeout := host.FailTimeout if timeout > 0 { atomic.AddInt32(&host.Fails, 1) go func(host *UpstreamHost, timeout time.Duration) { time.Sleep(timeout) atomic.AddInt32(&host.Fails, -1) }(host, timeout) } // if we've tried long enough, break if !keepRetrying(backendErr) { break } } return http.StatusBadGateway, backendErr } // match finds the best match for a proxy config based on r. func (p Proxy) match(r *http.Request) Upstream { var u Upstream var longestMatch int for _, upstream := range p.Upstreams { basePath := upstream.From() if !httpserver.Path(r.URL.Path).Matches(basePath) || !upstream.AllowedPath(r.URL.Path) { continue } if len(basePath) > longestMatch { longestMatch = len(basePath) u = upstream } } return u } // createUpstremRequest shallow-copies r into a new request // that can be sent upstream. // // Derived from reverseproxy.go in the standard Go httputil package. func createUpstreamRequest(rw http.ResponseWriter, r *http.Request) (*http.Request, context.CancelFunc) { // Original incoming server request may be canceled by the // user or by std lib(e.g. too many idle connections). ctx, cancel := context.WithCancel(r.Context()) if cn, ok := rw.(http.CloseNotifier); ok { notifyChan := cn.CloseNotify() go func() { select { case <-notifyChan: cancel() case <-ctx.Done(): } }() } outreq := r.WithContext(ctx) // includes shallow copies of maps, but okay // We should set body to nil explicitly if request body is empty. 
// For server requests the Request Body is always non-nil. if r.ContentLength == 0 { outreq.Body = nil } // We are modifying the same underlying map from req (shallow // copied above) so we only copy it if necessary. copiedHeaders := false // Remove hop-by-hop headers listed in the "Connection" header. // See RFC 2616, section 14.10. if c := outreq.Header.Get("Connection"); c != "" { for _, f := range strings.Split(c, ",") { if f = strings.TrimSpace(f); f != "" { if !copiedHeaders { outreq.Header = make(http.Header) copyHeader(outreq.Header, r.Header) copiedHeaders = true } outreq.Header.Del(f) } } } // Remove hop-by-hop headers to the backend. Especially // important is "Connection" because we want a persistent // connection, regardless of what the client sent to us. for _, h := range hopHeaders { if outreq.Header.Get(h) != "" { if !copiedHeaders { outreq.Header = make(http.Header) copyHeader(outreq.Header, r.Header) copiedHeaders = true } outreq.Header.Del(h) } } if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil { // If we aren't the first proxy, retain prior // X-Forwarded-For information as a comma+space // separated list and fold multiple headers into one. if prior, ok := outreq.Header["X-Forwarded-For"]; ok { clientIP = strings.Join(prior, ", ") + ", " + clientIP } outreq.Header.Set("X-Forwarded-For", clientIP) } return outreq, cancel } func createRespHeaderUpdateFn(rules http.Header, replacer httpserver.Replacer) respUpdateFn { return func(resp *http.Response) { mutateHeadersByRules(resp.Header, rules, replacer) } } func mutateHeadersByRules(headers, rules http.Header, repl httpserver.Replacer) { for ruleField, ruleValues := range rules { if strings.HasPrefix(ruleField, "+") { for _, ruleValue := range ruleValues { replacement := repl.Replace(ruleValue) if len(replacement) > 0 { headers.Add(strings.TrimPrefix(ruleField, "+"), replacement) } } } else if strings.HasPrefix(ruleField, "-") { headers.Del(strings.TrimPrefix(ruleField, "-")) } else if len(ruleValues) > 0 { replacement := repl.Replace(ruleValues[len(ruleValues)-1]) if len(replacement) > 0 { headers.Set(ruleField, replacement) } } } }
1
12,741
I guess we can start by putting the `const StatusContextCancelled = 499` at the bottom of this file
caddyserver-caddy
go
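A minimal sketch of the constant the comment proposes; the name StatusContextCancelled and its placement are only what the reviewer suggests, not necessarily what was merged.

package proxy

import "context"

// StatusContextCancelled is the non-standard 499 status (borrowed from nginx)
// returned when the downstream client cancels the request before the backend
// finishes responding.
const StatusContextCancelled = 499

// statusForCancel maps a canceled-context backend error to that status.
func statusForCancel(backendErr error) (int, bool) {
	if backendErr == context.Canceled {
		return StatusContextCancelled, true
	}
	return 0, false
}

The test in proxy_test.go mentioned by the TODO could then compare against the same constant instead of the literal 499.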
@@ -100,6 +100,7 @@ func TestPaymentChannelVoucher(t *testing.T) { types.NewChannelID(5), types.NewAttoFILFromFIL(10), types.NewBlockHeight(0), + nil, ) require.NoError(err) assert.Equal(expectedVoucher.Channel, voucher.Channel)
1
package porcelain_test import ( "context" "testing" cbor "github.com/ipfs/go-ipld-cbor" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-filecoin/actor" "github.com/filecoin-project/go-filecoin/actor/builtin/paymentbroker" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/porcelain" tf "github.com/filecoin-project/go-filecoin/testhelpers/testflags" "github.com/filecoin-project/go-filecoin/types" ) type testPaymentChannelLsPlumbing struct { require *require.Assertions channels map[string]*paymentbroker.PaymentChannel } func (p *testPaymentChannelLsPlumbing) MessageQuery(ctx context.Context, optFrom, to address.Address, method string, params ...interface{}) ([][]byte, error) { chnls, err := cbor.DumpObject(p.channels) p.require.NoError(err) return [][]byte{chnls}, nil } func (p *testPaymentChannelLsPlumbing) WalletDefaultAddress() (address.Address, error) { return address.Undef, nil } func TestPaymentChannelLs(t *testing.T) { tf.UnitTest(t) t.Run("succeeds", func(t *testing.T) { assert := assert.New(t) require := require.New(t) expectedChannels := map[string]*paymentbroker.PaymentChannel{} plumbing := &testPaymentChannelLsPlumbing{ channels: expectedChannels, require: require, } ctx := context.Background() channels, err := porcelain.PaymentChannelLs(ctx, plumbing, address.Undef, address.Undef) require.NoError(err) assert.Equal(expectedChannels, channels) }) } type testPaymentChannelVoucherPlumbing struct { require *require.Assertions voucher *types.PaymentVoucher } func (p *testPaymentChannelVoucherPlumbing) MessageQuery(ctx context.Context, optFrom, to address.Address, method string, params ...interface{}) ([][]byte, error) { result, err := actor.MarshalStorage(p.voucher) p.require.NoError(err) return [][]byte{result}, nil } func (p *testPaymentChannelVoucherPlumbing) SignBytes(data []byte, addr address.Address) (types.Signature, error) { return []byte("test"), nil } func (p *testPaymentChannelVoucherPlumbing) WalletDefaultAddress() (address.Address, error) { return address.Undef, nil } func TestPaymentChannelVoucher(t *testing.T) { tf.UnitTest(t) t.Run("succeeds", func(t *testing.T) { assert := assert.New(t) require := require.New(t) expectedVoucher := &types.PaymentVoucher{ Channel: *types.NewChannelID(5), Payer: address.Undef, Target: address.Undef, Amount: *types.NewAttoFILFromFIL(10), ValidAt: *types.NewBlockHeight(0), Signature: []byte{}, } plumbing := &testPaymentChannelVoucherPlumbing{ require: require, voucher: expectedVoucher, } ctx := context.Background() voucher, err := porcelain.PaymentChannelVoucher( ctx, plumbing, address.Undef, types.NewChannelID(5), types.NewAttoFILFromFIL(10), types.NewBlockHeight(0), ) require.NoError(err) assert.Equal(expectedVoucher.Channel, voucher.Channel) assert.Equal(expectedVoucher.Payer, voucher.Payer) assert.Equal(expectedVoucher.Target, voucher.Target) assert.Equal(expectedVoucher.Amount, voucher.Amount) assert.Equal(expectedVoucher.ValidAt, voucher.ValidAt) assert.NotEqual(expectedVoucher.Signature, voucher.Signature) }) }
1
18,741
Blocking: populate a non-empty value and check it below.
filecoin-project-venus
go
@@ -194,7 +194,14 @@ public class Permission { // Users with this permission can upload projects when the property "lockdown.upload.projects" // is turned on UPLOADPROJECTS(0x0008000), - ADMIN(0x8000000); + ADMIN(0x8000000), + // Permissions for image management APIs. + CREATE(0x0000100), + GET(0x0000200), + UPDATE(0x0000300), + DELETE(0x0000400), + IMAGE_TYPE_ADD_MEMBER(0x0000500), + IMAGE_TYPE_DELETE_MEMBER(0x0000600); private final int numVal;
1
/* * Copyright 2012 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.user; import azkaban.utils.Utils; import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; import java.util.Set; public class Permission { private final Set<Type> permissions = new HashSet<>(); public Permission() { } public Permission(final int flags) { setPermissions(flags); } public Permission(final Type... list) { addPermission(list); } public void addPermissions(final Permission perm) { this.permissions.addAll(perm.getTypes()); } public void setPermission(final Type type, final boolean set) { if (set) { addPermission(type); } else { removePermissions(type); } } public void setPermissions(final int flags) { this.permissions.clear(); if ((flags & Type.ADMIN.getFlag()) != 0) { addPermission(Type.ADMIN); } else { for (final Type type : Type.values()) { if ((flags & type.getFlag()) != 0) { addPermission(type); } } } } public void addPermission(final Type... list) { // Admin is all encompassing permission. No need to add other types if (!this.permissions.contains(Type.ADMIN)) { for (final Type perm : list) { this.permissions.add(perm); } // We add everything, and if there's Admin left, we make sure that only // Admin is remaining. if (this.permissions.contains(Type.ADMIN)) { this.permissions.clear(); this.permissions.add(Type.ADMIN); } } } public void addPermissionsByName(final String... list) { for (final String perm : list) { final Type type = Type.valueOf(perm); if (type != null) { addPermission(type); } } } public void addPermissions(final Collection<Type> list) { for (final Type perm : list) { addPermission(perm); } } public void addPermissionsByName(final Collection<String> list) { for (final String perm : list) { final Type type = Type.valueOf(perm); if (type != null) { addPermission(type); } } } public Set<Type> getTypes() { return this.permissions; } public void removePermissions(final Type... list) { for (final Type perm : list) { this.permissions.remove(perm); } } public void removePermissionsByName(final String... list) { for (final String perm : list) { final Type type = Type.valueOf(perm); if (type != null) { this.permissions.remove(type); } } } public boolean isPermissionSet(final Type permission) { return this.permissions.contains(permission); } public boolean isPermissionNameSet(final String permission) { return this.permissions.contains(Type.valueOf(permission)); } public String[] toStringArray() { final ArrayList<String> list = new ArrayList<>(); int count = 0; for (final Type type : this.permissions) { list.add(type.toString()); count++; } return list.toArray(new String[count]); } @Override public String toString() { return Utils.flattenToString(this.permissions, ","); } @Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + ((this.permissions == null) ? 
0 : this.permissions.hashCode()); return result; } @Override public boolean equals(final Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (getClass() != obj.getClass()) { return false; } final Permission other = (Permission) obj; if (this.permissions == null) { if (other.permissions != null) { return false; } } else if (!this.permissions.equals(other.permissions)) { return false; } return true; } public int toFlags() { int flag = 0; for (final Type type : this.permissions) { flag |= type.getFlag(); } return flag; } public enum Type { READ(0x0000001), WRITE(0x0000002), EXECUTE(0x0000004), SCHEDULE(0x0000008), METRICS(0x0000010), CREATEPROJECTS(0x40000000), // Only used for roles // Users with this permission can upload projects when the property "lockdown.upload.projects" // is turned on UPLOADPROJECTS(0x0008000), ADMIN(0x8000000); private final int numVal; Type(final int numVal) { this.numVal = numVal; } public int getFlag() { return this.numVal; } } }
1
20,741
What is this number value for? How is it used?
azkaban-azkaban
java
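On the question above: numVal is a bit flag. Permission.toFlags() ORs the flags of every held Type into one int, and setPermissions(int) decodes it by testing each bit, so each Type is expected to own a distinct bit. A small illustration of that round trip is sketched below (note that the values proposed in the patch are sequential rather than powers of two, e.g. 0x300 equals 0x100 | 0x200, which would collide under this scheme).

// Round trip used by Permission: each Type contributes one bit, the bits are
// OR-ed into a single int (toFlags), and decoding tests each bit (setPermissions).
public class FlagDemo {
  public static void main(String[] args) {
    int read = 0x0000001;  // Type.READ
    int write = 0x0000002; // Type.WRITE

    int combined = read | write;                 // what toFlags() produces: 0x3
    boolean hasRead = (combined & read) != 0;    // what setPermissions() checks: true
    boolean hasWrite = (combined & write) != 0;  // true

    System.out.printf("combined=0x%x read=%b write=%b%n", combined, hasRead, hasWrite);
  }
}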
@@ -192,6 +192,11 @@ describe Travis::Build::Script::R, :sexp do assert: true, echo: true, retry: true, timing: true] end + it 'does BiocCheck if requested' do + data[:config][:bioc_check] = true + should include_sexp [:cmd, /.*BiocCheck.*/, + assert: true, echo: true, timing: true] + it 'does install bioc with bioc_packages' do data[:config][:bioc_packages] = ['GenomicFeatures'] should include_sexp [:cmd, /.*biocLite.*/,
1
require 'spec_helper' describe Travis::Build::Script::R, :sexp do let (:data) { payload_for(:push, :r) } let (:script) { described_class.new(data) } subject { script.sexp } it { store_example } it_behaves_like 'a build script sexp' it 'normalizes bioc-devel correctly' do pending('known to fail with certain random seeds (incl 58438)') fail data[:config][:r] = 'bioc-devel' should include_sexp [:export, ['TRAVIS_R_VERSION', 'devel']] should include_sexp [:cmd, %r{source\(\"https://bioconductor.org/biocLite.R\"\)}, assert: true, echo: true, timing: true, retry: true] should include_sexp [:cmd, %r{useDevel\(TRUE\)}, assert: true, echo: true, timing: true, retry: true] end it 'normalizes bioc-release correctly' do pending('known to fail with certain random seeds (incl 58438)') fail data[:config][:r] = 'bioc-release' should include_sexp [:cmd, %r{source\(\"https://bioconductor.org/biocLite.R\"\)}, assert: true, echo: true, timing: true, retry: true] should include_sexp [:export, ['TRAVIS_R_VERSION', '3.5.0']] end it 'r_packages works with a single package set' do data[:config][:r_packages] = 'test' should include_sexp [:cmd, %r{install\.packages\(c\(\"test\"\)\)}, assert: true, echo: true, timing: true] end it 'r_packages works with multiple packages set' do data[:config][:r_packages] = ['test', 'test2'] should include_sexp [:cmd, %r{install\.packages\(c\(\"test\", \"test2\"\)\)}, assert: true, echo: true, timing: true] end it 'exports TRAVIS_R_VERSION' do data[:config][:r] = '3.3.0' should include_sexp [:export, ['TRAVIS_R_VERSION', '3.3.0']] end context "when R version is given as an array" do it 'uses the first value' do data[:config][:r] = %w(3.3.0) should include_sexp [:export, ['TRAVIS_R_VERSION', '3.3.0']] end end it 'downloads and installs latest R' do should include_sexp [:cmd, %r{^curl.*https://s3\.amazonaws\.com/rstudio-travis/R-3\.5\.0-\$\(lsb_release -cs\)\.xz}, assert: true, echo: true, retry: true, timing: true] end it 'downloads and installs latest R on OS X' do data[:config][:os] = 'osx' should include_sexp [:cmd, %r{^curl.*bin/macosx/R-latest\.pkg}, assert: true, echo: true, retry: true, timing: true] end it 'downloads and installs aliased R 3.2.5 on OS X' do data[:config][:os] = 'osx' data[:config][:r] = '3.2.5' should include_sexp [:cmd, %r{^curl.*bin/macosx/old/R-3\.2\.4-revised\.pkg}, assert: true, echo: true, retry: true, timing: true] end it 'downloads and installs other R versions on OS X' do data[:config][:os] = 'osx' data[:config][:r] = '3.1.3' should include_sexp [:cmd, %r{^curl.*bin/macosx/old/R-3\.1\.3\.pkg}, assert: true, echo: true, retry: true, timing: true] end it 'downloads and installs R devel on OS X' do data[:config][:os] = 'osx' data[:config][:r] = 'devel' should include_sexp [:cmd, %r{^curl.*r\.research\.att\.com/el-capitan/R-devel/R-devel-el-capitan-signed\.pkg}, assert: true, echo: true, retry: true, timing: true] end it 'downloads and installs gfortran libraries on OS X' do data[:config][:os] = 'osx' data[:config][:r] = 'oldrel' data[:config][:fortran] = true should include_sexp [:cmd, %r{^curl.*#{Regexp.escape('/tmp/gfortran.tar.bz2 http://r.research.att.com/libs/gfortran-4.8.2-darwin13.tar.bz2')}}, assert: true, echo: true, retry: true, timing: true] end it 'downloads and installs Coudert gfortran on OS X for R 3.4' do data[:config][:os] = 'osx' data[:config][:r] = 'release' data[:config][:fortran] = true should include_sexp [:cmd, %r{^curl.*#{Regexp.escape('/tmp/gfortran61.dmg http://coudert.name/software/gfortran-6.1-ElCapitan.dmg')}}, assert: true, 
echo: true, retry: true, timing: true] end it 'downloads and installs R 3.1' do data[:config][:r] = '3.1' should include_sexp [:cmd, %r{^curl.*https://s3\.amazonaws\.com/rstudio-travis/R-3\.1\.3-\$\(lsb_release -cs\)\.xz}, assert: true, echo: true, retry: true, timing: true] end it 'downloads and installs R 3.2' do data[:config][:r] = '3.2' should include_sexp [:cmd, %r{^curl.*https://s3\.amazonaws\.com/rstudio-travis/R-3\.2\.5-\$\(lsb_release -cs\)\.xz}, assert: true, echo: true, retry: true, timing: true] end it 'downloads and installs R devel' do data[:config][:r] = 'devel' should include_sexp [:cmd, %r{^curl.*https://s3\.amazonaws\.com/rstudio-travis/R-devel-\$\(lsb_release -cs\)\.xz}, assert: true, echo: true, retry: true, timing: true] end it 'downloads pandoc and installs into /usr/bin/pandoc' do data[:config][:pandoc_version] = '2.2' should include_sexp [:cmd, %r{curl.*/tmp/pandoc-2\.2-1-amd64\.deb https://github\.com/jgm/pandoc/releases/download/2\.2/pandoc-2\.2-1-amd64\.deb}, assert: true, echo: true, timing: true] should include_sexp [:cmd, %r{sudo dpkg -i /tmp/pandoc-}, assert: true, echo: true, timing: true] end it 'downloads pandoc <= 1.19.2.1 on OS X' do data[:config][:pandoc_version] = '1.19.2.1' data[:config][:os] = 'osx' should include_sexp [:cmd, %r{curl.*/tmp/pandoc-1\.19\.2\.1-osx\.pkg https://github\.com/jgm/pandoc/releases/download/1\.19\.2\.1/pandoc-1\.19\.2\.1-osx\.pkg}, assert: true, echo: true, timing: true] end it 'sets repos in ~/.Rprofile.site with defaults' do data[:config][:cran] = 'https://cloud.r-project.org' should include_sexp [:cmd, "echo 'options(repos = c(CRAN = \"https://cloud.r-project.org\"))' > ~/.Rprofile.site", assert: true, echo: true, timing: true] end it 'sets repos in ~/.Rprofile.site with user specified repos' do data[:config][:cran] = 'https://cran.rstudio.org' should include_sexp [:cmd, "echo 'options(repos = c(CRAN = \"https://cran.rstudio.org\"))' > ~/.Rprofile.site", assert: true, echo: true, timing: true] end it 'sets repos in ~/.Rprofile.site with additional user specified repos' do data[:config][:repos] = {CRAN: 'https://cran.rstudio.org', ropensci: 'http://packages.ropensci.org'} should include_sexp [:cmd, "echo 'options(repos = c(CRAN = \"https://cran.rstudio.org\", ropensci = \"http://packages.ropensci.org\"))' > ~/.Rprofile.site", assert: true, echo: true, timing: true] end it 'installs source devtools' do should include_sexp [:cmd, /Rscript -e 'install\.packages\(c\(\"devtools\"\)/, assert: true, echo: true, timing: true] should_not include_sexp [:cmd, /sudo apt-get install.*r-cran-devtools/, assert: true, echo: true, timing: true, retry: true] end it 'installs source devtools if sudo: false' do data[:config][:sudo] = false should include_sexp [:cmd, /Rscript -e 'install\.packages\(c\(\"devtools\"\)/, assert: true, echo: true, timing: true] should_not include_sexp [:cmd, /sudo apt-get install.*r-cran-devtools/, assert: true, echo: true, timing: true, retry: true] end it 'fails on package build and test failures' do should include_sexp [:cmd, /.*R CMD build.*/, assert: true, echo: true, timing: true] should include_sexp [:cmd, /.*R CMD check.*/, echo: true, timing: true] end it 'skips PDF manual when LaTeX is disabled' do data[:config][:latex] = false should include_sexp [:cmd, /.*R CMD check.* --no-manual.*/, echo: true, timing: true] end describe 'bioc configuration is optional' do it 'does not install bioc if not required' do should_not include_sexp [:cmd, /.*biocLite.*/, assert: true, echo: true, retry: true, timing: true] 
end it 'does install bioc if requested' do data[:config][:bioc_required] = true should include_sexp [:cmd, /.*biocLite.*/, assert: true, echo: true, retry: true, timing: true] end it 'does install bioc with bioc_packages' do data[:config][:bioc_packages] = ['GenomicFeatures'] should include_sexp [:cmd, /.*biocLite.*/, assert: true, echo: true, retry: true, timing: true] end it 'Prints installed package versions' do should include_sexp [:cmd, /.*#{Regexp.escape('devtools::session_info(installed.packages()[, "Package"])')}.*/, assert: true, echo: true, timing: true] end end describe '#cache_slug' do subject { described_class.new(data).cache_slug } it { data[:config][:r] = '3.3.0' should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.3.0") } it { data[:config][:r] = '3.2' should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.2.5") } it { data[:config][:r] = 'release' should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.5.0") } it { data[:config][:r] = 'oldrel' should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.3.3") } it { data[:config][:r] = '3.1' should eq("cache-#{CACHE_SLUG_EXTRAS}--R-3.1.3") } it { data[:config][:r] = 'devel' should eq("cache-#{CACHE_SLUG_EXTRAS}--R-devel") } end end
1
16,119
This needs an `end` keyword
travis-ci-travis-build
rb
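With the missing keyword added, the new example from the patch reads as below; nothing else changes.

# Same example as in the patch, with the block closed so the enclosing
# describe block still parses.
it 'does BiocCheck if requested' do
  data[:config][:bioc_check] = true
  should include_sexp [:cmd, /.*BiocCheck.*/,
                       assert: true, echo: true, timing: true]
end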
@@ -0,0 +1,17 @@ +// <copyright file="DependencyTelemetryData.cs" company="Datadog"> +// Unless explicitly stated otherwise all files in this repository are licensed under the Apache 2 License. +// This product includes software developed at Datadog (https://www.datadoghq.com/). Copyright 2017 Datadog, Inc. +// </copyright> + +namespace Datadog.Trace.Telemetry +{ + /// <summary> + /// Using a record as used as dictionary key so getting equality comparison for free + /// </summary> + internal record DependencyTelemetryData + { + public string Name { get; set; } + + public string Version { get; set; } + } +}
1
1
25,725
I think this is the first record type in Datadog.Trace!
DataDog-dd-trace-dotnet
.cs
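To show the "equality comparison for free" that the record's XML comment refers to: two instances with the same property values compare equal and hash identically, so they address the same dictionary entry. A self-contained sketch follows; the Serilog name and version are made-up sample values.

using System;
using System.Collections.Generic;

// Mirrors the record added in the patch above.
internal record DependencyTelemetryData
{
    public string Name { get; set; }

    public string Version { get; set; }
}

internal static class RecordKeyDemo
{
    public static void Main()
    {
        var counts = new Dictionary<DependencyTelemetryData, int>();
        var a = new DependencyTelemetryData { Name = "Serilog", Version = "2.10.0" };
        var b = new DependencyTelemetryData { Name = "Serilog", Version = "2.10.0" };

        counts[a] = 1;

        // Records compare by value, so a == b and both map to the same entry.
        Console.WriteLine(a == b);    // True
        Console.WriteLine(counts[b]); // 1
    }
}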
@@ -737,10 +737,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http { return Task.CompletedTask; } - else - { - return FireOnCompletedAwaited(onCompleted); - } + return FireOnCompletedAwaited(onCompleted); } private async Task FireOnCompletedAwaited(Stack<KeyValuePair<Func<object, Task>, object>> onCompleted)
1
// Copyright (c) .NET Foundation. All rights reserved. // Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information. using System; using System.Buffers; using System.Collections.Generic; using System.Diagnostics; using System.IO; using System.IO.Pipelines; using System.Linq; using System.Net; using System.Runtime.CompilerServices; using System.Text; using System.Threading; using System.Threading.Tasks; using Microsoft.AspNetCore.Hosting.Server; using Microsoft.AspNetCore.Http; using Microsoft.AspNetCore.Http.Features; using Microsoft.AspNetCore.Protocols; using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Infrastructure; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Primitives; // ReSharper disable AccessToModifiedClosure namespace Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http { public abstract partial class HttpProtocol : IHttpResponseControl { private static readonly byte[] _bytesConnectionClose = Encoding.ASCII.GetBytes("\r\nConnection: close"); private static readonly byte[] _bytesConnectionKeepAlive = Encoding.ASCII.GetBytes("\r\nConnection: keep-alive"); private static readonly byte[] _bytesTransferEncodingChunked = Encoding.ASCII.GetBytes("\r\nTransfer-Encoding: chunked"); private static readonly byte[] _bytesServer = Encoding.ASCII.GetBytes("\r\nServer: " + Constants.ServerName); private static readonly Action<PipeWriter, ReadOnlyMemory<byte>> _writeChunk = WriteChunk; private readonly object _onStartingSync = new Object(); private readonly object _onCompletedSync = new Object(); protected Streams _streams; protected Stack<KeyValuePair<Func<object, Task>, object>> _onStarting; protected Stack<KeyValuePair<Func<object, Task>, object>> _onCompleted; protected volatile int _requestAborted; protected CancellationTokenSource _abortedCts; private CancellationToken? 
_manuallySetRequestAbortToken; protected RequestProcessingStatus _requestProcessingStatus; protected volatile bool _keepAlive; // volatile, see: https://msdn.microsoft.com/en-us/library/x13ttww7.aspx protected bool _upgradeAvailable; private bool _canHaveBody; private bool _autoChunk; protected Exception _applicationException; private BadHttpRequestException _requestRejectedException; protected HttpVersion _httpVersion; private string _requestId; protected int _requestHeadersParsed; protected long _responseBytesWritten; private readonly IHttpProtocolContext _context; private string _scheme = null; public HttpProtocol(IHttpProtocolContext context) { _context = context; ServerOptions = ServiceContext.ServerOptions; HttpResponseControl = this; RequestBodyPipe = CreateRequestBodyPipe(); } public IHttpResponseControl HttpResponseControl { get; set; } public Pipe RequestBodyPipe { get; } public ServiceContext ServiceContext => _context.ServiceContext; private IPEndPoint LocalEndPoint => _context.LocalEndPoint; private IPEndPoint RemoteEndPoint => _context.RemoteEndPoint; public IFeatureCollection ConnectionFeatures => _context.ConnectionFeatures; public IHttpOutputProducer Output { get; protected set; } protected IKestrelTrace Log => ServiceContext.Log; private DateHeaderValueManager DateHeaderValueManager => ServiceContext.DateHeaderValueManager; // Hold direct reference to ServerOptions since this is used very often in the request processing path protected KestrelServerOptions ServerOptions { get; } protected string ConnectionId => _context.ConnectionId; public string ConnectionIdFeature { get; set; } public bool HasStartedConsumingRequestBody { get; set; } public long? MaxRequestBodySize { get; set; } public bool AllowSynchronousIO { get; set; } /// <summary> /// The request id. 
<seealso cref="HttpContext.TraceIdentifier"/> /// </summary> public string TraceIdentifier { set => _requestId = value; get { // don't generate an ID until it is requested if (_requestId == null) { _requestId = CreateRequestId(); } return _requestId; } } public abstract bool IsUpgradableRequest { get; } public bool IsUpgraded { get; set; } public IPAddress RemoteIpAddress { get; set; } public int RemotePort { get; set; } public IPAddress LocalIpAddress { get; set; } public int LocalPort { get; set; } public string Scheme { get; set; } public string Method { get; set; } public string PathBase { get; set; } public string Path { get; set; } public string QueryString { get; set; } public string RawTarget { get; set; } public string HttpVersion { get { if (_httpVersion == Http.HttpVersion.Http11) { return HttpUtilities.Http11Version; } if (_httpVersion == Http.HttpVersion.Http10) { return HttpUtilities.Http10Version; } if (_httpVersion == Http.HttpVersion.Http2) { return HttpUtilities.Http2Version; } return string.Empty; } [MethodImpl(MethodImplOptions.AggressiveInlining)] set { // GetKnownVersion returns versions which ReferenceEquals interned string // As most common path, check for this only in fast-path and inline if (ReferenceEquals(value, HttpUtilities.Http11Version)) { _httpVersion = Http.HttpVersion.Http11; } else if (ReferenceEquals(value, HttpUtilities.Http10Version)) { _httpVersion = Http.HttpVersion.Http10; } else if (ReferenceEquals(value, HttpUtilities.Http2Version)) { _httpVersion = Http.HttpVersion.Http2; } else { HttpVersionSetSlow(value); } } } [MethodImpl(MethodImplOptions.NoInlining)] private void HttpVersionSetSlow(string value) { if (value == HttpUtilities.Http11Version) { _httpVersion = Http.HttpVersion.Http11; } else if (value == HttpUtilities.Http10Version) { _httpVersion = Http.HttpVersion.Http10; } else if (value == HttpUtilities.Http2Version) { _httpVersion = Http.HttpVersion.Http2; } else { _httpVersion = Http.HttpVersion.Unknown; } } public IHeaderDictionary RequestHeaders { get; set; } public Stream RequestBody { get; set; } private int _statusCode; public int StatusCode { get => _statusCode; set { if (HasResponseStarted) { ThrowResponseAlreadyStartedException(nameof(StatusCode)); } _statusCode = value; } } private string _reasonPhrase; public string ReasonPhrase { get => _reasonPhrase; set { if (HasResponseStarted) { ThrowResponseAlreadyStartedException(nameof(ReasonPhrase)); } _reasonPhrase = value; } } public IHeaderDictionary ResponseHeaders { get; set; } public Stream ResponseBody { get; set; } public CancellationToken RequestAborted { get { // If a request abort token was previously explicitly set, return it. if (_manuallySetRequestAbortToken.HasValue) { return _manuallySetRequestAbortToken.Value; } // Otherwise, get the abort CTS. If we have one, which would mean that someone previously // asked for the RequestAborted token, simply return its token. If we don't, // check to see whether we've already aborted, in which case just return an // already canceled token. Finally, force a source into existence if we still // don't have one, and return its token. var cts = _abortedCts; return cts != null ? cts.Token : (_requestAborted == 1) ? new CancellationToken(true) : RequestAbortedSource.Token; } set { // Set an abort token, overriding one we create internally. This setter and associated // field exist purely to support IHttpRequestLifetimeFeature.set_RequestAborted. 
_manuallySetRequestAbortToken = value; } } private CancellationTokenSource RequestAbortedSource { get { // Get the abort token, lazily-initializing it if necessary. // Make sure it's canceled if an abort request already came in. // EnsureInitialized can return null since _abortedCts is reset to null // after it's already been initialized to a non-null value. // If EnsureInitialized does return null, this property was accessed between // requests so it's safe to return an ephemeral CancellationTokenSource. var cts = LazyInitializer.EnsureInitialized(ref _abortedCts, () => new CancellationTokenSource()) ?? new CancellationTokenSource(); if (_requestAborted == 1) { cts.Cancel(); } return cts; } } public bool HasResponseStarted => _requestProcessingStatus == RequestProcessingStatus.ResponseStarted; protected HttpRequestHeaders HttpRequestHeaders { get; } = new HttpRequestHeaders(); protected HttpResponseHeaders HttpResponseHeaders { get; } = new HttpResponseHeaders(); public MinDataRate MinRequestBodyDataRate { get; set; } public MinDataRate MinResponseDataRate { get; set; } public void InitializeStreams(MessageBody messageBody) { if (_streams == null) { _streams = new Streams(bodyControl: this, httpResponseControl: this); } (RequestBody, ResponseBody) = _streams.Start(messageBody); } public void PauseStreams() => _streams.Pause(); public void StopStreams() => _streams.Stop(); // For testing internal void ResetState() { _requestProcessingStatus = RequestProcessingStatus.RequestPending; } public void Reset() { _onStarting = null; _onCompleted = null; _requestProcessingStatus = RequestProcessingStatus.RequestPending; _autoChunk = false; _applicationException = null; _requestRejectedException = null; ResetFeatureCollection(); HasStartedConsumingRequestBody = false; MaxRequestBodySize = ServerOptions.Limits.MaxRequestBodySize; AllowSynchronousIO = ServerOptions.AllowSynchronousIO; TraceIdentifier = null; Method = null; PathBase = null; Path = null; RawTarget = null; QueryString = null; _httpVersion = Http.HttpVersion.Unknown; _statusCode = StatusCodes.Status200OK; _reasonPhrase = null; RemoteIpAddress = RemoteEndPoint?.Address; RemotePort = RemoteEndPoint?.Port ?? 0; LocalIpAddress = LocalEndPoint?.Address; LocalPort = LocalEndPoint?.Port ?? 0; ConnectionIdFeature = ConnectionId; HttpRequestHeaders.Reset(); HttpResponseHeaders.Reset(); RequestHeaders = HttpRequestHeaders; ResponseHeaders = HttpResponseHeaders; if (_scheme == null) { var tlsFeature = ConnectionFeatures?[typeof(ITlsConnectionFeature)]; _scheme = tlsFeature != null ? 
"https" : "http"; } Scheme = _scheme; _manuallySetRequestAbortToken = null; _abortedCts = null; // Allow two bytes for \r\n after headers _requestHeadersParsed = 0; _responseBytesWritten = 0; MinRequestBodyDataRate = ServerOptions.Limits.MinRequestBodyDataRate; MinResponseDataRate = ServerOptions.Limits.MinResponseDataRate; OnReset(); } protected abstract void OnReset(); protected virtual void OnRequestProcessingEnding() { } protected virtual void OnRequestProcessingEnded() { } protected virtual void BeginRequestProcessing() { } protected virtual bool BeginRead(out ValueAwaiter<ReadResult> awaitable) { awaitable = default; return false; } protected abstract string CreateRequestId(); protected abstract MessageBody CreateMessageBody(); protected abstract bool TryParseRequest(ReadResult result, out bool endConnection); private void CancelRequestAbortedToken() { try { RequestAbortedSource.Cancel(); _abortedCts = null; } catch (Exception ex) { Log.ApplicationError(ConnectionId, TraceIdentifier, ex); } } /// <summary> /// Immediate kill the connection and poison the request and response streams. /// </summary> public void Abort(Exception error) { if (Interlocked.Exchange(ref _requestAborted, 1) == 0) { _keepAlive = false; _streams?.Abort(error); Output.Abort(error); // Potentially calling user code. CancelRequestAbortedToken logs any exceptions. ServiceContext.ThreadPool.UnsafeRun(state => ((HttpProtocol)state).CancelRequestAbortedToken(), this); } } public void OnHeader(Span<byte> name, Span<byte> value) { _requestHeadersParsed++; if (_requestHeadersParsed > ServerOptions.Limits.MaxRequestHeaderCount) { ThrowRequestRejected(RequestRejectionReason.TooManyHeaders); } var valueString = value.GetAsciiStringNonNullCharacters(); HttpRequestHeaders.Append(name, valueString); } public async Task ProcessRequestsAsync<TContext>(IHttpApplication<TContext> application) { try { await ProcessRequests(application); } catch (BadHttpRequestException ex) { // Handle BadHttpRequestException thrown during request line or header parsing. // SetBadRequestState logs the error. SetBadRequestState(ex); } catch (ConnectionResetException ex) { // Don't log ECONNRESET errors made between requests. Browsers like IE will reset connections regularly. 
if (_requestProcessingStatus != RequestProcessingStatus.RequestPending) { Log.RequestProcessingError(ConnectionId, ex); } } catch (IOException ex) { Log.RequestProcessingError(ConnectionId, ex); } catch (Exception ex) { Log.LogWarning(0, ex, CoreStrings.RequestProcessingEndError); } finally { try { OnRequestProcessingEnding(); await TryProduceInvalidRequestResponse(); Output.Dispose(); } catch (Exception ex) { Log.LogWarning(0, ex, CoreStrings.ConnectionShutdownError); } finally { OnRequestProcessingEnded(); } } } private async Task ProcessRequests<TContext>(IHttpApplication<TContext> application) { // Keep-alive is default for HTTP/1.1 and HTTP/2; parsing and errors will change its value _keepAlive = true; do { BeginRequestProcessing(); var result = default(ReadResult); var endConnection = false; do { if (BeginRead(out var awaitable)) { result = await awaitable; } } while (!TryParseRequest(result, out endConnection)); if (endConnection) { // Connection finished, stop processing requests return; } var messageBody = CreateMessageBody(); if (!messageBody.RequestKeepAlive) { _keepAlive = false; } _upgradeAvailable = messageBody.RequestUpgrade; InitializeStreams(messageBody); var httpContext = application.CreateContext(this); BadHttpRequestException badRequestException = null; try { KestrelEventSource.Log.RequestStart(this); // Run the application code for this request await application.ProcessRequestAsync(httpContext); if (_requestAborted == 0) { VerifyResponseContentLength(); } } catch (Exception ex) { ReportApplicationError(ex); // Capture BadHttpRequestException for further processing badRequestException = ex as BadHttpRequestException; } KestrelEventSource.Log.RequestStop(this); // Trigger OnStarting if it hasn't been called yet and the app hasn't // already failed. If an OnStarting callback throws we can go through // our normal error handling in ProduceEnd. // https://github.com/aspnet/KestrelHttpServer/issues/43 if (!HasResponseStarted && _applicationException == null && _onStarting != null) { await FireOnStarting(); } PauseStreams(); if (_onCompleted != null) { await FireOnCompleted(); } if (badRequestException == null) { // If _requestAbort is set, the connection has already been closed. if (_requestAborted == 0) { // Call ProduceEnd() before consuming the rest of the request body to prevent // delaying clients waiting for the chunk terminator: // // https://github.com/dotnet/corefx/issues/17330#issuecomment-288248663 // // This also prevents the 100 Continue response from being sent if the app // never tried to read the body. // https://github.com/aspnet/KestrelHttpServer/issues/2102 // // ProduceEnd() must be called before _application.DisposeContext(), to ensure // HttpContext.Response.StatusCode is correctly set when // IHttpContextFactory.Dispose(HttpContext) is called. await ProduceEnd(); // ForZeroContentLength does not complete the reader nor the writer if (!messageBody.IsEmpty) { try { // Finish reading the request body in case the app did not. await messageBody.ConsumeAsync(); } catch (BadHttpRequestException ex) { // Capture BadHttpRequestException for further processing badRequestException = ex; } } } else if (!HasResponseStarted) { // If the request was aborted and no response was sent, there's no // meaningful status code to log. StatusCode = 0; } } if (badRequestException != null) { // Handle BadHttpRequestException thrown during app execution or remaining message body consumption. 
// This has to be caught here so StatusCode is set properly before disposing the HttpContext // (DisposeContext logs StatusCode). SetBadRequestState(badRequestException); } application.DisposeContext(httpContext, _applicationException); // StopStreams should be called before the end of the "if (!_requestProcessingStopping)" block // to ensure InitializeStreams has been called. StopStreams(); if (HasStartedConsumingRequestBody) { RequestBodyPipe.Reader.Complete(); // Wait for MessageBody.PumpAsync() to call RequestBodyPipe.Writer.Complete(). await messageBody.StopAsync(); // At this point both the request body pipe reader and writer should be completed. RequestBodyPipe.Reset(); } if (badRequestException != null) { // Bad request reported, stop processing requests return; } } while (_keepAlive); } public void OnStarting(Func<object, Task> callback, object state) { lock (_onStartingSync) { if (HasResponseStarted) { ThrowResponseAlreadyStartedException(nameof(OnStarting)); } if (_onStarting == null) { _onStarting = new Stack<KeyValuePair<Func<object, Task>, object>>(); } _onStarting.Push(new KeyValuePair<Func<object, Task>, object>(callback, state)); } } public void OnCompleted(Func<object, Task> callback, object state) { lock (_onCompletedSync) { if (_onCompleted == null) { _onCompleted = new Stack<KeyValuePair<Func<object, Task>, object>>(); } _onCompleted.Push(new KeyValuePair<Func<object, Task>, object>(callback, state)); } } protected Task FireOnStarting() { Stack<KeyValuePair<Func<object, Task>, object>> onStarting; lock (_onStartingSync) { onStarting = _onStarting; _onStarting = null; } if (onStarting == null) { return Task.CompletedTask; } else { return FireOnStartingMayAwait(onStarting); } } private Task FireOnStartingMayAwait(Stack<KeyValuePair<Func<object, Task>, object>> onStarting) { try { var count = onStarting.Count; for (var i = 0; i < count; i++) { var entry = onStarting.Pop(); var task = entry.Key.Invoke(entry.Value); if (!ReferenceEquals(task, Task.CompletedTask)) { return FireOnStartingAwaited(task, onStarting); } } } catch (Exception ex) { ReportApplicationError(ex); } return Task.CompletedTask; } private async Task FireOnStartingAwaited(Task currentTask, Stack<KeyValuePair<Func<object, Task>, object>> onStarting) { try { await currentTask; var count = onStarting.Count; for (var i = 0; i < count; i++) { var entry = onStarting.Pop(); await entry.Key.Invoke(entry.Value); } } catch (Exception ex) { ReportApplicationError(ex); } } protected Task FireOnCompleted() { Stack<KeyValuePair<Func<object, Task>, object>> onCompleted; lock (_onCompletedSync) { onCompleted = _onCompleted; _onCompleted = null; } if (onCompleted == null) { return Task.CompletedTask; } else { return FireOnCompletedAwaited(onCompleted); } } private async Task FireOnCompletedAwaited(Stack<KeyValuePair<Func<object, Task>, object>> onCompleted) { foreach (var entry in onCompleted) { try { await entry.Key.Invoke(entry.Value); } catch (Exception ex) { ReportApplicationError(ex); } } } public Task FlushAsync(CancellationToken cancellationToken = default(CancellationToken)) { if (!HasResponseStarted) { var initializeTask = InitializeResponseAsync(0); // If return is Task.CompletedTask no awaiting is required if (!ReferenceEquals(initializeTask, Task.CompletedTask)) { return FlushAsyncAwaited(initializeTask, cancellationToken); } } return Output.FlushAsync(cancellationToken); } [MethodImpl(MethodImplOptions.NoInlining)] private async Task FlushAsyncAwaited(Task initializeTask, CancellationToken cancellationToken) { 
await initializeTask; await Output.FlushAsync(cancellationToken); } public Task WriteAsync(ReadOnlyMemory<byte> data, CancellationToken cancellationToken = default(CancellationToken)) { // For the first write, ensure headers are flushed if WriteDataAsync isn't called. var firstWrite = !HasResponseStarted; if (firstWrite) { var initializeTask = InitializeResponseAsync(data.Length); // If return is Task.CompletedTask no awaiting is required if (!ReferenceEquals(initializeTask, Task.CompletedTask)) { return WriteAsyncAwaited(initializeTask, data, cancellationToken); } } else { VerifyAndUpdateWrite(data.Length); } if (_canHaveBody) { if (_autoChunk) { if (data.Length == 0) { return !firstWrite ? Task.CompletedTask : FlushAsync(cancellationToken); } return WriteChunkedAsync(data, cancellationToken); } else { CheckLastWrite(); return Output.WriteDataAsync(data.Span, cancellationToken: cancellationToken); } } else { HandleNonBodyResponseWrite(); return !firstWrite ? Task.CompletedTask : FlushAsync(cancellationToken); } } public async Task WriteAsyncAwaited(Task initializeTask, ReadOnlyMemory<byte> data, CancellationToken cancellationToken) { await initializeTask; // WriteAsyncAwaited is only called for the first write to the body. // Ensure headers are flushed if Write(Chunked)Async isn't called. if (_canHaveBody) { if (_autoChunk) { if (data.Length == 0) { await FlushAsync(cancellationToken); return; } await WriteChunkedAsync(data, cancellationToken); } else { CheckLastWrite(); await Output.WriteDataAsync(data.Span, cancellationToken: cancellationToken); } } else { HandleNonBodyResponseWrite(); await FlushAsync(cancellationToken); } } private void VerifyAndUpdateWrite(int count) { var responseHeaders = HttpResponseHeaders; if (responseHeaders != null && !responseHeaders.HasTransferEncoding && responseHeaders.ContentLength.HasValue && _responseBytesWritten + count > responseHeaders.ContentLength.Value) { _keepAlive = false; throw new InvalidOperationException( CoreStrings.FormatTooManyBytesWritten(_responseBytesWritten + count, responseHeaders.ContentLength.Value)); } _responseBytesWritten += count; } private void CheckLastWrite() { var responseHeaders = HttpResponseHeaders; // Prevent firing request aborted token if this is the last write, to avoid // aborting the request if the app is still running when the client receives // the final bytes of the response and gracefully closes the connection. // // Called after VerifyAndUpdateWrite(), so _responseBytesWritten has already been updated. if (responseHeaders != null && !responseHeaders.HasTransferEncoding && responseHeaders.ContentLength.HasValue && _responseBytesWritten == responseHeaders.ContentLength.Value) { _abortedCts = null; } } protected void VerifyResponseContentLength() { var responseHeaders = HttpResponseHeaders; if (!HttpMethods.IsHead(Method) && StatusCode != StatusCodes.Status304NotModified && !responseHeaders.HasTransferEncoding && responseHeaders.ContentLength.HasValue && _responseBytesWritten < responseHeaders.ContentLength.Value) { // We need to close the connection if any bytes were written since the client // cannot be certain of how many bytes it will receive. 
if (_responseBytesWritten > 0) { _keepAlive = false; } ReportApplicationError(new InvalidOperationException( CoreStrings.FormatTooFewBytesWritten(_responseBytesWritten, responseHeaders.ContentLength.Value))); } } private Task WriteChunkedAsync(ReadOnlyMemory<byte> data, CancellationToken cancellationToken) { return Output.WriteAsync(_writeChunk, data); } private static void WriteChunk(PipeWriter writableBuffer, ReadOnlyMemory<byte> buffer) { var writer = OutputWriter.Create(writableBuffer); if (buffer.Length > 0) { ChunkWriter.WriteBeginChunkBytes(ref writer, buffer.Length); writer.Write(buffer.Span); ChunkWriter.WriteEndChunkBytes(ref writer); } } private static ArraySegment<byte> CreateAsciiByteArraySegment(string text) { var bytes = Encoding.ASCII.GetBytes(text); return new ArraySegment<byte>(bytes); } public void ProduceContinue() { if (HasResponseStarted) { return; } if (_httpVersion != Http.HttpVersion.Http10 && RequestHeaders.TryGetValue("Expect", out var expect) && (expect.FirstOrDefault() ?? "").Equals("100-continue", StringComparison.OrdinalIgnoreCase)) { Output.Write100ContinueAsync(default(CancellationToken)).GetAwaiter().GetResult(); } } public Task InitializeResponseAsync(int firstWriteByteCount) { var startingTask = FireOnStarting(); // If return is Task.CompletedTask no awaiting is required if (!ReferenceEquals(startingTask, Task.CompletedTask)) { return InitializeResponseAwaited(startingTask, firstWriteByteCount); } if (_applicationException != null) { ThrowResponseAbortedException(); } VerifyAndUpdateWrite(firstWriteByteCount); ProduceStart(appCompleted: false); return Task.CompletedTask; } [MethodImpl(MethodImplOptions.NoInlining)] public async Task InitializeResponseAwaited(Task startingTask, int firstWriteByteCount) { await startingTask; if (_applicationException != null) { ThrowResponseAbortedException(); } VerifyAndUpdateWrite(firstWriteByteCount); ProduceStart(appCompleted: false); } private void ProduceStart(bool appCompleted) { if (HasResponseStarted) { return; } _requestProcessingStatus = RequestProcessingStatus.ResponseStarted; CreateResponseHeader(appCompleted); } protected Task TryProduceInvalidRequestResponse() { // If _requestAborted is set, the connection has already been closed. if (_requestRejectedException != null && _requestAborted == 0) { return ProduceEnd(); } return Task.CompletedTask; } protected Task ProduceEnd() { if (_requestRejectedException != null || _applicationException != null) { if (HasResponseStarted) { // We can no longer change the response, so we simply close the connection. _keepAlive = false; return Task.CompletedTask; } // If the request was rejected, the error state has already been set by SetBadRequestState and // that should take precedence. if (_requestRejectedException != null) { SetErrorResponseException(_requestRejectedException); } else { // 500 Internal Server Error SetErrorResponseHeaders(statusCode: StatusCodes.Status500InternalServerError); } } if (!HasResponseStarted) { return ProduceEndAwaited(); } return WriteSuffix(); } [MethodImpl(MethodImplOptions.NoInlining)] private async Task ProduceEndAwaited() { ProduceStart(appCompleted: true); // Force flush await Output.FlushAsync(default(CancellationToken)); await WriteSuffix(); } private Task WriteSuffix() { // _autoChunk should be checked after we are sure ProduceStart() has been called // since ProduceStart() may set _autoChunk to true. 
if (_autoChunk || _httpVersion == Http.HttpVersion.Http2) { return WriteSuffixAwaited(); } if (_keepAlive) { Log.ConnectionKeepAlive(ConnectionId); } if (HttpMethods.IsHead(Method) && _responseBytesWritten > 0) { Log.ConnectionHeadResponseBodyWrite(ConnectionId, _responseBytesWritten); } return Task.CompletedTask; } private async Task WriteSuffixAwaited() { // For the same reason we call CheckLastWrite() in Content-Length responses. _abortedCts = null; await Output.WriteStreamSuffixAsync(default(CancellationToken)); if (_keepAlive) { Log.ConnectionKeepAlive(ConnectionId); } if (HttpMethods.IsHead(Method) && _responseBytesWritten > 0) { Log.ConnectionHeadResponseBodyWrite(ConnectionId, _responseBytesWritten); } } private void CreateResponseHeader(bool appCompleted) { var responseHeaders = HttpResponseHeaders; var hasConnection = responseHeaders.HasConnection; var connectionOptions = HttpHeaders.ParseConnection(responseHeaders.HeaderConnection); var hasTransferEncoding = responseHeaders.HasTransferEncoding; var transferCoding = HttpHeaders.GetFinalTransferCoding(responseHeaders.HeaderTransferEncoding); if (_keepAlive && hasConnection && (connectionOptions & ConnectionOptions.KeepAlive) != ConnectionOptions.KeepAlive) { _keepAlive = false; } // https://tools.ietf.org/html/rfc7230#section-3.3.1 // If any transfer coding other than // chunked is applied to a response payload body, the sender MUST either // apply chunked as the final transfer coding or terminate the message // by closing the connection. if (hasTransferEncoding && transferCoding != TransferCoding.Chunked) { _keepAlive = false; } // Set whether response can have body _canHaveBody = StatusCanHaveBody(StatusCode) && Method != "HEAD"; // Don't set the Content-Length or Transfer-Encoding headers // automatically for HEAD requests or 204, 205, 304 responses. if (_canHaveBody) { if (!hasTransferEncoding && !responseHeaders.ContentLength.HasValue) { if (appCompleted && StatusCode != StatusCodes.Status101SwitchingProtocols) { // Since the app has completed and we are only now generating // the headers we can safely set the Content-Length to 0. responseHeaders.ContentLength = 0; } else { // Note for future reference: never change this to set _autoChunk to true on HTTP/1.0 // connections, even if we were to infer the client supports it because an HTTP/1.0 request // was received that used chunked encoding. Sending a chunked response to an HTTP/1.0 // client would break compliance with RFC 7230 (section 3.3.1): // // A server MUST NOT send a response containing Transfer-Encoding unless the corresponding // request indicates HTTP/1.1 (or later). // // This also covers HTTP/2, which forbids chunked encoding in RFC 7540 (section 8.1: // // The chunked transfer encoding defined in Section 4.1 of [RFC7230] MUST NOT be used in HTTP/2. 
if (_httpVersion == Http.HttpVersion.Http11 && StatusCode != StatusCodes.Status101SwitchingProtocols) { _autoChunk = true; responseHeaders.SetRawTransferEncoding("chunked", _bytesTransferEncodingChunked); } else { _keepAlive = false; } } } } else if (hasTransferEncoding) { RejectNonBodyTransferEncodingResponse(appCompleted); } responseHeaders.SetReadOnly(); if (!hasConnection && _httpVersion != Http.HttpVersion.Http2) { if (!_keepAlive) { responseHeaders.SetRawConnection("close", _bytesConnectionClose); } else if (_httpVersion == Http.HttpVersion.Http10) { responseHeaders.SetRawConnection("keep-alive", _bytesConnectionKeepAlive); } } if (ServerOptions.AddServerHeader && !responseHeaders.HasServer) { responseHeaders.SetRawServer(Constants.ServerName, _bytesServer); } if (!responseHeaders.HasDate) { var dateHeaderValues = DateHeaderValueManager.GetDateHeaderValues(); responseHeaders.SetRawDate(dateHeaderValues.String, dateHeaderValues.Bytes); } Output.WriteResponseHeaders(StatusCode, ReasonPhrase, responseHeaders); } public bool StatusCanHaveBody(int statusCode) { // List of status codes taken from Microsoft.Net.Http.Server.Response return statusCode != StatusCodes.Status204NoContent && statusCode != StatusCodes.Status205ResetContent && statusCode != StatusCodes.Status304NotModified; } private void ThrowResponseAlreadyStartedException(string value) { throw new InvalidOperationException(CoreStrings.FormatParameterReadOnlyAfterResponseStarted(value)); } private void RejectNonBodyTransferEncodingResponse(bool appCompleted) { var ex = new InvalidOperationException(CoreStrings.FormatHeaderNotAllowedOnResponse("Transfer-Encoding", StatusCode)); if (!appCompleted) { // Back out of header creation surface exeception in user code _requestProcessingStatus = RequestProcessingStatus.AppStarted; throw ex; } else { ReportApplicationError(ex); // 500 Internal Server Error SetErrorResponseHeaders(statusCode: StatusCodes.Status500InternalServerError); } } private void SetErrorResponseException(BadHttpRequestException ex) { SetErrorResponseHeaders(ex.StatusCode); if (!StringValues.IsNullOrEmpty(ex.AllowedHeader)) { HttpResponseHeaders.HeaderAllow = ex.AllowedHeader; } } private void SetErrorResponseHeaders(int statusCode) { Debug.Assert(!HasResponseStarted, $"{nameof(SetErrorResponseHeaders)} called after response had already started."); StatusCode = statusCode; ReasonPhrase = null; var responseHeaders = HttpResponseHeaders; responseHeaders.Reset(); var dateHeaderValues = DateHeaderValueManager.GetDateHeaderValues(); responseHeaders.SetRawDate(dateHeaderValues.String, dateHeaderValues.Bytes); responseHeaders.ContentLength = 0; if (ServerOptions.AddServerHeader) { responseHeaders.SetRawServer(Constants.ServerName, _bytesServer); } } public void HandleNonBodyResponseWrite() { // Writes to HEAD response are ignored and logged at the end of the request if (Method != "HEAD") { // Throw Exception for 204, 205, 304 responses. 
throw new InvalidOperationException(CoreStrings.FormatWritingToResponseBodyNotSupported(StatusCode)); } } private void ThrowResponseAbortedException() { throw new ObjectDisposedException(CoreStrings.UnhandledApplicationException, _applicationException); } public void ThrowRequestRejected(RequestRejectionReason reason) => throw BadHttpRequestException.GetException(reason); public void ThrowRequestRejected(RequestRejectionReason reason, string detail) => throw BadHttpRequestException.GetException(reason, detail); public void ThrowRequestTargetRejected(Span<byte> target) => throw GetInvalidRequestTargetException(target); private BadHttpRequestException GetInvalidRequestTargetException(Span<byte> target) => BadHttpRequestException.GetException( RequestRejectionReason.InvalidRequestTarget, Log.IsEnabled(LogLevel.Information) ? target.GetAsciiStringEscaped(Constants.MaxExceptionDetailSize) : string.Empty); public void SetBadRequestState(RequestRejectionReason reason) { SetBadRequestState(BadHttpRequestException.GetException(reason)); } public void SetBadRequestState(BadHttpRequestException ex) { Log.ConnectionBadRequest(ConnectionId, ex); if (!HasResponseStarted) { SetErrorResponseException(ex); } _keepAlive = false; _requestRejectedException = ex; } protected void ReportApplicationError(Exception ex) { if (_applicationException == null) { _applicationException = ex; } else if (_applicationException is AggregateException) { _applicationException = new AggregateException(_applicationException, ex).Flatten(); } else { _applicationException = new AggregateException(_applicationException, ex); } Log.ApplicationError(ConnectionId, TraceIdentifier, ex); } private Pipe CreateRequestBodyPipe() => new Pipe(new PipeOptions ( pool: _context.MemoryPool, readerScheduler: ServiceContext.ThreadPool, writerScheduler: PipeScheduler.Inline, pauseWriterThreshold: 1, resumeWriterThreshold: 1 )); } }
1
14,823
Nit: missing newline above.
aspnet-KestrelHttpServer
.cs
@@ -76,6 +76,13 @@ func StartKubeProxy(k8s kubernetes.Interface, hostname string, } go func() { + // Before we start, scan for all finished / timed out connections to + // free up the conntrack table asap as it may take time to sync up the + // proxy and kick off the first full cleaner scan. + lc := conntrack.NewLivenessScanner(kp.conntrackTimeouts, kp.dsrEnabled) + connScan := conntrack.NewScanner(kp.ctMap, lc.ScanEntry) + connScan.Scan() + err := kp.start() if err != nil { log.WithError(err).Panic("kube-proxy failed to start")
1
// Copyright (c) 2020 Tigera, Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package proxy import ( "net" "sync" "github.com/pkg/errors" log "github.com/sirupsen/logrus" "k8s.io/client-go/kubernetes" "github.com/projectcalico/felix/bpf" "github.com/projectcalico/felix/bpf/conntrack" "github.com/projectcalico/felix/bpf/routes" ) // KubeProxy is a wrapper of Proxy that deals with higher level issue like // configuration, restarting etc. type KubeProxy struct { proxy Proxy hostIPUpdates chan []net.IP stopOnce sync.Once lock sync.Mutex exiting chan struct{} wg sync.WaitGroup k8s kubernetes.Interface hostname string frontendMap bpf.Map backendMap bpf.Map affinityMap bpf.Map ctMap bpf.Map rt *RTCache opts []Option conntrackTimeouts conntrack.Timeouts dsrEnabled bool } // StartKubeProxy start a new kube-proxy if there was no error func StartKubeProxy(k8s kubernetes.Interface, hostname string, frontendMap, backendMap, affinityMap, ctMap bpf.Map, opts ...Option) (*KubeProxy, error) { kp := &KubeProxy{ k8s: k8s, hostname: hostname, frontendMap: frontendMap, backendMap: backendMap, affinityMap: affinityMap, ctMap: ctMap, opts: opts, rt: NewRTCache(), hostIPUpdates: make(chan []net.IP, 1), exiting: make(chan struct{}), } for _, o := range opts { if err := o(kp); err != nil { return nil, errors.WithMessage(err, "applying option to kube-proxy") } } go func() { err := kp.start() if err != nil { log.WithError(err).Panic("kube-proxy failed to start") } }() return kp, nil } // Stop stops KubeProxy and waits for it to exit func (kp *KubeProxy) Stop() { kp.stopOnce.Do(func() { kp.lock.Lock() defer kp.lock.Unlock() close(kp.exiting) close(kp.hostIPUpdates) kp.proxy.Stop() kp.wg.Wait() }) } func (kp *KubeProxy) run(hostIPs []net.IP) error { kp.lock.Lock() defer kp.lock.Unlock() withLocalNP := make([]net.IP, len(hostIPs), len(hostIPs)+1) copy(withLocalNP, hostIPs) withLocalNP = append(withLocalNP, podNPIP) syncer, err := NewSyncer(withLocalNP, kp.frontendMap, kp.backendMap, kp.affinityMap, kp.rt) if err != nil { return errors.WithMessage(err, "new bpf syncer") } lc := conntrack.NewLivenessScanner(kp.conntrackTimeouts, kp.dsrEnabled) connScan := conntrack.NewScanner(kp.ctMap, lc.ScanEntry, conntrack.NewStaleNATScanner(syncer.ConntrackFrontendHasBackend), ) proxy, err := New(kp.k8s, syncer, connScan, kp.hostname, kp.opts...) 
if err != nil { return errors.WithMessage(err, "new proxy") } log.Infof("kube-proxy started, hostname=%q hostIPs=%+v", kp.hostname, hostIPs) kp.proxy = proxy return nil } func (kp *KubeProxy) start() error { // wait for the initial update hostIPs := <-kp.hostIPUpdates err := kp.run(hostIPs) if err != nil { return err } kp.wg.Add(1) go func() { defer kp.wg.Done() for { hostIPs, ok := <-kp.hostIPUpdates if !ok { defer log.Error("kube-proxy stopped since hostIPUpdates closed") kp.proxy.Stop() return } stopped := make(chan struct{}) go func() { defer close(stopped) defer log.Info("kube-proxy stopped to restart with updated host IPs") kp.proxy.Stop() }() waitforstop: for { select { case hostIPs, ok = <-kp.hostIPUpdates: if !ok { log.Error("kube-proxy: hostIPUpdates closed") return } case <-kp.exiting: log.Info("kube-proxy: exiting") return case <-stopped: err = kp.run(hostIPs) if err != nil { log.Panic("kube-proxy failed to start after host IPs update") } break waitforstop } } } }() return nil } // OnHostIPsUpdate should be used by an external user to update the proxy's list // of host IPs func (kp *KubeProxy) OnHostIPsUpdate(IPs []net.IP) { select { case kp.hostIPUpdates <- IPs: // nothing default: // in case we would block, drop the now stale update and replace it // with a new one. Do it non-blocking way in case it was just consumed. select { case <-kp.hostIPUpdates: default: } kp.hostIPUpdates <- IPs } log.Debugf("kube-proxy OnHostIPsUpdate: %+v", IPs) } // OnRouteUpdate should be used to update the internal state of routing tables func (kp *KubeProxy) OnRouteUpdate(k routes.Key, v routes.Value) { if err := kp.rt.Update(k, v); err != nil { log.WithField("error", err).Error("kube-proxy: OnRouteUpdate") } else { log.WithFields(log.Fields{"key": k, "value": v}).Debug("kube-proxy: OnRouteUpdate") } } // OnRouteDelete should be used to update the internal state of routing tables func (kp *KubeProxy) OnRouteDelete(k routes.Key) { _ = kp.rt.Delete(k) log.WithField("key", k).Debug("kube-proxy: OnRouteDelete") }
1
18,281
Since it's a one-off, it's worth putting an info log before and after it.
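A minimal sketch of what this comment suggests, built from the identifiers in the patch above (kp, conntrack, connScan all come from that diff); the log calls and their wording are illustrative, not the project's actual fix:

// Sketch only: log before and after the one-off startup scan so the
// conntrack cleanup is visible in the kube-proxy logs.
log.Info("kube-proxy: scanning conntrack for finished/timed-out connections before start")
lc := conntrack.NewLivenessScanner(kp.conntrackTimeouts, kp.dsrEnabled)
connScan := conntrack.NewScanner(kp.ctMap, lc.ScanEntry)
connScan.Scan()
log.Info("kube-proxy: initial conntrack scan finished")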
projectcalico-felix
go
@@ -140,8 +140,10 @@ app.controller('CalController', ['$scope', 'Calendar', 'CalendarService', 'VEven $scope.$apply(); }); } else { - $scope.calendarsPromise = CalendarService.getPublicCalendar(constants.publicSharingToken).then(function(calendar) { - $scope.calendars = [calendar]; + constants.publicSharingToken.split(".").forEach( (token) => { + $scope.calendarsPromise = CalendarService.getPublicCalendar(token).then(function(calendar) { + $scope.calendars.push(calendar); + }); is.loading = false; // TODO - scope.apply should not be necessary here $scope.$apply();
1
/** * Calendar App * * @author Raghu Nayyar * @author Georg Ehrke * @copyright 2016 Raghu Nayyar <[email protected]> * @copyright 2016 Georg Ehrke <[email protected]> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE * License as published by the Free Software Foundation; either * version 3 of the License, or any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU AFFERO GENERAL PUBLIC LICENSE for more details. * * You should have received a copy of the GNU Affero General Public * License along with this library. If not, see <http://www.gnu.org/licenses/>. * */ /** * Controller: CalController * Description: The fullcalendar controller. */ app.controller('CalController', ['$scope', 'Calendar', 'CalendarService', 'VEventService', 'SettingsService', 'TimezoneService', 'VEvent', 'is', 'fc', 'EventsEditorDialogService', 'PopoverPositioningUtility', '$window', 'isPublic', 'constants', 'settings', function ($scope, Calendar, CalendarService, VEventService, SettingsService, TimezoneService, VEvent, is, fc, EventsEditorDialogService, PopoverPositioningUtility, $window, isPublic, constants, settings) { 'use strict'; is.loading = true; $scope.calendars = []; $scope.eventSource = {}; $scope.eventModal = null; var switcher = []; if (settings.timezone === 'automatic') { $scope.defaulttimezone = TimezoneService.getDetected(); } else { $scope.defaulttimezone = settings.timezone; } function showCalendar(url) { if (switcher.indexOf(url) === -1 && $scope.eventSource[url].isRendering === false) { switcher.push(url); fc.elm.fullCalendar( 'removeEventSource', $scope.eventSource[url]); fc.elm.fullCalendar( 'addEventSource', $scope.eventSource[url]); } } function hideCalendar(url) { fc.elm.fullCalendar( 'removeEventSource', $scope.eventSource[url]); if (switcher.indexOf(url) !== -1) { switcher.splice(switcher.indexOf(url), 1); } } function createAndRenderEvent(calendar, data, start, end, tz) { VEventService.create(calendar, data).then(function(vevent) { if (calendar.enabled) { fc.elm.fullCalendar('refetchEventSources', calendar.fcEventSource); } }); } function deleteAndRemoveEvent(vevent, fcEvent) { VEventService.delete(vevent).then(function() { fc.elm.fullCalendar('removeEvents', fcEvent.id); }); } $scope.$watchCollection('calendars', function(newCalendars, oldCalendars) { newCalendars.filter(function(calendar) { return oldCalendars.indexOf(calendar) === -1; }).forEach(function(calendar) { $scope.eventSource[calendar.url] = calendar.fcEventSource; if (calendar.enabled) { showCalendar(calendar.url); } calendar.register(Calendar.hookEnabledChanged, function(enabled) { if (enabled) { showCalendar(calendar.url); } else { hideCalendar(calendar.url); //calendar.list.loading = false; } }); calendar.register(Calendar.hookColorChanged, function() { if (calendar.enabled) { hideCalendar(calendar.url); showCalendar(calendar.url); } }); }); oldCalendars.filter(function(calendar) { return newCalendars.indexOf(calendar) === -1; }).forEach(function(calendar) { var url = calendar.url; hideCalendar(calendar.url); delete $scope.eventSource[url]; }); }); TimezoneService.get($scope.defaulttimezone).then(function(timezone) { if (timezone) { ICAL.TimezoneService.register($scope.defaulttimezone, timezone.jCal); } }).catch(function() { OC.Notification.showTemporary( t('calendar', 'You are in 
an unknown timezone ({tz}), falling back to UTC', { tz: $scope.defaulttimezone }) ); $scope.defaulttimezone = 'UTC'; $scope.fcConfig.timezone = 'UTC'; fc.elm.fullCalendar('option', 'timezone', 'UTC'); }); if (!isPublic) { $scope.calendarsPromise = CalendarService.getAll().then(function (calendars) { $scope.calendars = calendars; is.loading = false; // TODO - scope.apply should not be necessary here $scope.$apply(); }); } else { $scope.calendarsPromise = CalendarService.getPublicCalendar(constants.publicSharingToken).then(function(calendar) { $scope.calendars = [calendar]; is.loading = false; // TODO - scope.apply should not be necessary here $scope.$apply(); }).catch((reason) => { angular.element('#header-right').css('display', 'none'); angular.element('#emptycontent-container').css('display', 'block'); }); } /** * Calendar UI Configuration. */ $scope.fcConfig = { timezone: $scope.defaulttimezone, select: function (start, end, jsEvent, view) { var writableCalendars = $scope.calendars.filter(function(elem) { return elem.isWritable(); }); if (writableCalendars.length === 0) { if (!isPublic) { OC.Notification.showTemporary(t('calendar', 'Please create a calendar first.')); } return; } start.add(start.toDate().getTimezoneOffset(), 'minutes'); end.add(end.toDate().getTimezoneOffset(), 'minutes'); var vevent = VEvent.fromStartEnd(start, end, $scope.defaulttimezone); vevent.calendar = writableCalendars[0]; var timestamp = Date.now(); var fcEventClass = 'new-event-dummy-' + timestamp; vevent.getFcEvent(view.start, view.end, $scope.defaulttimezone).then((fcEvents) => { const fcEvent = fcEvents[0]; fcEvent.title = t('calendar', 'New event'); fcEvent.className.push(fcEventClass); fcEvent.editable = false; fc.elm.fullCalendar('renderEvent', fcEvent); EventsEditorDialogService.open($scope, fcEvent, function() { const elements = angular.element('.' + fcEventClass); const isHidden = angular.element(elements[0]).parents('.fc-limited').length !== 0; if (isHidden) { return PopoverPositioningUtility.calculate(jsEvent.clientX, jsEvent.clientY, jsEvent.clientX, jsEvent.clientY, view); } else { return PopoverPositioningUtility.calculateByTarget(elements[0], view); } }, function() { return null; }, function() { fc.elm.fullCalendar('removeEvents', function(fcEventToCheck) { if (Array.isArray(fcEventToCheck.className)) { return (fcEventToCheck.className.indexOf(fcEventClass) !== -1); } else { return false; } }); }).then(function(result) { createAndRenderEvent(result.calendar, result.vevent.data, view.start, view.end, $scope.defaulttimezone); }).catch(function(reason) { //fcEvent is removed by unlock callback //no need to anything return null; }); }); }, eventClick: function(fcEvent, jsEvent, view) { var vevent = fcEvent.vevent; var oldCalendar = vevent.calendar; var fcEvt = fcEvent; EventsEditorDialogService.open($scope, fcEvent, function() { return PopoverPositioningUtility.calculateByTarget(jsEvent.currentTarget, view); }, function() { fcEvt.editable = false; fc.elm.fullCalendar('updateEvent', fcEvt); }, function() { fcEvt.editable = fcEvent.calendar.writable; fc.elm.fullCalendar('updateEvent', fcEvt); }).then(function(result) { // was the event moved to another calendar? 
if (result.calendar === oldCalendar) { VEventService.update(vevent).then(function() { fc.elm.fullCalendar('removeEvents', fcEvent.id); if (result.calendar.enabled) { fc.elm.fullCalendar('refetchEventSources', result.calendar.fcEventSource); } }); } else { deleteAndRemoveEvent(vevent, fcEvent); createAndRenderEvent(result.calendar, result.vevent.data, view.start, view.end, $scope.defaulttimezone); } }).catch(function(reason) { if (reason === 'delete') { deleteAndRemoveEvent(vevent, fcEvent); } }); }, eventResize: function (fcEvent, delta, revertFunc) { fcEvent.resize(delta); VEventService.update(fcEvent.vevent).catch(function() { revertFunc(); }); }, eventDrop: function (fcEvent, delta, revertFunc) { const isAllDay = !fcEvent.start.hasTime(); const defaultAllDayEventDuration = fc.elm.fullCalendar('option', 'defaultAllDayEventDuration'); const defaultAllDayEventMomentDuration = moment.duration(defaultAllDayEventDuration); const defaultTimedEventDuration = fc.elm.fullCalendar('option', 'defaultTimedEventDuration'); const defaultTimedEventMomentDuration = moment.duration(defaultTimedEventDuration); const timezone = $scope.defaulttimezone; fcEvent.drop(delta, isAllDay, timezone, defaultTimedEventMomentDuration, defaultAllDayEventMomentDuration); VEventService.update(fcEvent.vevent).catch(function() { revertFunc(); }); }, viewRender: function (view, element) { angular.element('#firstrow').find('.datepicker_current').html(view.title).text(); angular.element('#datecontrol_date').datepicker('setDate', element.fullCalendar('getDate')); var newView = view.name; if (newView !== $scope.defaultView && !isPublic) { SettingsService.setView(newView); $scope.defaultView = newView; } if (newView === 'agendaDay') { angular.element('td.fc-state-highlight').css('background-color', '#ffffff'); } else { angular.element('.fc-bg td.fc-state-highlight').css('background-color', '#ffa'); } if (newView ==='agendaWeek') { element.fullCalendar('option', 'aspectRatio', 0.1); } else { element.fullCalendar('option', 'aspectRatio', 1.35); } }, eventRender: function(event, element) { var status = event.getSimpleEvent().status; if (status !== null) { if (status.value === 'TENTATIVE') { element.css({'opacity': 0.5}); } else if (status.value === 'CANCELLED') { element.css({ 'text-decoration': 'line-through', 'opacity': 0.5 }); } } } }; } ]);
1
6,324
I'm sorry, but the indentation is still wrong. This should be indented by one tab.
nextcloud-calendar
js
@@ -360,6 +360,18 @@ class DataFrame(_Frame): self._sdf = sdf self._metadata = self._metadata.copy(column_fields=names) + @property + def dtypes(self): + """Return the dtypes in the DataFrame. + + This returns a Series with the data type of each column. The result's index is the original + DataFrame's columns. Columns with mixed types are stored with the object dtype. + + :return: :class:`pd.Series` The data type of each column. + """ + return pd.Series([self[col].dtype for col in self._metadata.column_fields], + index=self._metadata.column_fields) + @derived_from(pd.DataFrame, ua_args=['axis', 'level', 'numeric_only']) def count(self): return self._sdf.count()
1
# # Copyright (C) 2019 Databricks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A wrapper class for Spark DataFrame to behave similar to pandas DataFrame. """ from decorator import dispatch_on from functools import partial, reduce import numpy as np import pandas as pd from pyspark import sql as spark from pyspark.sql import functions as F from pyspark.sql.types import StructType, to_arrow_type from pyspark.sql.utils import AnalysisException from databricks.koalas.utils import default_session from databricks.koalas.dask.compatibility import string_types from databricks.koalas.dask.utils import derived_from from databricks.koalas.generic import _Frame, max_display_count from databricks.koalas.metadata import Metadata from databricks.koalas.missing.frame import _MissingPandasLikeDataFrame from databricks.koalas.selection import SparkDataFrameLocator class DataFrame(_Frame): """ Koala DataFrame that corresponds to Pandas DataFrame logically. This holds Spark DataFrame internally. :ivar _sdf: Spark Column instance :ivar _metadata: Metadata related to column names and index information. """ @derived_from(pd.DataFrame) @dispatch_on('data') def __init__(self, data=None, index=None, columns=None, dtype=None, copy=False): pdf = pd.DataFrame(data=data, index=index, columns=columns, dtype=dtype, copy=copy) self._init_from_pandas(pdf) @__init__.register(pd.DataFrame) def _init_from_pandas(self, pdf, *args): metadata = Metadata.from_pandas(pdf) reset_index = pdf.reset_index() reset_index.columns = metadata.all_fields self._init_from_spark(default_session().createDataFrame(reset_index), metadata) @__init__.register(spark.DataFrame) def _init_from_spark(self, sdf, metadata=None, *args): self._sdf = sdf if metadata is None: self._metadata = Metadata(column_fields=self._sdf.schema.fieldNames()) else: self._metadata = metadata @property def _index_columns(self): return [self._sdf.__getitem__(field) for field in self._metadata.index_fields] def _reduce_for_stat_function(self, sfun): sdf = self._sdf.select([sfun(self._sdf[col]).alias(col) for col in self.columns]) pdf = sdf.toPandas() assert len(pdf) == 1, (sdf, pdf) row = pdf.iloc[0] row.name = None return row # Return first row as a Series @derived_from(pd.DataFrame) def iteritems(self): cols = list(self.columns) return list((col_name, self[col_name]) for col_name in cols) @derived_from(pd.DataFrame) def to_html(self, buf=None, columns=None, col_space=None, header=True, index=True, na_rep='NaN', formatters=None, float_format=None, sparsify=None, index_names=True, justify=None, max_rows=None, max_cols=None, show_dimensions=False, decimal='.', bold_rows=True, classes=None, escape=True, notebook=False, border=None, table_id=None, render_links=False): return self.toPandas().to_html( buf=buf, columns=columns, col_space=col_space, header=header, index=index, na_rep=na_rep, formatters=formatters, float_format=float_format, sparsify=sparsify, index_names=index_names, justify=justify, max_rows=max_rows, max_cols=max_cols, show_dimensions=show_dimensions, 
decimal=decimal, bold_rows=bold_rows, classes=classes, escape=escape, notebook=notebook, border=border, table_id=table_id, render_links=render_links) @property def index(self): """The index (row labels) Column of the DataFrame. Currently supported only when the DataFrame has a single index. """ from databricks.koalas.series import Series if len(self._metadata.index_info) != 1: raise KeyError('Currently supported only when the DataFrame has a single index.') return Series(self._index_columns[0], self, []) def set_index(self, keys, drop=True, append=False, inplace=False): """Set the DataFrame index (row labels) using one or more existing columns. By default yields a new object. :param keys: column label or list of column labels / arrays :param drop: boolean, default True Delete columns to be used as the new index :param append: boolean, default False Whether to append columns to existing index :param inplace: boolean, default False Modify the DataFrame in place (do not create a new object) :return: :class:`DataFrame` """ if isinstance(keys, string_types): keys = [keys] else: keys = list(keys) for key in keys: if key not in self.columns: raise KeyError(key) if drop: columns = [column for column in self._metadata.column_fields if column not in keys] else: columns = self._metadata.column_fields if append: index_info = self._metadata.index_info + [(column, column) for column in keys] else: index_info = [(column, column) for column in keys] metadata = self._metadata.copy(column_fields=columns, index_info=index_info) if inplace: self._metadata = metadata else: kdf = self.copy() kdf._metadata = metadata return kdf def reset_index(self, level=None, drop=False, inplace=False): """For DataFrame with multi-level index, return new DataFrame with labeling information in the columns under the index names, defaulting to 'level_0', 'level_1', etc. if any are None. For a standard index, the index name will be used (if set), otherwise a default 'index' or 'level_0' (if 'index' is already taken) will be used. :param level: int, str, tuple, or list, default None Only remove the given levels from the index. Removes all levels by default :param drop: boolean, default False Do not try to insert index into dataframe columns. This resets the index to the default integer index. 
:param inplace: boolean, default False Modify the DataFrame in place (do not create a new object) :return: :class:`DataFrame` """ if len(self._metadata.index_info) == 0: raise NotImplementedError('Can\'t reset index because there is no index.') multi_index = len(self._metadata.index_info) > 1 if multi_index: rename = lambda i: 'level_{}'.format(i) else: rename = lambda i: \ 'index' if 'index' not in self._metadata.column_fields else 'level_{}'.fomat(i) if level is None: index_columns = [(column, name if name is not None else rename(i)) for i, (column, name) in enumerate(self._metadata.index_info)] index_info = [] else: if isinstance(level, (int, string_types)): level = [level] level = list(level) if all(isinstance(l, int) for l in level): for l in level: if l >= len(self._metadata.index_info): raise IndexError('Too many levels: Index has only {} level, not {}' .format(len(self._metadata.index_info), l + 1)) idx = level elif all(isinstance(l, string_types) for l in level): idx = [] for l in level: try: i = self._metadata.index_fields.index(l) idx.append(i) except ValueError: if multi_index: raise KeyError('Level unknown not found') else: raise KeyError('Level unknown must be same as name ({})' .format(self._metadata.index_fields[0])) else: raise ValueError('Level should be all int or all string.') idx.sort() index_columns = [] index_info = self._metadata.index_info.copy() for i in idx: info = self._metadata.index_info[i] column_field, index_name = info index_columns.append((column_field, index_name if index_name is not None else rename(index_name))) index_info.remove(info) if drop: index_columns = [] metadata = self._metadata.copy( column_fields=[column for column, _ in index_columns] + self._metadata.column_fields, index_info=index_info) columns = [name for _, name in index_columns] + self._metadata.column_fields if inplace: self._metadata = metadata self.columns = columns else: kdf = self.copy() kdf._metadata = metadata kdf.columns = columns return kdf @derived_from(pd.DataFrame) def isnull(self): kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.isnull() return kdf isna = isnull @derived_from(pd.DataFrame) def notnull(self): kdf = self.copy() for name, ks in kdf.iteritems(): kdf[name] = ks.notnull() return kdf notna = notnull @derived_from(spark.DataFrame) def toPandas(self): sdf = self._sdf.select(['`{}`'.format(name) for name in self._metadata.all_fields]) pdf = sdf.toPandas() if len(pdf) == 0 and len(sdf.schema) > 0: # TODO: push to OSS pdf = pdf.astype({field.name: to_arrow_type(field.dataType).to_pandas_dtype() for field in sdf.schema}) if len(self._metadata.index_info) > 0: append = False for index_field in self._metadata.index_fields: drop = index_field not in self._metadata.column_fields pdf = pdf.set_index(index_field, drop=drop, append=append) append = True pdf = pdf[self._metadata.column_fields] index_names = self._metadata.index_names if len(index_names) > 0: if isinstance(pdf.index, pd.MultiIndex): pdf.index.names = index_names else: pdf.index.name = index_names[0] return pdf @derived_from(pd.DataFrame) def assign(self, **kwargs): from databricks.koalas.series import Series for k, v in kwargs.items(): if not (isinstance(v, (Series, spark.Column)) or callable(v) or pd.api.types.is_scalar(v)): raise TypeError("Column assignment doesn't support type " "{0}".format(type(v).__name__)) if callable(v): kwargs[k] = v(self) pairs = list(kwargs.items()) sdf = self._sdf for (name, c) in pairs: if isinstance(c, Series): sdf = sdf.withColumn(name, c._scol) else: sdf = 
sdf.withColumn(name, c) metadata = self._metadata.copy( column_fields=(self._metadata.column_fields + [name for name, _ in pairs if name not in self._metadata.column_fields])) return DataFrame(sdf, metadata) @property def loc(self): return SparkDataFrameLocator(self) def copy(self): return DataFrame(self._sdf, self._metadata.copy()) @derived_from(pd.DataFrame) def dropna(self, axis=0, how='any', thresh=None, subset=None, inplace=False): if axis == 0 or axis == 'index': if subset is not None: if isinstance(subset, string_types): columns = [subset] else: columns = list(subset) invalids = [column for column in columns if column not in self._metadata.column_fields] if len(invalids) > 0: raise KeyError(invalids) else: columns = list(self.columns) cnt = reduce(lambda x, y: x + y, [F.when(self[column].notna()._scol, 1).otherwise(0) for column in columns], F.lit(0)) if thresh is not None: pred = cnt >= F.lit(int(thresh)) elif how == 'any': pred = cnt == F.lit(len(columns)) elif how == 'all': pred = cnt > F.lit(0) else: if how is not None: raise ValueError('invalid how option: {h}'.format(h=how)) else: raise TypeError('must specify how or thresh') sdf = self._sdf.filter(pred) if inplace: self._sdf = sdf else: return DataFrame(sdf, self._metadata.copy()) else: raise NotImplementedError("dropna currently only works for axis=0 or axis='index'") def head(self, n=5): return DataFrame(self._sdf.limit(n), self._metadata.copy()) @property def columns(self): return pd.Index(self._metadata.column_fields) @columns.setter def columns(self, names): old_names = self._metadata.column_fields if len(old_names) != len(names): raise ValueError( "Length mismatch: Expected axis has %d elements, new values have %d elements" % (len(old_names), len(names))) sdf = self._sdf.select(self._metadata.index_fields + [self[old_name]._scol.alias(new_name) for (old_name, new_name) in zip(old_names, names)]) self._sdf = sdf self._metadata = self._metadata.copy(column_fields=names) @derived_from(pd.DataFrame, ua_args=['axis', 'level', 'numeric_only']) def count(self): return self._sdf.count() def unique(self): sdf = self._sdf return DataFrame(spark.DataFrame(sdf._jdf.distinct(), sdf.sql_ctx), self._metadata.copy()) @derived_from(pd.DataFrame) def drop(self, labels, axis=0, errors='raise'): axis = self._validate_axis(axis) if axis == 1: if isinstance(labels, list): sdf = self._sdf.drop(*labels) metadata = self._metadata.copy( column_fields=[column for column in self._metadata.column_fields if column not in labels]) else: sdf = self._sdf.drop(labels) metadata = self._metadata.copy( column_fields=[column for column in self._metadata.column_fields if column != labels]) return DataFrame(sdf, metadata) raise NotImplementedError("Drop currently only works for axis=1") @derived_from(pd.DataFrame) def get(self, key, default=None): try: return self._pd_getitem(key) except (KeyError, ValueError, IndexError): return default def sort_values(self, by): return DataFrame(self._sdf.sort(by), self._metadata.copy()) def groupby(self, by): from databricks.koalas.groups import PandasLikeGroupBy gp = self._sdf.groupby(by) return PandasLikeGroupBy(self, gp, None) @derived_from(pd.DataFrame) def pipe(self, func, *args, **kwargs): # Taken from pandas: # https://github.com/pydata/pandas/blob/master/pandas/core/generic.py#L2698-L2707 if isinstance(func, tuple): func, target = func if target in kwargs: raise ValueError('%s is both the pipe target and a keyword ' 'argument' % target) kwargs[target] = self return func(*args, **kwargs) else: return func(self, 
*args, **kwargs) @property def shape(self): return len(self), len(self.columns) def _pd_getitem(self, key): from databricks.koalas.series import Series if key is None: raise KeyError("none key") if isinstance(key, string_types): try: return Series(self._sdf.__getitem__(key), self, self._metadata.index_info) except AnalysisException: raise KeyError(key) if np.isscalar(key) or isinstance(key, (tuple, string_types)): raise NotImplementedError(key) elif isinstance(key, slice): return self.loc[key] if isinstance(key, (pd.Series, np.ndarray, pd.Index)): raise NotImplementedError(key) if isinstance(key, list): return self.loc[:, key] if isinstance(key, DataFrame): # TODO Should not implement alignment, too dangerous? return Series(self._sdf.__getitem__(key), self, self._metadata.index_info) if isinstance(key, Series): # TODO Should not implement alignment, too dangerous? # It is assumed to be only a filter, otherwise .loc should be used. bcol = key._scol.cast("boolean") return DataFrame(self._sdf.filter(bcol), self._metadata.copy()) raise NotImplementedError(key) def __getitem__(self, key): return self._pd_getitem(key) def __setitem__(self, key, value): from databricks.koalas.series import Series # For now, we don't support realignment against different dataframes. # This is too expensive in Spark. # Are we assigning against a column? if isinstance(value, Series): assert value._kdf is self, \ "Cannot combine column argument because it comes from a different dataframe" if isinstance(key, (tuple, list)): assert isinstance(value.schema, StructType) field_names = value.schema.fieldNames() kdf = self.assign(**{k: value[c] for k, c in zip(key, field_names)}) else: kdf = self.assign(**{key: value}) self._sdf = kdf._sdf self._metadata = kdf._metadata def __getattr__(self, key): from databricks.koalas.series import Series if key.startswith("__") or key.startswith("_pandas_") or key.startswith("_spark_"): raise AttributeError(key) if hasattr(_MissingPandasLikeDataFrame, key): return partial(getattr(_MissingPandasLikeDataFrame, key), self) return Series(self._sdf.__getattr__(key), self, self._metadata.index_info) def __iter__(self): return self.toPandas().__iter__() def __len__(self): return self._sdf.count() def __dir__(self): fields = [f for f in self._sdf.schema.fieldNames() if ' ' not in f] return super(DataFrame, self).__dir__() + fields def _repr_html_(self): return self.head(max_display_count).toPandas()._repr_html_() @classmethod def _validate_axis(cls, axis=0): if axis not in (0, 1, 'index', 'columns', None): raise ValueError('No axis named {0}'.format(axis)) # convert to numeric axis return {None: 0, 'index': 0, 'columns': 1}.get(axis, axis) def _reduce_spark_multi(sdf, aggs): """ Performs a reduction on a dataframe, the functions being known sql aggregate functions. """ assert isinstance(sdf, spark.DataFrame) sdf0 = sdf.agg(*aggs) l = sdf0.head(2) assert len(l) == 1, (sdf, l) row = l[0] l2 = list(row) assert len(l2) == len(aggs), (row, l2) return l2
1
8,492
Can we add an example?
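For illustration, a hedged sketch of the kind of usage example that could accompany the new dtypes property; it assumes koalas is importable as ks with a Spark session available, and the dtype mapping shown is the expected pandas-style output rather than a verified result:

import databricks.koalas as ks

# Hypothetical usage example for the new DataFrame.dtypes property.
df = ks.DataFrame({'a': [1, 2, 3],
                   'b': [1.0, 2.0, 3.0],
                   'c': ['x', 'y', 'z']})
print(df.dtypes)
# Expected (pandas-style) output, roughly:
# a      int64
# b    float64
# c     object
# dtype: object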
databricks-koalas
py
@@ -156,6 +156,13 @@ func identifyUID(ctx context.Context, nug normalizedUsernameGetter, func identifyUser(ctx context.Context, nug normalizedUsernameGetter, identifier identifier, username libkb.NormalizedUsername, uid keybase1.UID, isPublic bool) error { + + // check to see if identify should be skipped altogether + ei := getExtendedIdentify(ctx) + if ei.behavior == keybase1.TLFIdentifyBehavior_CHAT_SKIP { + return nil + } + var reason string if isPublic { reason = "You accessed a public folder."
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "errors" "fmt" "sync" "github.com/keybase/client/go/libkb" "github.com/keybase/client/go/protocol/keybase1" "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) type extendedIdentify struct { behavior keybase1.TLFIdentifyBehavior // lock guards userBreaks and tlfBreaks lock sync.Mutex userBreaks chan keybase1.TLFIdentifyFailure tlfBreaks *keybase1.TLFBreak } func (ei *extendedIdentify) userBreak(username libkb.NormalizedUsername, uid keybase1.UID, breaks *keybase1.IdentifyTrackBreaks) { if ei.userBreaks == nil { return } ei.userBreaks <- keybase1.TLFIdentifyFailure{ Breaks: breaks, User: keybase1.User{ Uid: uid, Username: string(username), }, } } func (ei *extendedIdentify) makeTlfBreaksIfNeeded( ctx context.Context, numUserInTlf int) error { if ei.userBreaks == nil { return nil } ei.lock.Lock() defer ei.lock.Unlock() b := &keybase1.TLFBreak{} for i := 0; i < numUserInTlf; i++ { select { case ub, ok := <-ei.userBreaks: if !ok { return errors.New("makeTlfBreaksIfNeeded called on extendedIdentify" + " with closed userBreaks channel.") } if ub.Breaks != nil { b.Breaks = append(b.Breaks, ub) } case <-ctx.Done(): return ctx.Err() } } ei.tlfBreaks = b return nil } // getTlfBreakOrBust returns a keybase1.TLFBreak. This should only be called // for behavior.WarningInsteadOfErrorOnBrokenTracks() == true, and after // makeTlfBreaksIfNeeded is called, to make sure user proof breaks get // populated in GUI mode. // // If called otherwise, we don't panic here anymore, since we can't panic on // nil ei.tlfBreaks. The reason is if a previous successful identify has // already happened recently, it could cause this identify to be skipped, which // means ei.tlfBreaks is never populated. In this case, it's safe to return an // empty keybase1.TLFBreak. func (ei *extendedIdentify) getTlfBreakAndClose() keybase1.TLFBreak { ei.lock.Lock() defer ei.lock.Unlock() if ei.userBreaks != nil { close(ei.userBreaks) ei.userBreaks = nil } if ei.tlfBreaks != nil { return *ei.tlfBreaks } return keybase1.TLFBreak{} } // ctxExtendedIdentifyKeyType is a type for the context key for using // extendedIdentify type ctxExtendedIdentifyKeyType int const ( // ctxExtendedIdentifyKeyType is a context key for using extendedIdentify ctxExtendedIdentifyKey ctxExtendedIdentifyKeyType = iota ) // ExtendedIdentifyAlreadyExists is returned when makeExtendedIdentify is // called on a context already with extendedIdentify. 
type ExtendedIdentifyAlreadyExists struct{} func (e ExtendedIdentifyAlreadyExists) Error() string { return "extendedIdentify already exists" } func makeExtendedIdentify(ctx context.Context, behavior keybase1.TLFIdentifyBehavior) (context.Context, error) { if _, ok := ctx.Value(ctxExtendedIdentifyKey).(*extendedIdentify); ok { return nil, ExtendedIdentifyAlreadyExists{} } if !behavior.WarningInsteadOfErrorOnBrokenTracks() { return NewContextReplayable(ctx, func(ctx context.Context) context.Context { return context.WithValue(ctx, ctxExtendedIdentifyKey, &extendedIdentify{ behavior: behavior, }) }), nil } ch := make(chan keybase1.TLFIdentifyFailure) return NewContextReplayable(ctx, func(ctx context.Context) context.Context { return context.WithValue(ctx, ctxExtendedIdentifyKey, &extendedIdentify{ behavior: behavior, userBreaks: ch, }) }), nil } func getExtendedIdentify(ctx context.Context) (ei *extendedIdentify) { if ei, ok := ctx.Value(ctxExtendedIdentifyKey).(*extendedIdentify); ok { return ei } return &extendedIdentify{ behavior: keybase1.TLFIdentifyBehavior_DEFAULT_KBFS, } } // identifyUID performs identify based only on UID. It should be // used only if the username is not known - as e.g. when rekeying. func identifyUID(ctx context.Context, nug normalizedUsernameGetter, identifier identifier, uid keybase1.UID, isPublic bool) error { username, err := nug.GetNormalizedUsername(ctx, uid) if err != nil { return err } return identifyUser(ctx, nug, identifier, username, uid, isPublic) } // identifyUser is the preferred way to run identifies. func identifyUser(ctx context.Context, nug normalizedUsernameGetter, identifier identifier, username libkb.NormalizedUsername, uid keybase1.UID, isPublic bool) error { var reason string if isPublic { reason = "You accessed a public folder." } else { reason = fmt.Sprintf("You accessed a private folder with %s.", username.String()) } userInfo, err := identifier.Identify(ctx, username.String(), reason) if err != nil { // Convert libkb.NoSigChainError into one we can report. (See // KBFS-1252). if _, ok := err.(libkb.NoSigChainError); ok { return NoSigChainError{username} } return err } if userInfo.Name != username { return fmt.Errorf("Identify returned name=%s, expected %s", userInfo.Name, username) } if userInfo.UID != uid { return fmt.Errorf("Identify returned uid=%s, expected %s", userInfo.UID, uid) } return nil } // identifyUserToChan calls identifyUser and plugs the result into the error channnel. func identifyUserToChan(ctx context.Context, nug normalizedUsernameGetter, identifier identifier, name libkb.NormalizedUsername, uid keybase1.UID, isPublic bool, errChan chan error) { errChan <- identifyUser(ctx, nug, identifier, name, uid, isPublic) } // identifyUsers identifies the users in the given maps. func identifyUsers(ctx context.Context, nug normalizedUsernameGetter, identifier identifier, users map[keybase1.UID]libkb.NormalizedUsername, public bool) error { eg, ctx := errgroup.WithContext(ctx) // TODO: limit the number of concurrent identifies? // TODO: implement a version of errgroup with limited concurrency. for uid, name := range users { // Capture range variables. uid, name := uid, name eg.Go(func() error { return identifyUser(ctx, nug, identifier, name, uid, public) }) } return eg.Wait() } // identifyUserList identifies the users in the given list. // Only use this when the usernames are not known - like when rekeying. 
func identifyUserList(ctx context.Context, nug normalizedUsernameGetter, identifier identifier, uids []keybase1.UID, public bool) error { eg, ctx := errgroup.WithContext(ctx) // TODO: limit the number of concurrent identifies? // TODO: implement concurrency limited version of errgroup. for _, uid := range uids { // Capture range variable. uid := uid eg.Go(func() error { return identifyUID(ctx, nug, identifier, uid, public) }) } return eg.Wait() } // identifyUsersForTLF is a helper for identifyHandle for easier testing. func identifyUsersForTLF(ctx context.Context, nug normalizedUsernameGetter, identifier identifier, users map[keybase1.UID]libkb.NormalizedUsername, public bool) error { eg, ctx := errgroup.WithContext(ctx) eg.Go(func() error { ei := getExtendedIdentify(ctx) return ei.makeTlfBreaksIfNeeded(ctx, len(users)) }) eg.Go(func() error { return identifyUsers(ctx, nug, identifier, users, public) }) return eg.Wait() } // identifyHandle identifies the canonical names in the given handle. func identifyHandle(ctx context.Context, nug normalizedUsernameGetter, identifier identifier, h *TlfHandle) error { return identifyUsersForTLF(ctx, nug, identifier, h.ResolvedUsersMap(), h.IsPublic()) }
1
16,329
We usually avoid blank lines at the start of functions.
keybase-kbfs
go
@@ -0,0 +1,18 @@ +<?php + +declare(strict_types=1); + +/* + * This file is part of the Sonata Project package. + * + * (c) Thomas Rabaix <[email protected]> + * + * For the full copyright and license information, please view the LICENSE + * file that was distributed with this source code. + */ + +use Symfony\Component\DependencyInjection\Loader\Configurator\ContainerConfigurator; + +return static function (ContainerConfigurator $containerConfigurator): void { + $containerConfigurator->services(); +};
1
1
12,569
I don't think this line is needed.
sonata-project-SonataMediaBundle
php
@@ -22,7 +22,12 @@ const ( // requests w.r.t a single Volume. func (s *HTTPServer) volumeSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // check the feature gate & switch if enabled - if util.CASTemplateFeatureGate() { + feature, err := util.CASTemplateFeatureGate() + if err != nil { + // exit if invalid value + glog.Fatalf("invalid feature gate value for %s only boolean values allowed", util.CASTemplateFeatureGateENVK) + } + if feature { return s.volumeV1alpha1SpecificRequest(resp, req) }
1
package server import ( "fmt" "net/http" "strings" "github.com/golang/glog" "github.com/openebs/maya/pkg/util" "github.com/openebs/maya/types/v1" policies_v1 "github.com/openebs/maya/volume/policies/v1" "github.com/openebs/maya/volume/provisioners" ) const ( // NamespaceKey is used in request headers to get the // namespace NamespaceKey string = "namespace" ) // VolumeSpecificRequest is a http handler implementation. It deals with HTTP // requests w.r.t a single Volume. func (s *HTTPServer) volumeSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // check the feature gate & switch if enabled if util.CASTemplateFeatureGate() { return s.volumeV1alpha1SpecificRequest(resp, req) } glog.Infof("received volume request: method '%s'", req.Method) switch req.Method { case "PUT", "POST": return s.volumeAdd(resp, req) case "GET": return s.volumeSpecificGetRequest(resp, req) default: return nil, CodedError(405, ErrInvalidMethod) } } // VolumeSpecificGetRequest deals with HTTP GET request w.r.t a single Volume func (s *HTTPServer) volumeSpecificGetRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) { // Extract info from path after trimming path := strings.TrimPrefix(req.URL.Path, "/latest/volumes") // Is req valid ? if path == req.URL.Path { return nil, CodedError(405, ErrInvalidMethod) } switch { case strings.Contains(path, "/info/"): volName := strings.TrimPrefix(path, "/info/") return s.volumeRead(resp, req, volName) case strings.Contains(path, "/delete/"): volName := strings.TrimPrefix(path, "/delete/") return s.volumeDelete(resp, req, volName) case path == "/": return s.volumeList(resp, req) default: return nil, CodedError(405, ErrInvalidMethod) } } // VolumeList is the http handler that lists Volumes func (s *HTTPServer) volumeList(resp http.ResponseWriter, req *http.Request) (interface{}, error) { glog.Infof("Processing Volume list request") // Get the namespace if provided ns := "" if req != nil { ns = req.Header.Get(NamespaceKey) } if ns == "" { // We shall override if empty. This seems to be simple enough // that works for most of the usecases. // Otherwise we need to introduce logic to decide for default // namespace depending on operation type. 
ns = v1.DefaultNamespaceForListOps } // Create a Volume vol := &v1.Volume{} vol.Namespace = ns // Pass through the policy enforcement logic policy, err := policies_v1.VolumeGenericPolicy() if err != nil { return nil, err } vol, err = policy.Enforce(vol) if err != nil { return nil, err } // Get the persistent volume provisioner instance pvp, err := provisioners.GetVolumeProvisioner(nil) if err != nil { return nil, err } // Set the volume provisioner profile to provisioner _, err = pvp.Profile(vol) if err != nil { return nil, err } lister, ok, err := pvp.Lister() if err != nil { return nil, err } if !ok { return nil, fmt.Errorf("Volume list is not supported by '%s:%s'", pvp.Label(), pvp.Name()) } l, err := lister.List() if err != nil { return nil, err } glog.Infof("Processed Volume list request successfully") return l, nil } // VolumeRead is the http handler that fetches the details of a Volume func (s *HTTPServer) volumeRead(resp http.ResponseWriter, req *http.Request, volName string) (*v1.Volume, error) { glog.Infof("Processing Volume read request") if volName == "" { return nil, CodedError(400, fmt.Sprintf("Volume name is missing")) } // Get the namespace if provided ns := "" if req != nil { ns = req.Header.Get(NamespaceKey) } // Create a Volume vol := &v1.Volume{} vol.Name = volName vol.Namespace = ns // Pass through the policy enforcement logic policy, err := policies_v1.VolumeGenericPolicy() if err != nil { return nil, err } vol, err = policy.Enforce(vol) if err != nil { return nil, err } // Get persistent volume provisioner instance pvp, err := provisioners.GetVolumeProvisioner(nil) if err != nil { return nil, err } // Set the volume provisioner profile to provisioner _, err = pvp.Profile(vol) if err != nil { return nil, err } reader, ok := pvp.Reader() if !ok { return nil, fmt.Errorf("Volume read is not supported by '%s:%s'", pvp.Label(), pvp.Name()) } // TODO // vol should not be passed again !! 
details, err := reader.Read(vol) if err != nil { return nil, err } if details == nil { return nil, CodedError(404, fmt.Sprintf("Volume '%s' not found", volName)) } glog.Infof("Processed Volume read request successfully for '" + volName + "'") return details, nil } // VolumeDelete is the http handler that fetches the details of a Volume func (s *HTTPServer) volumeDelete(resp http.ResponseWriter, req *http.Request, volName string) (interface{}, error) { glog.Infof("Processing Volume delete request") if volName == "" { return nil, CodedError(400, fmt.Sprintf("Volume name is missing")) } // Get the namespace if provided ns := "" if req != nil { ns = req.Header.Get(NamespaceKey) } // Create a Volume vol := &v1.Volume{} vol.Name = volName vol.Namespace = ns // Pass through the policy enforcement logic policy, err := policies_v1.VolumeGenericPolicy() if err != nil { return nil, err } vol, err = policy.Enforce(vol) if err != nil { return nil, err } // Get the persistent volume provisioner instance pvp, err := provisioners.GetVolumeProvisioner(nil) if err != nil { return nil, err } // Set the volume provisioner profile _, err = pvp.Profile(vol) if err != nil { return nil, err } remover, ok, err := pvp.Remover() if err != nil { return nil, err } if !ok { return nil, fmt.Errorf("Volume delete is not supported by '%s:%s'", pvp.Label(), pvp.Name()) } removed, err := remover.Remove() if err != nil { return nil, err } // If there was not any err & still no removal if !removed { return nil, CodedError(404, fmt.Sprintf("Volume '%s' not found", volName)) } glog.Infof("Processed Volume delete request successfully for '" + volName + "'") return fmt.Sprintf("Volume '%s' deleted successfully", volName), nil } // VolumeAdd is the http handler that fetches the details of a Volume func (s *HTTPServer) volumeAdd(resp http.ResponseWriter, req *http.Request) (interface{}, error) { glog.Infof("Processing Volume add request") vol := &v1.Volume{} // The yaml/json spec is decoded to vol struct if err := decodeBody(req, vol); err != nil { return nil, CodedError(400, err.Error()) } // Name is expected to be available even in the minimalist specs if vol.Name == "" { return nil, CodedError(400, fmt.Sprintf("Volume name missing in '%v'", vol)) } // Pass through the policy enforcement logic policy, err := policies_v1.VolumeAddPolicy() if err != nil { return nil, err } vol, err = policy.Enforce(vol) if err != nil { return nil, err } // Get persistent volume provisioner instance pvp, err := provisioners.GetVolumeProvisioner(nil) if err != nil { return nil, err } // Set the volume provisioner profile to provisioner _, err = pvp.Profile(vol) if err != nil { return nil, err } adder, ok := pvp.Adder() if !ok { return nil, fmt.Errorf("Volume add operation is not supported by '%s:%s'", pvp.Label(), pvp.Name()) } // TODO // vol should not be passed again !! details, err := adder.Add(vol) if err != nil { return nil, err } glog.Infof("Processed Volume add request successfully for '" + vol.Name + "'") return details, nil }
1
8,720
We should not panic here! We should return an error instead, i.e. a 500 HTTP code.
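A minimal Go sketch of what this comment asks for, reusing the CodedError helper already used by the volume handlers above; the 500 status code and the surrounding handler body are illustrative assumptions, since the reviewed line lives in the patch rather than in this excerpt:

// Inside an HTTP handler such as volumeRead: do not panic on an internal
// failure, return an error so the client receives a 5xx response instead.
pvp, err := provisioners.GetVolumeProvisioner(nil)
if err != nil {
    // panic(err)                              // what the review objects to
    return nil, CodedError(500, err.Error())   // surface it as a 500 instead
}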
openebs-maya
go
@@ -1305,8 +1305,16 @@ nano: use nanosecond-precision (requires libpcap >= 1.5.0)
         else:
             pkt = pkt.__iter__()
             for p in pkt:
+                if not self.header_present:
                     self._write_header(p)
+
+                if self.linktype != conf.l2types.get(type(p), None):
+                    warning("Inconsistent linktypes detected!"
+                            " The resulting PCAP file might contain"
+                            " invalid packets."
+                            )
+
                 self._write_packet(p)
 
     def _write_packet(self, packet, sec=None, usec=None, caplen=None,
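With the added check, writing packets whose class maps to a different linktype than the one recorded in the pcap global header now produces a warning; a small illustration (assuming a standard Scapy install with this patch applied, and an arbitrary output path):

from scapy.all import Ether, IP, wrpcap

# The first packet fixes the pcap header's linktype to Ethernet (DLT_EN10MB);
# the bare IP packet maps to a different linktype, so the new check warns.
wrpcap("/tmp/mixed.pcap", [Ether() / IP(), IP()])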
1
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) Philippe Biondi <[email protected]> # This program is published under a GPLv2 license """ General utility functions. """ from __future__ import absolute_import from __future__ import print_function from decimal import Decimal import os import sys import socket import collections import random import time import gzip import re import struct import array import subprocess import tempfile import threading import scapy.modules.six as six from scapy.modules.six.moves import range from scapy.config import conf from scapy.consts import DARWIN, WINDOWS, WINDOWS_XP, OPENBSD from scapy.data import MTU, DLT_EN10MB from scapy.compat import orb, raw, plain_str, chb, bytes_base64,\ base64_bytes, hex_bytes, lambda_tuple_converter, bytes_encode from scapy.error import log_runtime, Scapy_Exception, warning from scapy.pton_ntop import inet_pton ########### # Tools # ########### def issubtype(x, t): """issubtype(C, B) -> bool Return whether C is a class and if it is a subclass of class B. When using a tuple as the second argument issubtype(X, (A, B, ...)), is a shortcut for issubtype(X, A) or issubtype(X, B) or ... (etc.). """ return isinstance(x, type) and issubclass(x, t) def get_temp_file(keep=False, autoext="", fd=False): """Creates a temporary file. :param keep: If False, automatically delete the file when Scapy exits. :param autoext: Suffix to add to the generated file name. :param fd: If True, this returns a file-like object with the temporary file opened. If False (default), this returns a file path. """ f = tempfile.NamedTemporaryFile(prefix="scapy", suffix=autoext, delete=False) if not keep: conf.temp_files.append(f.name) if fd: return f else: # Close the file so something else can take it. f.close() return f.name def get_temp_dir(keep=False): """Creates a temporary file, and returns its name. :param keep: If False (default), the directory will be recursively deleted when Scapy exits. :return: A full path to a temporary directory. """ dname = tempfile.mkdtemp(prefix="scapy") if not keep: conf.temp_files.append(dname) return dname def sane_color(x): r = "" for i in x: j = orb(i) if (j < 32) or (j >= 127): r += conf.color_theme.not_printable(".") else: r += chr(j) return r def sane(x): r = "" for i in x: j = orb(i) if (j < 32) or (j >= 127): r += "." 
else: r += chr(j) return r @conf.commands.register def restart(): """Restarts scapy""" if not conf.interactive or not os.path.isfile(sys.argv[0]): raise OSError("Scapy was not started from console") if WINDOWS: try: res_code = subprocess.call([sys.executable] + sys.argv) except KeyboardInterrupt: res_code = 1 finally: os._exit(res_code) os.execv(sys.executable, [sys.executable] + sys.argv) def lhex(x): if type(x) in six.integer_types: return hex(x) elif isinstance(x, tuple): return "(%s)" % ", ".join(map(lhex, x)) elif isinstance(x, list): return "[%s]" % ", ".join(map(lhex, x)) else: return x @conf.commands.register def hexdump(x, dump=False): """Build a tcpdump like hexadecimal view :param x: a Packet :param dump: define if the result must be printed or returned in a variable :returns: a String only when dump=True """ s = "" x = bytes_encode(x) x_len = len(x) i = 0 while i < x_len: s += "%04x " % i for j in range(16): if i + j < x_len: s += "%02X " % orb(x[i + j]) else: s += " " s += " %s\n" % sane_color(x[i:i + 16]) i += 16 # remove trailing \n s = s[:-1] if s.endswith("\n") else s if dump: return s else: print(s) @conf.commands.register def linehexdump(x, onlyasc=0, onlyhex=0, dump=False): """Build an equivalent view of hexdump() on a single line Note that setting both onlyasc and onlyhex to 1 results in a empty output :param x: a Packet :param onlyasc: 1 to display only the ascii view :param onlyhex: 1 to display only the hexadecimal view :param dump: print the view if False :returns: a String only when dump=True """ s = "" s = hexstr(x, onlyasc=onlyasc, onlyhex=onlyhex, color=not dump) if dump: return s else: print(s) @conf.commands.register def chexdump(x, dump=False): """Build a per byte hexadecimal representation Example: >>> chexdump(IP()) 0x45, 0x00, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x40, 0x00, 0x7c, 0xe7, 0x7f, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x00, 0x01 # noqa: E501 :param x: a Packet :param dump: print the view if False :returns: a String only if dump=True """ x = bytes_encode(x) s = ", ".join("%#04x" % orb(x) for x in x) if dump: return s else: print(s) @conf.commands.register def hexstr(x, onlyasc=0, onlyhex=0, color=False): """Build a fancy tcpdump like hex from bytes.""" x = bytes_encode(x) _sane_func = sane_color if color else sane s = [] if not onlyasc: s.append(" ".join("%02X" % orb(b) for b in x)) if not onlyhex: s.append(_sane_func(x)) return " ".join(s) def repr_hex(s): """ Convert provided bitstring to a simple string of hex digits """ return "".join("%02x" % orb(x) for x in s) @conf.commands.register def hexdiff(x, y): """Show differences between 2 binary strings""" x = bytes_encode(x)[::-1] y = bytes_encode(y)[::-1] SUBST = 1 INSERT = 1 d = {(-1, -1): (0, (-1, -1))} for j in range(len(y)): d[-1, j] = d[-1, j - 1][0] + INSERT, (-1, j - 1) for i in range(len(x)): d[i, -1] = d[i - 1, -1][0] + INSERT, (i - 1, -1) for j in range(len(y)): for i in range(len(x)): d[i, j] = min((d[i - 1, j - 1][0] + SUBST * (x[i] != y[j]), (i - 1, j - 1)), # noqa: E501 (d[i - 1, j][0] + INSERT, (i - 1, j)), (d[i, j - 1][0] + INSERT, (i, j - 1))) backtrackx = [] backtracky = [] i = len(x) - 1 j = len(y) - 1 while not (i == j == -1): i2, j2 = d[i, j][1] backtrackx.append(x[i2 + 1:i + 1]) backtracky.append(y[j2 + 1:j + 1]) i, j = i2, j2 x = y = i = 0 colorize = {0: lambda x: x, -1: conf.color_theme.left, 1: conf.color_theme.right} dox = 1 doy = 0 btx_len = len(backtrackx) while i < btx_len: linex = backtrackx[i:i + 16] liney = backtracky[i:i + 16] xx = sum(len(k) for k in linex) yy = 
sum(len(k) for k in liney) if dox and not xx: dox = 0 doy = 1 if dox and linex == liney: doy = 1 if dox: xd = y j = 0 while not linex[j]: j += 1 xd -= 1 print(colorize[doy - dox]("%04x" % xd), end=' ') x += xx line = linex else: print(" ", end=' ') if doy: yd = y j = 0 while not liney[j]: j += 1 yd -= 1 print(colorize[doy - dox]("%04x" % yd), end=' ') y += yy line = liney else: print(" ", end=' ') print(" ", end=' ') cl = "" for j in range(16): if i + j < btx_len: if line[j]: col = colorize[(linex[j] != liney[j]) * (doy - dox)] print(col("%02X" % orb(line[j])), end=' ') if linex[j] == liney[j]: cl += sane_color(line[j]) else: cl += col(sane(line[j])) else: print(" ", end=' ') cl += " " else: print(" ", end=' ') if j == 7: print("", end=' ') print(" ", cl) if doy or not yy: doy = 0 dox = 1 i += 16 else: if yy: dox = 0 doy = 1 else: i += 16 if struct.pack("H", 1) == b"\x00\x01": # big endian checksum_endian_transform = lambda chk: chk else: checksum_endian_transform = lambda chk: ((chk >> 8) & 0xff) | chk << 8 def checksum(pkt): if len(pkt) % 2 == 1: pkt += b"\0" s = sum(array.array("H", pkt)) s = (s >> 16) + (s & 0xffff) s += s >> 16 s = ~s return checksum_endian_transform(s) & 0xffff def _fletcher16(charbuf): # This is based on the GPLed C implementation in Zebra <http://www.zebra.org/> # noqa: E501 c0 = c1 = 0 for char in charbuf: c0 += orb(char) c1 += c0 c0 %= 255 c1 %= 255 return (c0, c1) @conf.commands.register def fletcher16_checksum(binbuf): """Calculates Fletcher-16 checksum of the given buffer. Note: If the buffer contains the two checkbytes derived from the Fletcher-16 checksum # noqa: E501 the result of this function has to be 0. Otherwise the buffer has been corrupted. # noqa: E501 """ (c0, c1) = _fletcher16(binbuf) return (c1 << 8) | c0 @conf.commands.register def fletcher16_checkbytes(binbuf, offset): """Calculates the Fletcher-16 checkbytes returned as 2 byte binary-string. Including the bytes into the buffer (at the position marked by offset) the # noqa: E501 global Fletcher-16 checksum of the buffer will be 0. Thus it is easy to verify # noqa: E501 the integrity of the buffer on the receiver side. For details on the algorithm, see RFC 2328 chapter 12.1.7 and RFC 905 Annex B. # noqa: E501 """ # This is based on the GPLed C implementation in Zebra <http://www.zebra.org/> # noqa: E501 if len(binbuf) < offset: raise Exception("Packet too short for checkbytes %d" % len(binbuf)) binbuf = binbuf[:offset] + b"\x00\x00" + binbuf[offset + 2:] (c0, c1) = _fletcher16(binbuf) x = ((len(binbuf) - offset - 1) * c0 - c1) % 255 if (x <= 0): x += 255 y = 510 - c0 - x if (y > 255): y -= 255 return chb(x) + chb(y) def mac2str(mac): return b"".join(chb(int(x, 16)) for x in plain_str(mac).split(':')) def valid_mac(mac): try: return len(mac2str(mac)) == 6 except ValueError: pass return False def str2mac(s): if isinstance(s, str): return ("%02x:" * 6)[:-1] % tuple(map(ord, s)) return ("%02x:" * 6)[:-1] % tuple(s) def randstring(l): """ Returns a random string of length l (l >= 0) """ return b"".join(struct.pack('B', random.randint(0, 255)) for _ in range(l)) def zerofree_randstring(l): """ Returns a random string of length l (l >= 0) without zero in it. """ return b"".join(struct.pack('B', random.randint(1, 255)) for _ in range(l)) def strxor(s1, s2): """ Returns the binary XOR of the 2 provided strings s1 and s2. s1 and s2 must be of same length. """ return b"".join(map(lambda x, y: chb(orb(x) ^ orb(y)), s1, s2)) def strand(s1, s2): """ Returns the binary AND of the 2 provided strings s1 and s2. 
s1 and s2 must be of same length. """ return b"".join(map(lambda x, y: chb(orb(x) & orb(y)), s1, s2)) # Workaround bug 643005 : https://sourceforge.net/tracker/?func=detail&atid=105470&aid=643005&group_id=5470 # noqa: E501 try: socket.inet_aton("255.255.255.255") except socket.error: def inet_aton(x): if x == "255.255.255.255": return b"\xff" * 4 else: return socket.inet_aton(x) else: inet_aton = socket.inet_aton inet_ntoa = socket.inet_ntoa def atol(x): try: ip = inet_aton(x) except socket.error: ip = inet_aton(socket.gethostbyname(x)) return struct.unpack("!I", ip)[0] def valid_ip(addr): try: addr = plain_str(addr) except UnicodeDecodeError: return False try: atol(addr) except (OSError, ValueError, socket.error): return False return True def valid_net(addr): try: addr = plain_str(addr) except UnicodeDecodeError: return False if '/' in addr: ip, mask = addr.split('/', 1) return valid_ip(ip) and mask.isdigit() and 0 <= int(mask) <= 32 return valid_ip(addr) def valid_ip6(addr): try: addr = plain_str(addr) except UnicodeDecodeError: return False try: inet_pton(socket.AF_INET6, addr) except socket.error: try: socket.getaddrinfo(addr, None, socket.AF_INET6)[0][4][0] except socket.error: return False return True def valid_net6(addr): try: addr = plain_str(addr) except UnicodeDecodeError: return False if '/' in addr: ip, mask = addr.split('/', 1) return valid_ip6(ip) and mask.isdigit() and 0 <= int(mask) <= 128 return valid_ip6(addr) if WINDOWS_XP: # That is a hell of compatibility :( def ltoa(x): return inet_ntoa(struct.pack("<I", x & 0xffffffff)) else: def ltoa(x): return inet_ntoa(struct.pack("!I", x & 0xffffffff)) def itom(x): return (0xffffffff00000000 >> x) & 0xffffffff class ContextManagerSubprocess(object): """ Context manager that eases checking for unknown command. Example: >>> with ContextManagerSubprocess("my custom message", "unknown_command"): >>> subprocess.Popen(["unknown_command"]) """ def __init__(self, name, prog): self.name = name self.prog = prog def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): if isinstance(exc_value, (OSError, TypeError)): msg = "%s: executing %r failed" % (self.name, self.prog) if self.prog else "Could not execute %s, is it installed ?" % self.name # noqa: E501 if not conf.interactive: raise OSError(msg) else: log_runtime.error(msg, exc_info=True) return True # Suppress the exception class ContextManagerCaptureOutput(object): """ Context manager that intercept the console's output. Example: >>> with ContextManagerCaptureOutput() as cmco: ... print("hey") ... 
assert cmco.get_output() == "hey" """ def __init__(self): self.result_export_object = "" try: import mock # noqa: F401 except Exception: raise ImportError("The mock module needs to be installed !") def __enter__(self): import mock def write(s, decorator=self): decorator.result_export_object += s mock_stdout = mock.Mock() mock_stdout.write = write self.bck_stdout = sys.stdout sys.stdout = mock_stdout return self def __exit__(self, *exc): sys.stdout = self.bck_stdout return False def get_output(self, eval_bytes=False): if self.result_export_object.startswith("b'") and eval_bytes: return plain_str(eval(self.result_export_object)) return self.result_export_object def do_graph(graph, prog=None, format=None, target=None, type=None, string=None, options=None): # noqa: E501 """do_graph(graph, prog=conf.prog.dot, format="svg", target="| conf.prog.display", options=None, [string=1]): string: if not None, simply return the graph string graph: GraphViz graph description format: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option target: filename or redirect. Defaults pipe to Imagemagick's display program # noqa: E501 prog: which graphviz program to use options: options to be passed to prog""" if format is None: if WINDOWS: format = "png" # use common format to make sure a viewer is installed # noqa: E501 else: format = "svg" if string: return graph if type is not None: format = type if prog is None: prog = conf.prog.dot start_viewer = False if target is None: if WINDOWS: target = get_temp_file(autoext="." + format) start_viewer = True else: with ContextManagerSubprocess("do_graph()", conf.prog.display): target = subprocess.Popen([conf.prog.display], stdin=subprocess.PIPE).stdin if format is not None: format = "-T%s" % format if isinstance(target, str): if target.startswith('|'): target = subprocess.Popen(target[1:].lstrip(), shell=True, stdin=subprocess.PIPE).stdin elif target.startswith('>'): target = open(target[1:].lstrip(), "wb") else: target = open(os.path.abspath(target), "wb") proc = subprocess.Popen("\"%s\" %s %s" % (prog, options or "", format or ""), # noqa: E501 shell=True, stdin=subprocess.PIPE, stdout=target) proc.stdin.write(bytes_encode(graph)) proc.stdin.close() proc.wait() try: target.close() except Exception: pass if start_viewer: # Workaround for file not found error: We wait until tempfile is written. # noqa: E501 waiting_start = time.time() while not os.path.exists(target.name): time.sleep(0.1) if time.time() - waiting_start > 3: warning("Temporary file '%s' could not be written. Graphic will not be displayed.", tempfile) # noqa: E501 break else: if conf.prog.display == conf.prog._default: os.startfile(target.name) else: with ContextManagerSubprocess("do_graph()", conf.prog.display): subprocess.Popen([conf.prog.display, target.name]) _TEX_TR = { "{": "{\\tt\\char123}", "}": "{\\tt\\char125}", "\\": "{\\tt\\char92}", "^": "\\^{}", "$": "\\$", "#": "\\#", "_": "\\_", "&": "\\&", "%": "\\%", "|": "{\\tt\\char124}", "~": "{\\tt\\char126}", "<": "{\\tt\\char60}", ">": "{\\tt\\char62}", } def tex_escape(x): s = "" for c in x: s += _TEX_TR.get(c, c) return s def colgen(*lstcol, **kargs): """Returns a generator that mixes provided quantities forever trans: a function to convert the three arguments into a color. 
lambda x,y,z:(x,y,z) by default""" # noqa: E501 if len(lstcol) < 2: lstcol *= 2 trans = kargs.get("trans", lambda x, y, z: (x, y, z)) while True: for i in range(len(lstcol)): for j in range(len(lstcol)): for k in range(len(lstcol)): if i != j or j != k or k != i: yield trans(lstcol[(i + j) % len(lstcol)], lstcol[(j + k) % len(lstcol)], lstcol[(k + i) % len(lstcol)]) # noqa: E501 def incremental_label(label="tag%05i", start=0): while True: yield label % start start += 1 def binrepr(val): return bin(val)[2:] def long_converter(s): return int(s.replace('\n', '').replace(' ', ''), 16) ######################### # Enum management # ######################### class EnumElement: _value = None def __init__(self, key, value): self._key = key self._value = value def __repr__(self): return "<%s %s[%r]>" % (self.__dict__.get("_name", self.__class__.__name__), self._key, self._value) # noqa: E501 def __getattr__(self, attr): return getattr(self._value, attr) def __str__(self): return self._key def __bytes__(self): return bytes_encode(self.__str__()) def __hash__(self): return self._value def __int__(self): return int(self._value) def __eq__(self, other): return self._value == int(other) def __neq__(self, other): return not self.__eq__(other) class Enum_metaclass(type): element_class = EnumElement def __new__(cls, name, bases, dct): rdict = {} for k, v in six.iteritems(dct): if isinstance(v, int): v = cls.element_class(k, v) dct[k] = v rdict[v] = k dct["__rdict__"] = rdict return super(Enum_metaclass, cls).__new__(cls, name, bases, dct) def __getitem__(self, attr): return self.__rdict__[attr] def __contains__(self, val): return val in self.__rdict__ def get(self, attr, val=None): return self.__rdict__.get(attr, val) def __repr__(self): return "<%s>" % self.__dict__.get("name", self.__name__) ################### # Object saving # ################### def export_object(obj): print(bytes_base64(gzip.zlib.compress(six.moves.cPickle.dumps(obj, 2), 9))) def import_object(obj=None): if obj is None: obj = sys.stdin.read() return six.moves.cPickle.loads(gzip.zlib.decompress(base64_bytes(obj.strip()))) # noqa: E501 def save_object(fname, obj): """Pickle a Python object""" fd = gzip.open(fname, "wb") six.moves.cPickle.dump(obj, fd) fd.close() def load_object(fname): """unpickle a Python object""" return six.moves.cPickle.load(gzip.open(fname, "rb")) @conf.commands.register def corrupt_bytes(s, p=0.01, n=None): """Corrupt a given percentage or number of bytes from a string""" s = array.array("B", bytes_encode(s)) s_len = len(s) if n is None: n = max(1, int(s_len * p)) for i in random.sample(range(s_len), n): s[i] = (s[i] + random.randint(1, 255)) % 256 return s.tostring() if six.PY2 else s.tobytes() @conf.commands.register def corrupt_bits(s, p=0.01, n=None): """Flip a given percentage or number of bits from a string""" s = array.array("B", bytes_encode(s)) s_len = len(s) * 8 if n is None: n = max(1, int(s_len * p)) for i in random.sample(range(s_len), n): s[i // 8] ^= 1 << (i % 8) return s.tostring() if six.PY2 else s.tobytes() ############################# # pcap capture file stuff # ############################# @conf.commands.register def wrpcap(filename, pkt, *args, **kargs): """Write a list of packets to a pcap file filename: the name of the file to write packets to, or an open, writable file-like object. The file descriptor will be closed at the end of the call, so do not use an object you do not want to close (e.g., running wrpcap(sys.stdout, []) in interactive mode will crash Scapy). 
gz: set to 1 to save a gzipped capture linktype: force linktype value endianness: "<" or ">", force endianness sync: do not bufferize writes to the capture file """ with PcapWriter(filename, *args, **kargs) as fdesc: fdesc.write(pkt) @conf.commands.register def rdpcap(filename, count=-1): """Read a pcap or pcapng file and return a packet list count: read only <count> packets """ with PcapReader(filename) as fdesc: return fdesc.read_all(count=count) class PcapReader_metaclass(type): """Metaclass for (Raw)Pcap(Ng)Readers""" def __new__(cls, name, bases, dct): """The `alternative` class attribute is declared in the PcapNg variant, and set here to the Pcap variant. """ newcls = super(PcapReader_metaclass, cls).__new__(cls, name, bases, dct) # noqa: E501 if 'alternative' in dct: dct['alternative'].alternative = newcls return newcls def __call__(cls, filename): """Creates a cls instance, use the `alternative` if that fails. """ i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__) filename, fdesc, magic = cls.open(filename) try: i.__init__(filename, fdesc, magic) except Scapy_Exception: if "alternative" in cls.__dict__: cls = cls.__dict__["alternative"] i = cls.__new__(cls, cls.__name__, cls.__bases__, cls.__dict__) try: i.__init__(filename, fdesc, magic) except Scapy_Exception: try: i.f.seek(-4, 1) except Exception: pass raise Scapy_Exception("Not a supported capture file") return i @staticmethod def open(filename): """Open (if necessary) filename, and read the magic.""" if isinstance(filename, six.string_types): try: fdesc = gzip.open(filename, "rb") magic = fdesc.read(4) except IOError: fdesc = open(filename, "rb") magic = fdesc.read(4) else: fdesc = filename filename = getattr(fdesc, "name", "No name") magic = fdesc.read(4) return filename, fdesc, magic class RawPcapReader(six.with_metaclass(PcapReader_metaclass)): """A stateful pcap reader. 
Each packet is returned as a string""" read_allowed_exceptions = () # emulate SuperSocket nonblocking_socket = True PacketMetadata = collections.namedtuple("PacketMetadata", ["sec", "usec", "wirelen", "caplen"]) # noqa: E501 def __init__(self, filename, fdesc, magic): self.filename = filename self.f = fdesc if magic == b"\xa1\xb2\xc3\xd4": # big endian self.endian = ">" self.nano = False elif magic == b"\xd4\xc3\xb2\xa1": # little endian self.endian = "<" self.nano = False elif magic == b"\xa1\xb2\x3c\x4d": # big endian, nanosecond-precision self.endian = ">" self.nano = True elif magic == b"\x4d\x3c\xb2\xa1": # little endian, nanosecond-precision # noqa: E501 self.endian = "<" self.nano = True else: raise Scapy_Exception( "Not a pcap capture file (bad magic: %r)" % magic ) hdr = self.f.read(20) if len(hdr) < 20: raise Scapy_Exception("Invalid pcap file (too short)") vermaj, vermin, tz, sig, snaplen, linktype = struct.unpack( self.endian + "HHIIII", hdr ) self.linktype = linktype def __iter__(self): return self def next(self): """implement the iterator protocol on a set of packets in a pcap file pkt is a tuple (pkt_data, pkt_metadata) as defined in RawPcapReader.read_packet() """ try: return self.read_packet() except EOFError: raise StopIteration __next__ = next def read_packet(self, size=MTU): """return a single packet read from the file as a tuple containing (pkt_data, pkt_metadata) raise EOFError when no more packets are available """ hdr = self.f.read(16) if len(hdr) < 16: raise EOFError sec, usec, caplen, wirelen = struct.unpack(self.endian + "IIII", hdr) return (self.f.read(caplen)[:size], RawPcapReader.PacketMetadata(sec=sec, usec=usec, wirelen=wirelen, caplen=caplen)) def dispatch(self, callback): """call the specified callback routine for each packet read This is just a convenience function for the main loop that allows for easy launching of packet processing in a thread. """ for p in self: callback(p) def read_all(self, count=-1): """return a list of all packets in the pcap file """ res = [] while count != 0: count -= 1 try: p = self.read_packet() except EOFError: break res.append(p) return res def recv(self, size=MTU): """ Emulate a socket """ return self.read_packet(size=size)[0] def fileno(self): return self.f.fileno() def close(self): return self.f.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, tracback): self.close() # emulate SuperSocket @staticmethod def select(sockets, remain=None): return sockets, None class PcapReader(RawPcapReader): def __init__(self, filename, fdesc, magic): RawPcapReader.__init__(self, filename, fdesc, magic) try: self.LLcls = conf.l2types[self.linktype] except KeyError: warning("PcapReader: unknown LL type [%i]/[%#x]. 
Using Raw packets" % (self.linktype, self.linktype)) # noqa: E501 self.LLcls = conf.raw_layer def read_packet(self, size=MTU): rp = super(PcapReader, self).read_packet(size=size) if rp is None: raise EOFError s, pkt_info = rp try: p = self.LLcls(s) except KeyboardInterrupt: raise except Exception: if conf.debug_dissector: from scapy.sendrecv import debug debug.crashed_on = (self.LLcls, s) raise p = conf.raw_layer(s) power = Decimal(10) ** Decimal(-9 if self.nano else -6) p.time = Decimal(pkt_info.sec + power * pkt_info.usec) p.wirelen = pkt_info.wirelen return p def read_all(self, count=-1): res = RawPcapReader.read_all(self, count) from scapy import plist return plist.PacketList(res, name=os.path.basename(self.filename)) def recv(self, size=MTU): return self.read_packet(size=size) class RawPcapNgReader(RawPcapReader): """A stateful pcapng reader. Each packet is returned as a string. """ alternative = RawPcapReader PacketMetadata = collections.namedtuple("PacketMetadata", ["linktype", "tsresol", "tshigh", "tslow", "wirelen"]) def __init__(self, filename, fdesc, magic): self.filename = filename self.f = fdesc # A list of (linktype, snaplen, tsresol); will be populated by IDBs. self.interfaces = [] self.blocktypes = { 1: self.read_block_idb, 2: self.read_block_pkt, 3: self.read_block_spb, 6: self.read_block_epb, } if magic != b"\x0a\x0d\x0d\x0a": # PcapNg: raise Scapy_Exception( "Not a pcapng capture file (bad magic: %r)" % magic ) # see https://github.com/pcapng/pcapng blocklen, magic = self.f.read(4), self.f.read(4) # noqa: F841 if magic == b"\x1a\x2b\x3c\x4d": self.endian = ">" elif magic == b"\x4d\x3c\x2b\x1a": self.endian = "<" else: raise Scapy_Exception("Not a pcapng capture file (bad magic)") try: self.f.seek(0) except Exception: pass def read_packet(self, size=MTU): """Read blocks until it reaches either EOF or a packet, and returns None or (packet, (linktype, sec, usec, wirelen)), where packet is a string. """ while True: try: blocktype, blocklen = struct.unpack(self.endian + "2I", self.f.read(8)) except struct.error: raise EOFError block = self.f.read(blocklen - 12) if blocklen % 4: pad = self.f.read(4 - (blocklen % 4)) warning("PcapNg: bad blocklen %d (MUST be a multiple of 4. " "Ignored padding %r" % (blocklen, pad)) try: if (blocklen,) != struct.unpack(self.endian + 'I', self.f.read(4)): warning("PcapNg: Invalid pcapng block (bad blocklen)") except struct.error: raise EOFError res = self.blocktypes.get(blocktype, lambda block, size: None)(block, size) if res is not None: return res def read_block_idb(self, block, _): """Interface Description Block""" options = block[16:] tsresol = 1000000 while len(options) >= 4: code, length = struct.unpack(self.endian + "HH", options[:4]) # PCAP Next Generation (pcapng) Capture File Format # 4.2. 
- Interface Description Block # http://xml2rfc.tools.ietf.org/cgi-bin/xml2rfc.cgi?url=https://raw.githubusercontent.com/pcapng/pcapng/master/draft-tuexen-opsawg-pcapng.xml&modeAsFormat=html/ascii&type=ascii#rfc.section.4.2 if code == 9 and length == 1 and len(options) >= 5: tsresol = orb(options[4]) tsresol = (2 if tsresol & 128 else 10) ** (tsresol & 127) if code == 0: if length != 0: warning("PcapNg: invalid option length %d for end-of-option" % length) # noqa: E501 break if length % 4: length += (4 - (length % 4)) options = options[4 + length:] self.interfaces.append(struct.unpack(self.endian + "HxxI", block[:8]) + (tsresol,)) def read_block_epb(self, block, size): """Enhanced Packet Block""" intid, tshigh, tslow, caplen, wirelen = struct.unpack( self.endian + "5I", block[:20], ) return (block[20:20 + caplen][:size], RawPcapNgReader.PacketMetadata(linktype=self.interfaces[intid][0], # noqa: E501 tsresol=self.interfaces[intid][2], # noqa: E501 tshigh=tshigh, tslow=tslow, wirelen=wirelen)) def read_block_spb(self, block, size): """Simple Packet Block""" # "it MUST be assumed that all the Simple Packet Blocks have # been captured on the interface previously specified in the # first Interface Description Block." intid = 0 wirelen, = struct.unpack(self.endian + "I", block[:4]) caplen = min(wirelen, self.interfaces[intid][1]) return (block[4:4 + caplen][:size], RawPcapNgReader.PacketMetadata(linktype=self.interfaces[intid][0], # noqa: E501 tsresol=self.interfaces[intid][2], # noqa: E501 tshigh=None, tslow=None, wirelen=wirelen)) def read_block_pkt(self, block, size): """(Obsolete) Packet Block""" intid, drops, tshigh, tslow, caplen, wirelen = struct.unpack( self.endian + "HH4I", block[:20], ) return (block[20:20 + caplen][:size], RawPcapNgReader.PacketMetadata(linktype=self.interfaces[intid][0], # noqa: E501 tsresol=self.interfaces[intid][2], # noqa: E501 tshigh=tshigh, tslow=tslow, wirelen=wirelen)) class PcapNgReader(RawPcapNgReader): alternative = PcapReader def __init__(self, filename, fdesc, magic): RawPcapNgReader.__init__(self, filename, fdesc, magic) def read_packet(self, size=MTU): rp = super(PcapNgReader, self).read_packet(size=size) if rp is None: raise EOFError s, (linktype, tsresol, tshigh, tslow, wirelen) = rp try: p = conf.l2types[linktype](s) except KeyboardInterrupt: raise except Exception: if conf.debug_dissector: raise p = conf.raw_layer(s) if tshigh is not None: p.time = float((tshigh << 32) + tslow) / tsresol p.wirelen = wirelen return p def read_all(self, count=-1): res = RawPcapNgReader.read_all(self, count) from scapy import plist return plist.PacketList(res, name=os.path.basename(self.filename)) def recv(self, size=MTU): return self.read_packet() class RawPcapWriter: """A stream PCAP writer with more control than wrpcap()""" def __init__(self, filename, linktype=None, gz=False, endianness="", append=False, sync=False, nano=False): """ filename: the name of the file to write packets to, or an open, writable file-like object. linktype: force linktype to a given value. If None, linktype is taken from the first writer packet gz: compress the capture on the fly endianness: force an endianness (little:"<", big:">"). 
Default is native append: append packets to the capture file instead of truncating it sync: do not bufferize writes to the capture file nano: use nanosecond-precision (requires libpcap >= 1.5.0) """ self.linktype = linktype self.header_present = 0 self.append = append self.gz = gz self.endian = endianness self.sync = sync self.nano = nano bufsz = 4096 if sync: bufsz = 0 if isinstance(filename, six.string_types): self.filename = filename self.f = [open, gzip.open][gz](filename, append and "ab" or "wb", gz and 9 or bufsz) # noqa: E501 else: self.f = filename self.filename = getattr(filename, "name", "No name") def fileno(self): return self.f.fileno() def _write_header(self, pkt): self.header_present = 1 if self.append: # Even if prone to race conditions, this seems to be # safest way to tell whether the header is already present # because we have to handle compressed streams that # are not as flexible as basic files g = [open, gzip.open][self.gz](self.filename, "rb") if g.read(16): return self.f.write(struct.pack(self.endian + "IHHIIII", 0xa1b23c4d if self.nano else 0xa1b2c3d4, # noqa: E501 2, 4, 0, 0, MTU, self.linktype)) self.f.flush() def write(self, pkt): """ Writes a Packet, a SndRcvList object, or bytes to a pcap file. :param pkt: Packet(s) to write (one record for each Packet), or raw bytes to write (as one record). :type pkt: iterable[Packet], Packet or bytes """ if isinstance(pkt, bytes): if not self.header_present: self._write_header(pkt) self._write_packet(pkt) else: # Import here to avoid a circular dependency from scapy.plist import SndRcvList if isinstance(pkt, SndRcvList): pkt = (p for t in pkt for p in t) else: pkt = pkt.__iter__() for p in pkt: if not self.header_present: self._write_header(p) self._write_packet(p) def _write_packet(self, packet, sec=None, usec=None, caplen=None, wirelen=None): """ Writes a single packet to the pcap file. :param packet: bytes for a single packet :type packet: bytes :param sec: time the packet was captured, in seconds since epoch. If not supplied, defaults to now. :type sec: int or long :param usec: If ``nano=True``, then number of nanoseconds after the second that the packet was captured. If ``nano=False``, then the number of microseconds after the second the packet was captured :type usec: int or long :param caplen: The length of the packet in the capture file. If not specified, uses ``len(packet)``. :type caplen: int :param wirelen: The length of the packet on the wire. If not specified, uses ``caplen``. 
:type wirelen: int :returns: None :rtype: None """ if caplen is None: caplen = len(packet) if wirelen is None: wirelen = caplen if sec is None or usec is None: t = time.time() it = int(t) if sec is None: sec = it usec = int(round((t - it) * (1000000000 if self.nano else 1000000))) elif usec is None: usec = 0 self.f.write(struct.pack(self.endian + "IIII", sec, usec, caplen, wirelen)) self.f.write(packet) if self.sync: self.f.flush() def flush(self): return self.f.flush() def close(self): if not self.header_present: self._write_header(None) return self.f.close() def __enter__(self): return self def __exit__(self, exc_type, exc_value, tracback): self.flush() self.close() class PcapWriter(RawPcapWriter): """A stream PCAP writer with more control than wrpcap()""" def _write_header(self, pkt): if self.linktype is None: try: self.linktype = conf.l2types[pkt.__class__] # Import here to prevent import loops from scapy.layers.inet import IP from scapy.layers.inet6 import IPv6 if OPENBSD and isinstance(pkt, (IP, IPv6)): self.linktype = 14 # DLT_RAW except KeyError: warning("PcapWriter: unknown LL type for %s. Using type 1 (Ethernet)", pkt.__class__.__name__) # noqa: E501 self.linktype = DLT_EN10MB RawPcapWriter._write_header(self, pkt) def _write_packet(self, packet, sec=None, usec=None, caplen=None, wirelen=None): """ Writes a single packet to the pcap file. :param packet: Packet, or bytes for a single packet :type packet: Packet or bytes :param sec: time the packet was captured, in seconds since epoch. If not supplied, defaults to now. :type sec: int or long :param usec: If ``nano=True``, then number of nanoseconds after the second that the packet was captured. If ``nano=False``, then the number of microseconds after the second the packet was captured. If ``sec`` is not specified, this value is ignored. :type usec: int or long :param caplen: The length of the packet in the capture file. If not specified, uses ``len(raw(packet))``. :type caplen: int :param wirelen: The length of the packet on the wire. If not specified, tries ``packet.wirelen``, otherwise uses ``caplen``. :type wirelen: int :returns: None :rtype: None """ if hasattr(packet, "time"): if sec is None: sec = int(packet.time) usec = int(round((packet.time - sec) * (1000000000 if self.nano else 1000000))) if usec is None: usec = 0 rawpkt = raw(packet) caplen = len(rawpkt) if caplen is None else caplen if wirelen is None: if hasattr(packet, "wirelen"): wirelen = packet.wirelen if wirelen is None: wirelen = caplen RawPcapWriter._write_packet( self, rawpkt, sec=sec, usec=usec, caplen=caplen, wirelen=wirelen) @conf.commands.register def import_hexcap(): """Imports a tcpdump like hexadecimal view e.g: exported via hexdump() or tcpdump or wireshark's "export as hex" """ re_extract_hexcap = re.compile(r"^((0x)?[0-9a-fA-F]{2,}[ :\t]{,3}|) *(([0-9a-fA-F]{2} {,2}){,16})") # noqa: E501 p = "" try: while True: line = input().strip() if not line: break try: p += re_extract_hexcap.match(line).groups()[2] except Exception: warning("Parsing error during hexcap") continue except EOFError: pass p = p.replace(" ", "") return hex_bytes(p) @conf.commands.register def wireshark(pktlist, wait=False, **kwargs): """ Runs Wireshark on a list of packets. See :func:`tcpdump` for more parameter description. Note: this defaults to wait=False, to run Wireshark in the background. """ return tcpdump(pktlist, prog=conf.prog.wireshark, wait=wait, **kwargs) @conf.commands.register def tdecode(pktlist, args=None, **kwargs): """ Run tshark on a list of packets. 
:param args: If not specified, defaults to ``tshark -V``. See :func:`tcpdump` for more parameters. """ if args is None: args = ["-V"] return tcpdump(pktlist, prog=conf.prog.tshark, args=args, **kwargs) def _guess_linktype_name(value): """Guess the DLT name from its value.""" import scapy.data return next( k[4:] for k, v in six.iteritems(scapy.data.__dict__) if k.startswith("DLT") and v == value ) def _guess_linktype_value(name): """Guess the value of a DLT name.""" import scapy.data if not name.startswith("DLT_"): name = "DLT_" + name return scapy.data.__dict__[name] @conf.commands.register def tcpdump(pktlist, dump=False, getfd=False, args=None, prog=None, getproc=False, quiet=False, use_tempfile=None, read_stdin_opts=None, linktype=None, wait=True): """Run tcpdump or tshark on a list of packets. When using ``tcpdump`` on OSX (``prog == conf.prog.tcpdump``), this uses a temporary file to store the packets. This works around a bug in Apple's version of ``tcpdump``: http://apple.stackexchange.com/questions/152682/ Otherwise, the packets are passed in stdin. This function can be explicitly enabled or disabled with the ``use_tempfile`` parameter. When using ``wireshark``, it will be called with ``-ki -`` to start immediately capturing packets from stdin. Otherwise, the command will be run with ``-r -`` (which is correct for ``tcpdump`` and ``tshark``). This can be overridden with ``read_stdin_opts``. This has no effect when ``use_tempfile=True``, or otherwise reading packets from a regular file. pktlist: a Packet instance, a PacketList instance or a list of Packet instances. Can also be a filename (as a string), an open file-like object that must be a file format readable by tshark (Pcap, PcapNg, etc.) or None (to sniff) dump: when set to True, returns a string instead of displaying it. getfd: when set to True, returns a file-like object to read data from tcpdump or tshark from. getproc: when set to True, the subprocess.Popen object is returned args: arguments (as a list) to pass to tshark (example for tshark: args=["-T", "json"]). prog: program to use (defaults to tcpdump, will work with tshark) quiet: when set to True, the process stderr is discarded use_tempfile: When set to True, always use a temporary file to store packets. When set to False, pipe packets through stdin. When set to None (default), only use a temporary file with ``tcpdump`` on OSX. read_stdin_opts: When set, a list of arguments needed to capture from stdin. Otherwise, attempts to guess. linktype: A custom DLT value or name, to overwrite the default values. wait: If True (default), waits for the process to terminate before returning to Scapy. If False, the process will be detached to the background. If dump, getproc or getfd is True, these have the same effect as ``wait=False``. Examples: >>> tcpdump([IP()/TCP(), IP()/UDP()]) reading from file -, link-type RAW (Raw IP) 16:46:00.474515 IP 127.0.0.1.20 > 127.0.0.1.80: Flags [S], seq 0, win 8192, length 0 # noqa: E501 16:46:00.475019 IP 127.0.0.1.53 > 127.0.0.1.53: [|domain] >>> tcpdump([IP()/TCP(), IP()/UDP()], prog=conf.prog.tshark) 1 0.000000 127.0.0.1 -> 127.0.0.1 TCP 40 20->80 [SYN] Seq=0 Win=8192 Len=0 # noqa: E501 2 0.000459 127.0.0.1 -> 127.0.0.1 UDP 28 53->53 Len=0 To get a JSON representation of a tshark-parsed PacketList(), one can: >>> import json, pprint >>> json_data = json.load(tcpdump(IP(src="217.25.178.5", dst="45.33.32.156"), ... prog=conf.prog.tshark, args=["-T", "json"], ... 
getfd=True)) >>> pprint.pprint(json_data) [{u'_index': u'packets-2016-12-23', u'_score': None, u'_source': {u'layers': {u'frame': {u'frame.cap_len': u'20', u'frame.encap_type': u'7', [...] u'frame.time_relative': u'0.000000000'}, u'ip': {u'ip.addr': u'45.33.32.156', u'ip.checksum': u'0x0000a20d', [...] u'ip.ttl': u'64', u'ip.version': u'4'}, u'raw': u'Raw packet data'}}, u'_type': u'pcap_file'}] >>> json_data[0]['_source']['layers']['ip']['ip.ttl'] u'64' """ getfd = getfd or getproc if prog is None: prog = [conf.prog.tcpdump] _prog_name = "windump()" if WINDOWS else "tcpdump()" elif isinstance(prog, six.string_types): _prog_name = "{}()".format(prog) prog = [prog] else: raise ValueError("prog must be a string") from scapy.arch.common import TCPDUMP if prog[0] == conf.prog.tcpdump and not TCPDUMP: message = "tcpdump is not available. Cannot use tcpdump() !" raise Scapy_Exception(message) if linktype is not None: # Tcpdump does not support integers in -y (yet) # https://github.com/the-tcpdump-group/tcpdump/issues/758 if isinstance(linktype, int): # Guess name from value try: linktype_name = _guess_linktype_name(linktype) except StopIteration: linktype = -1 else: # Guess value from name if linktype.startswith("DLT_"): linktype = linktype[4:] linktype_name = linktype try: linktype = _guess_linktype_value(linktype) except KeyError: linktype = -1 if linktype == -1: raise ValueError( "Unknown linktype. Try passing its datalink name instead" ) prog += ["-y", linktype_name] # Build Popen arguments if args is None: args = [] else: # Make a copy of args args = list(args) stdout = subprocess.PIPE if dump or getfd else None stderr = open(os.devnull) if quiet else None if use_tempfile is None: # Apple's tcpdump cannot read from stdin, see: # http://apple.stackexchange.com/questions/152682/ use_tempfile = DARWIN and prog[0] == conf.prog.tcpdump if read_stdin_opts is None: if prog[0] == conf.prog.wireshark: # Start capturing immediately (-k) from stdin (-i -) read_stdin_opts = ["-ki", "-"] else: read_stdin_opts = ["-r", "-"] else: # Make a copy of read_stdin_opts read_stdin_opts = list(read_stdin_opts) if pktlist is None: # sniff with ContextManagerSubprocess(_prog_name, prog[0]): proc = subprocess.Popen( prog + args, stdout=stdout, stderr=stderr, ) elif isinstance(pktlist, six.string_types): # file with ContextManagerSubprocess(_prog_name, prog[0]): proc = subprocess.Popen( prog + ["-r", pktlist] + args, stdout=stdout, stderr=stderr, ) elif use_tempfile: tmpfile = get_temp_file(autoext=".pcap", fd=True) try: tmpfile.writelines(iter(lambda: pktlist.read(1048576), b"")) except AttributeError: wrpcap(tmpfile, pktlist, linktype=linktype) else: tmpfile.close() with ContextManagerSubprocess(_prog_name, prog[0]): proc = subprocess.Popen( prog + ["-r", tmpfile.name] + args, stdout=stdout, stderr=stderr, ) else: # pass the packet stream with ContextManagerSubprocess(_prog_name, prog[0]): proc = subprocess.Popen( prog + read_stdin_opts + args, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, ) try: proc.stdin.writelines(iter(lambda: pktlist.read(1048576), b"")) except AttributeError: wrpcap(proc.stdin, pktlist, linktype=linktype) except UnboundLocalError: raise IOError("%s died unexpectedly !" 
% prog) else: proc.stdin.close() if dump: return b"".join(iter(lambda: proc.stdout.read(1048576), b"")) if getproc: return proc if getfd: return proc.stdout if wait: proc.wait() @conf.commands.register def hexedit(pktlist): """Run hexedit on a list of packets, then return the edited packets.""" f = get_temp_file() wrpcap(f, pktlist) with ContextManagerSubprocess("hexedit()", conf.prog.hexedit): subprocess.call([conf.prog.hexedit, f]) pktlist = rdpcap(f) os.unlink(f) return pktlist def get_terminal_width(): """Get terminal width (number of characters) if in a window. Notice: this will try several methods in order to support as many terminals and OS as possible. """ # Let's first try using the official API # (Python 3.3+) if not six.PY2: import shutil sizex = shutil.get_terminal_size(fallback=(0, 0))[0] if sizex != 0: return sizex # Backups / Python 2.7 if WINDOWS: from ctypes import windll, create_string_buffer # http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/ h = windll.kernel32.GetStdHandle(-12) csbi = create_string_buffer(22) res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi) if res: (bufx, bufy, curx, cury, wattr, left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw) # noqa: E501 sizex = right - left + 1 # sizey = bottom - top + 1 return sizex return None else: # We have various methods sizex = None # COLUMNS is set on some terminals try: sizex = int(os.environ['COLUMNS']) except Exception: pass if sizex: return sizex # We can query TIOCGWINSZ try: import fcntl import termios s = struct.pack('HHHH', 0, 0, 0, 0) x = fcntl.ioctl(1, termios.TIOCGWINSZ, s) sizex = struct.unpack('HHHH', x)[1] except IOError: pass return sizex def pretty_list(rtlst, header, sortBy=0, borders=False): """Pretty list to fit the terminal, and add header""" if borders: _space = "|" else: _space = " " # Windows has a fat terminal border _spacelen = len(_space) * (len(header) - 1) + (10 if WINDOWS else 0) _croped = False # Sort correctly rtlst.sort(key=lambda x: x[sortBy]) # Append tag rtlst = header + rtlst # Detect column's width colwidth = [max([len(y) for y in x]) for x in zip(*rtlst)] # Make text fit in box (if required) width = get_terminal_width() if conf.auto_crop_tables and width: width = width - _spacelen while sum(colwidth) > width: _croped = True # Needs to be cropped # Get the longest row i = colwidth.index(max(colwidth)) # Get all elements of this row row = [len(x[i]) for x in rtlst] # Get biggest element of this row: biggest of the array j = row.index(max(row)) # Re-build column tuple with the edited element t = list(rtlst[j]) t[i] = t[i][:-2] + "_" rtlst[j] = tuple(t) # Update max size row[j] = len(t[i]) colwidth[i] = max(row) if _croped: log_runtime.info("Table cropped to fit the terminal (conf.auto_crop_tables==True)") # noqa: E501 # Generate padding scheme fmt = _space.join(["%%-%ds" % x for x in colwidth]) # Append separation line if needed if borders: rtlst.insert(1, tuple("-" * x for x in colwidth)) # Compile rt = "\n".join(((fmt % x).strip() for x in rtlst)) return rt def __make_table(yfmtfunc, fmtfunc, endline, data, fxyz, sortx=None, sorty=None, seplinefunc=None): # noqa: E501 """Core function of the make_table suite, which generates the table""" vx = {} vy = {} vz = {} vxf = {} # Python 2 backward compatibility fxyz = lambda_tuple_converter(fxyz) tmp_len = 0 for e in data: xx, yy, zz = [str(s) for s in fxyz(*e)] tmp_len = max(len(yy), tmp_len) vx[xx] = max(vx.get(xx, 0), len(xx), len(zz)) vy[yy] = None vz[(xx, yy)] = zz 
vxk = list(vx) vyk = list(vy) if sortx: vxk.sort(key=sortx) else: try: vxk.sort(key=int) except Exception: try: vxk.sort(key=atol) except Exception: vxk.sort() if sorty: vyk.sort(key=sorty) else: try: vyk.sort(key=int) except Exception: try: vyk.sort(key=atol) except Exception: vyk.sort() if seplinefunc: sepline = seplinefunc(tmp_len, [vx[x] for x in vxk]) print(sepline) fmt = yfmtfunc(tmp_len) print(fmt % "", end=' ') for x in vxk: vxf[x] = fmtfunc(vx[x]) print(vxf[x] % x, end=' ') print(endline) if seplinefunc: print(sepline) for y in vyk: print(fmt % y, end=' ') for x in vxk: print(vxf[x] % vz.get((x, y), "-"), end=' ') print(endline) if seplinefunc: print(sepline) def make_table(*args, **kargs): __make_table(lambda l: "%%-%is" % l, lambda l: "%%-%is" % l, "", *args, **kargs) # noqa: E501 def make_lined_table(*args, **kargs): __make_table(lambda l: "%%-%is |" % l, lambda l: "%%-%is |" % l, "", seplinefunc=lambda a, x: "+".join('-' * (y + 2) for y in [a - 1] + x + [-2]), # noqa: E501 *args, **kargs) def make_tex_table(*args, **kargs): __make_table(lambda l: "%s", lambda l: "& %s", "\\\\", seplinefunc=lambda a, x: "\\hline", *args, **kargs) # noqa: E501 #################### # WHOIS CLIENT # #################### def whois(ip_address): """Whois client for Python""" whois_ip = str(ip_address) try: query = socket.gethostbyname(whois_ip) except Exception: query = whois_ip s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect(("whois.ripe.net", 43)) s.send(query.encode("utf8") + b"\r\n") answer = b"" while True: d = s.recv(4096) answer += d if not d: break s.close() ignore_tag = b"remarks:" # ignore all lines starting with the ignore_tag lines = [line for line in answer.split(b"\n") if not line or (line and not line.startswith(ignore_tag))] # noqa: E501 # remove empty lines at the bottom for i in range(1, len(lines)): if not lines[-i].strip(): del lines[-i] else: break return b"\n".join(lines[3:]) ####################### # PERIODIC SENDER # ####################### class PeriodicSenderThread(threading.Thread): def __init__(self, sock, pkt, interval=0.5): """ Thread to send packets periodically Args: sock: socket where packet is sent periodically pkt: packet to send interval: interval between two packets """ self._pkt = pkt self._socket = sock self._stopped = threading.Event() self._interval = interval threading.Thread.__init__(self) def run(self): while not self._stopped.is_set(): self._socket.send(self._pkt) time.sleep(self._interval) def stop(self): self._stopped.set()
1
16,116
To avoid useless calls to `conf.l2types.get()` we could keep a `set()` of types that are OK in this PCAP file. We could check `p.__class__ not in self.l2_types_seen` first, only run this code block in that case, and then record the class with `self.l2_types_seen.add(p.__class__)`. I'm not sure if I'm clear here... let me know what you think.
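A rough sketch of this suggestion, assuming an l2_types_seen set initialised to empty in RawPcapWriter.__init__() (the attribute name comes from the comment above and is not part of the patch):

# In RawPcapWriter.write(), non-bytes branch: consult conf.l2types only the
# first time a given packet class is seen, then remember that class.
for p in pkt:
    if not self.header_present:
        self._write_header(p)
    if p.__class__ not in self.l2_types_seen:
        if self.linktype != conf.l2types.get(p.__class__, None):
            warning("Inconsistent linktypes detected!"
                    " The resulting PCAP file might contain"
                    " invalid packets.")
        self.l2_types_seen.add(p.__class__)
    self._write_packet(p)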
secdev-scapy
py
@@ -129,6 +129,11 @@ public class Constants {
   // Used (or should be used) wherever a string representation of UTF_8 charset is needed:
   public static final String UTF_8 = java.nio.charset.StandardCharsets.UTF_8.toString();
 
+  // Specifies the source(adhoc, scheduled, flow_trigger) from where flow execution is triggered
+  public static final String EXECUTION_SOURCE_ADHOC = "adhoc";
+  public static final String EXECUTION_SOURCE_SCHEDULED = "scheduled";
+  public static final String EXECUTION_SOURCE_FLOW_TRIGGER = "flow_trigger";
+
   public static class ConfigurationKeys {
 
     public static final String AZKABAN_GLOBAL_PROPERTIES_EXT_PATH = "executor.global.properties";
1
/* * Copyright 2018 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban; import java.time.Duration; /** * Constants used in configuration files or shared among classes. * * <p>Conventions: * * <p>Internal constants to be put in the {@link Constants} class * * <p>Configuration keys to be put in the {@link ConfigurationKeys} class * * <p>Flow level properties keys to be put in the {@link FlowProperties} class * * <p>Job level Properties keys to be put in the {@link JobProperties} class * * <p>Use '.' to separate name spaces and '_" to separate words in the same namespace. e.g. * azkaban.job.some_key</p> */ public class Constants { // Azkaban Flow Versions public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0; public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0; // Flow 2.0 file suffix public static final String PROJECT_FILE_SUFFIX = ".project"; public static final String FLOW_FILE_SUFFIX = ".flow"; // Flow 2.0 node type public static final String NODE_TYPE = "type"; public static final String FLOW_NODE_TYPE = "flow"; // Flow 2.0 flow and job path delimiter public static final String PATH_DELIMITER = ":"; // Job properties override suffix public static final String JOB_OVERRIDE_SUFFIX = ".jor"; // Names and paths of various file names to configure Azkaban public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties"; public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties"; public static final String DEFAULT_CONF_PATH = "conf"; public static final String DEFAULT_EXECUTOR_PORT_FILE = "executor.port"; public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app"; // Internal username used to perform SLA action public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla"; // Memory check retry interval when OOM in ms public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1; // Max number of memory check retry public static final int MEMORY_CHECK_RETRY_LIMIT = 720; public static final int DEFAULT_PORT_NUMBER = 8081; public static final int DEFAULT_SSL_PORT_NUMBER = 8443; public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20; // Configures the form limits for the web application public static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024; // One Schedule's default End Time: 01/01/2050, 00:00:00, UTC public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L; // Default flow trigger max wait time public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10); public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1); public static final int DEFAULT_MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = 20; // The flow exec id for a flow trigger instance which hasn't started a flow yet public static final int UNASSIGNED_EXEC_ID = -1; // The flow exec id for a flow trigger instance unable to trigger a flow yet public static final int FAILED_EXEC_ID = -2; // Default locked flow error message public static final String 
DEFAULT_LOCKED_FLOW_ERROR_MESSAGE = "Flow %s in project %s is locked. This is either a repeatedly failing flow, or an ineffcient" + " flow. Please refer to the Dr. Elephant report for this flow for more information."; // Default maximum number of concurrent runs for a single flow public static final int DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW = 30; // How often executors will poll new executions in Poll Dispatch model public static final int DEFAULT_AZKABAN_POLLING_INTERVAL_MS = 1000; // Executors can use cpu load calculated from this period to take/skip polling turns public static final int DEFAULT_AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = 60; // Default value to feature enable setting. To be backward compatible, this value === FALSE public static final boolean DEFAULT_AZKABAN_RAMP_ENABLED = false; // Due to multiple AzkabanExec Server instance scenario, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. // This setting value controls to push result into DB every N finished ramped workflows public static final int DEFAULT_AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = 20; // Due to multiple AzkabanExec Server instance, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. // This setting value controls to pull result from DB every N new ramped workflows public static final int DEFAULT_AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = 50; // Use Polling Service to sync the ramp status cross EXEC Server. public static final boolean DEFAULT_AZKABAN_RAMP_STATUS_POOLING_ENABLED = false; // How often executors will poll ramp status in Poll Dispatch model public static final int DEFAULT_AZKABAN_RAMP_STATUS_POLLING_INTERVAL = 10; // Username to be sent to UserManager when OAuth is in use, and real username is not available: public static final String OAUTH_USERNAME_PLACEHOLDER = "<OAuth>"; // Used by UserManager for password validation (to tell apart real passwords from auth codes). 
// Empirically, passwords are shorter than this, and ACs are longer: public static final int OAUTH_MIN_AUTHCODE_LENGTH = 80; // Used (or should be used) wherever a string representation of UTF_8 charset is needed: public static final String UTF_8 = java.nio.charset.StandardCharsets.UTF_8.toString(); public static class ConfigurationKeys { public static final String AZKABAN_GLOBAL_PROPERTIES_EXT_PATH = "executor.global.properties"; // Configures Azkaban to use new polling model for dispatching public static final String AZKABAN_POLL_MODEL = "azkaban.poll.model"; public static final String AZKABAN_POLLING_INTERVAL_MS = "azkaban.polling.interval.ms"; public static final String AZKABAN_POLLING_LOCK_ENABLED = "azkaban.polling.lock.enabled"; public static final String AZKABAN_POLLING_CRITERIA_FLOW_THREADS_AVAILABLE = "azkaban.polling_criteria.flow_threads_available"; public static final String AZKABAN_POLLING_CRITERIA_MIN_FREE_MEMORY_GB = "azkaban.polling_criteria.min_free_memory_gb"; public static final String AZKABAN_POLLING_CRITERIA_MAX_CPU_UTILIZATION_PCT = "azkaban.polling_criteria.max_cpu_utilization_pct"; public static final String AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = "azkaban.polling_criteria.cpu_load_period_sec"; // Configures properties for Azkaban executor health check public static final String AZKABAN_EXECUTOR_HEALTHCHECK_INTERVAL_MIN = "azkaban.executor.healthcheck.interval.min"; public static final String AZKABAN_EXECUTOR_MAX_FAILURE_COUNT = "azkaban.executor.max.failurecount"; public static final String AZKABAN_ADMIN_ALERT_EMAIL = "azkaban.admin.alert.email"; // Configures Azkaban Flow Version in project YAML file public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version"; // These properties are configurable through azkaban.properties public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename"; // Defines a list of external links, each referred to as a topic public static final String AZKABAN_SERVER_EXTERNAL_TOPICS = "azkaban.server.external.topics"; // External URL template of a given topic, specified in the list defined above public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url"; // Designates one of the external link topics to correspond to an execution analyzer public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic"; public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label"; // Designates one of the external link topics to correspond to a job log viewer public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic"; public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label"; /* * Hadoop/Spark user job link. 
* Example: * a) azkaban.server.external.resource_manager_job_url=http://***rm***:8088/cluster/app/application_${application.id} * b) azkaban.server.external.history_server_job_url=http://***jh***:19888/jobhistory/job/job_${application.id} * c) azkaban.server.external.spark_history_server_job_url=http://***sh***:18080/history/application_${application.id}/1/jobs * */ public static final String RESOURCE_MANAGER_JOB_URL = "azkaban.server.external.resource_manager_job_url"; public static final String HISTORY_SERVER_JOB_URL = "azkaban.server.external.history_server_job_url"; public static final String SPARK_HISTORY_SERVER_JOB_URL = "azkaban.server.external.spark_history_server_job_url"; // Configures the Kafka appender for logging user jobs, specified for the exec server public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList"; public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic"; // Represent the class name of azkaban metrics reporter. public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name"; // Represent the metrics server URL. public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url"; public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled"; public static final String MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = "azkaban.metrics" + ".min_age_for_classifying_a_flow_aged_minutes"; // User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users. // enduser -> myazkabanhost:443 -> proxy -> localhost:8081 // when this parameters set then these parameters are used to generate email links. // if these parameters are not set then jetty.hostname, and jetty.port(if ssl configured jetty.ssl.port) are used. public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname"; public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port"; public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port"; // Hostname for the host, if not specified, canonical hostname will be used public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname"; // List of users we prevent azkaban from running flows as. (ie: root, azkaban) public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users"; // Path name of execute-as-user executable public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib"; // Name of *nix group associated with the process running Azkaban public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name"; // Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs. // Jetty server configurations. public static final String JETTY_HEADER_BUFFER_SIZE = "jetty.headerBufferSize"; public static final String JETTY_USE_SSL = "jetty.use.ssl"; public static final String JETTY_SSL_PORT = "jetty.ssl.port"; public static final String JETTY_PORT = "jetty.port"; public static final String EXECUTOR_PORT_FILE = "executor.portfile"; // To set a fixed port for executor-server. Otherwise some available port is used. 
public static final String EXECUTOR_PORT = "executor.port"; public static final String DEFAULT_TIMEZONE_ID = "default.timezone.id"; // Boolean config set on the Web server to prevent users from creating projects. When set to // true only admins or users with CREATEPROJECTS permission can create projects. public static final String LOCKDOWN_CREATE_PROJECTS_KEY = "lockdown.create.projects"; // Boolean config set on the Web server to prevent users from uploading projects. When set to // true only admins or users with UPLOADPROJECTS permission can upload projects. public static final String LOCKDOWN_UPLOAD_PROJECTS_KEY = "lockdown.upload.projects"; // Max flow running time in mins, server will kill flows running longer than this setting. // if not set or <= 0, then there's no restriction on running time. public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes"; // Maximum number of tries to download a dependency (no more retry attempts will be made after this many download failures) public static final String AZKABAN_DEPENDENCY_MAX_DOWNLOAD_TRIES = "azkaban.dependency.max.download.tries"; public static final String AZKABAN_DEPENDENCY_DOWNLOAD_THREADPOOL_SIZE = "azkaban.dependency.download.threadpool.size"; public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type"; public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir"; public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path"; // This really should be azkaban.storage.hdfs.project_root.uri public static final String AZKABAN_STORAGE_HDFS_PROJECT_ROOT_URI = "azkaban.storage.hdfs.root.uri"; public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ENABLED = "azkaban.storage.cache.dependency.enabled"; public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ROOT_URI = "azkaban.storage.cache.dependency_root.uri"; public static final String AZKABAN_STORAGE_ORIGIN_DEPENDENCY_ROOT_URI = "azkaban.storage.origin.dependency_root.uri"; public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal"; public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path"; public static final String PROJECT_TEMP_DIR = "project.temp.dir"; // Event reporting properties public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM = "azkaban.event.reporting.class"; public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled"; // Comma separated list of properties to propagate from flow to Event reporter metadata public static final String AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE = "azkaban.event.reporting.propagateProperties"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS = "azkaban.event.reporting.kafka.brokers"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC = "azkaban.event.reporting.kafka.topic"; public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL = "azkaban.event.reporting.kafka.schema.registry.url"; /* * The max number of artifacts retained per project. * Accepted Values: * - 0 : Save all artifacts. No clean up is done on storage. * - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage * * Note: Having an unacceptable value results in an exception and the service would REFUSE * to start. * * Example: * a) azkaban.storage.artifact.max.retention=all * implies save all artifacts * b) azkaban.storage.artifact.max.retention=3 * implies save latest 3 versions saved in storage. 
**/ public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention"; // enable quartz scheduler and flow trigger if true. public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz"; public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential"; public static final String OAUTH_CREDENTIAL_NAME = "azkaban.oauth.credential"; public static final String SECURITY_USER_GROUP = "azkaban.security.user.group"; // dir to keep dependency plugins public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir"; public static final String USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors"; public static final String MAX_CONCURRENT_RUNS_ONEFLOW = "azkaban.max.concurrent.runs.oneflow"; // list of whitelisted flows, with specific max number of concurrent runs. Format: // <project 1>,<flow 1>,<number>;<project 2>,<flow 2>,<number> public static final String CONCURRENT_RUNS_ONEFLOW_WHITELIST = "azkaban.concurrent.runs.oneflow.whitelist"; public static final String WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size"; public static final String ACTIVE_EXECUTOR_REFRESH_IN_MS = "azkaban.activeexecutor.refresh.milisecinterval"; public static final String ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW = "azkaban.activeexecutor.refresh.flowinterval"; public static final String EXECUTORINFO_REFRESH_MAX_THREADS = "azkaban.executorinfo.refresh.maxThreads"; public static final String MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors"; public static final String EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters"; public static final String EXECUTOR_SELECTOR_COMPARATOR_PREFIX = "azkaban.executorselector.comparator."; public static final String QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled"; public static final String SESSION_TIME_TO_LIVE = "session.time.to.live"; // allowed max number of sessions per user per IP public static final String MAX_SESSION_NUMBER_PER_IP_PER_USER = "azkaban.session" + ".max_number_per_ip_per_user"; // allowed max size of shared project dir (percentage of partition size), e.g 0.8 public static final String PROJECT_CACHE_SIZE_PERCENTAGE = "azkaban.project_cache_size_percentage_of_disk"; public static final String PROJECT_CACHE_THROTTLE_PERCENTAGE = "azkaban.project_cache_throttle_percentage"; // how many older versions of project files are kept in DB before deleting them public static final String PROJECT_VERSION_RETENTION = "project.version.retention"; // number of rows to be displayed on the executions page. public static final String DISPLAY_EXECUTION_PAGE_SIZE = "azkaban.display.execution_page_size"; // locked flow error message. Parameters passed in are the flow name and project name. public static final String AZKABAN_LOCKED_FLOW_ERROR_MESSAGE = "azkaban.locked.flow.error.message"; // flow ramp related setting keys // Default value to feature enable setting. To be backward compatible, this value === FALSE public static final String AZKABAN_RAMP_ENABLED = "azkaban.ramp.enabled"; // Due to multiple AzkabanExec Server instance scenario, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. 
// This setting value controls to push result into DB every N finished ramped workflows public static final String AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = "azkaban.ramp.status.push.interval.max"; // Due to multiple AzkabanExec Server instance, it will be required to persistent the ramp result into the DB. // However, Frequent data persistence will sacrifice the performance with limited data accuracy. // This setting value controls to pull result from DB every N new ramped workflows public static final String AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = "azkaban.ramp.status.pull.interval.max"; // A Polling Service can be applied to determine the ramp status synchronization interval. public static final String AZKABAN_RAMP_STATUS_POLLING_ENABLED = "azkaban.ramp.status.polling.enabled"; public static final String AZKABAN_RAMP_STATUS_POLLING_INTERVAL = "azkaban.ramp.status.polling.interval"; public static final String AZKABAN_RAMP_STATUS_POLLING_CPU_MAX = "azkaban.ramp.status.polling.cpu.max"; public static final String AZKABAN_RAMP_STATUS_POLLING_MEMORY_MIN = "azkaban.ramp.status.polling.memory.min"; public static final String EXECUTION_LOGS_RETENTION_MS = "execution.logs.retention.ms"; public static final String EXECUTION_LOGS_CLEANUP_INTERVAL_SECONDS = "execution.logs.cleanup.interval.seconds"; public static final String EXECUTION_LOGS_CLEANUP_RECORD_LIMIT = "execution.logs.cleanup.record.limit"; // Oauth2.0 configuration keys. If missing, no OAuth will be attempted, and the old // username/password{+2FA} prompt will be given for interactive login: public static final String OAUTH_PROVIDER_URI_KEY = "oauth.provider_uri"; // where to send user for OAuth flow, e.g.: // oauth.provider_uri=https://login.microsoftonline.com/tenant-id/oauth2/v2.0/authorize\ // ?client_id=client_id\ // &response_type=code\ // &scope=openid\ // &response_mode=form_post\ // &state={state}\ // &redirect_uri={redirect_uri} // Strings {state} and {redirect_uri}, if present verbatim in the property value, will be // substituted at runtime with (URL-encoded) navigation target and OAuth responce handler URIs, // respectively. See handleOauth() in LoginAbstractServlet.java for details. public static final String OAUTH_REDIRECT_URI_KEY = "oauth.redirect_uri"; // how OAuth calls us back, e.g.: // oauth.redirect_uri=http://localhost:8081/?action=oauth_callback } public static class FlowProperties { // Basic properties of flows as set by the executor server public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname"; public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid"; public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser"; public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid"; public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion"; } public static class JobProperties { // Job property that enables/disables using Kafka logging of user job logs public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable"; /* * this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available. * EXTRA_HCAT_CLUSTERS has the following format: * other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port" * Each string in the parenthesis is regarded as a "cluster", and we will get a delegation token from each cluster. * The uris(hcat servers) in a "cluster" ensures HA is provided. 
**/ public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters"; /* * the settings to be defined by user indicating if there are hcat locations other than the * default one the system should pre-fetch hcat token from. Note: Multiple thrift uris are * supported, use comma to separate the values, values are case insensitive. **/ // Use EXTRA_HCAT_CLUSTERS instead @Deprecated public static final String EXTRA_HCAT_LOCATION = "other_hcat_location"; // If true, AZ will fetches the jobs' certificate from remote Certificate Authority. public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl"; // If true, AZ will fetch OAuth token from credential provider public static final String ENABLE_OAUTH = "azkaban.enable.oauth"; // Job properties that indicate maximum memory size public static final String JOB_MAX_XMS = "job.max.Xms"; public static final String MAX_XMS_DEFAULT = "1G"; public static final String JOB_MAX_XMX = "job.max.Xmx"; public static final String MAX_XMX_DEFAULT = "2G"; // The hadoop user the job should run under. If not specified, it will default to submit user. public static final String USER_TO_PROXY = "user.to.proxy"; /** * Format string for Log4j's EnhancedPatternLayout */ public static final String JOB_LOG_LAYOUT = "azkaban.job.log.layout"; } public static class JobCallbackProperties { public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout"; public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout"; public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout"; public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout"; public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size"; } public static class FlowTriggerProps { // Flow trigger props public static final String SCHEDULE_TYPE = "type"; public static final String CRON_SCHEDULE_TYPE = "cron"; public static final String SCHEDULE_VALUE = "value"; public static final String DEP_NAME = "name"; // Flow trigger dependency run time props public static final String START_TIME = "startTime"; public static final String TRIGGER_INSTANCE_ID = "triggerInstanceId"; } public static class PluginManager { public static final String JOBTYPE_DEFAULTDIR = "plugins/jobtypes"; public static final String RAMPPOLICY_DEFAULTDIR = "plugins/ramppolicies"; // need jars.to.include property, will be loaded with user property public static final String CONFFILE = "plugin.properties"; // not exposed to users public static final String SYSCONFFILE = "private.properties"; // common properties for multiple plugins public static final String COMMONCONFFILE = "common.properties"; // common private properties for multiple plugins public static final String COMMONSYSCONFFILE = "commonprivate.properties"; } }
1
19,792
Shouldn't "source" be a noun? How about? -EXECUTION_SOURCE_USER -EXECUTION_SOURCE_SCHEDULE -EXECUTION_SOURCE_EVENT (because it's an event based trigger mechanism)
azkaban-azkaban
java
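To make the naming suggestion in the review comment above concrete, here is a minimal Java sketch of what noun-style execution-source constants could look like. Only the three constant names come from the review; the class name and string values are illustrative assumptions, not part of the actual Azkaban change.

```java
// Hypothetical sketch only: noun-style constants as suggested in the review.
// Class name and string values are assumptions for illustration.
public final class ExecutionSource {

  // Execution triggered manually by a user.
  public static final String EXECUTION_SOURCE_USER = "user";

  // Execution triggered by the scheduler.
  public static final String EXECUTION_SOURCE_SCHEDULE = "schedule";

  // Execution triggered by the event-based trigger mechanism.
  public static final String EXECUTION_SOURCE_EVENT = "event";

  private ExecutionSource() {
    // constants holder; not instantiable
  }
}
```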
@@ -1,6 +1,6 @@
 _base_ = [
     '../_base_/models/mask_rcnn_r50_fpn.py',
-    '../_base_/datasets/coco_instance.py',
+    '../_base_/datasets/lvis_v1_instance.py',
     '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
 ]
 model = dict(
1
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] model = dict( roi_head=dict( bbox_head=dict( num_classes=1203, cls_predictor_cfg=dict(type='NormedLinear', tempearture=20), loss_cls=dict( type='SeesawLoss', p=0.8, q=2.0, num_classes=1203, loss_weight=1.0)), mask_head=dict(num_classes=1203)), test_cfg=dict( rcnn=dict( score_thr=0.0001, # LVIS allows up to 300 max_per_img=300))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] dataset_type = 'LVISV1Dataset' data_root = 'data/lvis_v1/' data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_train.json', img_prefix=data_root, pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_val.json', img_prefix=data_root, pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/lvis_v1_val.json', img_prefix=data_root, pipeline=test_pipeline)) evaluation = dict(interval=24, metric=['bbox', 'segm'])
1
26,796
We should not switch to `lvis_v1_instance` here, because that base config uses ClassBalancedDataset to oversample the data.
open-mmlab-mmdetection
py
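To illustrate the objection in the review above: the `lvis_v1_instance` base config wraps the LVIS training set in a `ClassBalancedDataset`, which repeats images containing rare categories and therefore changes the sampling distribution, whereas the config in this record defines the LVIS dataset directly without that wrapper. Below is a rough sketch of the wrapper pattern the reviewer is referring to; the `oversample_thr` value and paths are assumptions for illustration, not copied from the repository.

```python
# Rough sketch (assumed values): how an mmdetection-style base config wraps the
# LVIS training set in ClassBalancedDataset, which oversamples rare categories
# and therefore changes the effective training distribution.
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
    train=dict(
        type='ClassBalancedDataset',  # repeat-factor oversampling wrapper
        oversample_thr=1e-3,          # assumed threshold, for illustration only
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v1_train.json',
            img_prefix=data_root)))
```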
@@ -24,6 +24,8 @@ describe('th-has-data-cells', function () {
       '<table>' +
       ' <tr> <th>hi</th> <td>hello</td> </tr>' +
       ' <tr> <th>hi</th> <td>hello</td> </tr>' +
+      ' <tr> <td>hello</td> <th>hi</th> </tr>' +
+      ' <tr> <td>hello</td> <th>hi</th> </tr>' +
       '</table>';
 
     var node = fixture.querySelector('table');
1
describe('th-has-data-cells', function () { 'use strict'; var fixture = document.getElementById('fixture'); var checkContext = { _relatedNodes: [], _data: null, data: function (d) { this._data = d; }, relatedNodes: function (rn) { this._relatedNodes = rn; } }; afterEach(function () { fixture.innerHTML = ''; checkContext._relatedNodes = []; checkContext._data = null; }); it('should return true each row header has a non-empty cell', function (){ fixture.innerHTML = '<table>' + ' <tr> <th>hi</th> <td>hello</td> </tr>' + ' <tr> <th>hi</th> <td>hello</td> </tr>' + '</table>'; var node = fixture.querySelector('table'); assert.isTrue(checks['th-has-data-cells'].evaluate.call(checkContext, node)); }); it('should return true each non-empty column header has a cell', function (){ fixture.innerHTML = '<table>' + ' <tr> <th>H</th> <th>H</th> </tr>' + ' <tr> <td>hi</td> <td>hello</td></tr>' + '</table>'; var node = fixture.querySelector('table'); assert.isTrue(checks['th-has-data-cells'].evaluate.call(checkContext, node)); }); it('should return true if referred to with headers attr', function (){ fixture.innerHTML = '<table>' + ' <tr> <td headers="a">hi</td> <td headers="b">hello</td></tr>' + ' <tr> <th id="a">H</th> <th id="b">H</th> </tr>' + '</table>'; var node = fixture.querySelector('table'); assert.isTrue(checks['th-has-data-cells'].evaluate.call(checkContext, node)); }); it('should return true if referred to with aria-labelledby', function (){ fixture.innerHTML = '<table>' + ' <tr> <td aria-labelledby="a">hi</td> <td aria-labelledby="b">hello</td></tr>' + ' <tr> <th id="a">H</th> <th id="b">H</th> </tr>' + '</table>'; var node = fixture.querySelector('table'); assert.isTrue(checks['th-has-data-cells'].evaluate.call(checkContext, node)); }); it('should return true if the th element is empty', function (){ fixture.innerHTML = '<table>' + ' <tr> <th></th> <th></th> </tr>' + ' <tr> <th></th> <th></th> </tr>' + '</table>'; var node = fixture.querySelector('table'); assert.isTrue(checks['th-has-data-cells'].evaluate.call(checkContext, node)); }); it('should return false if a th has no data cells', function (){ fixture.innerHTML = '<table>' + ' <tr> <th>hi</th> </tr>' + ' <tr> <th>hi</th> </tr>' + '</table>'; var node = fixture.querySelector('table'); assert.isFalse(checks['th-has-data-cells'].evaluate.call(checkContext, node)); }); it('should return false if all data cells are empty', function (){ fixture.innerHTML = '<table>' + ' <tr> <th>hi</th> <td></td> </tr>' + ' <tr> <th>hi</th> <td></td> </tr>' + '</table>'; var node = fixture.querySelector('table'); assert.isFalse(checks['th-has-data-cells'].evaluate.call(checkContext, node)); }); it('should return false if a td with role=columnheader is used that has no data cells', function (){ fixture.innerHTML = '<table id="fail4">' + ' <tr> <td>aXe</td> <td role="columnheader">AXE</th> </tr>' + '</table>'; var node = fixture.querySelector('table'); assert.isFalse(checks['th-has-data-cells'].evaluate.call(checkContext, node)); }); });
1
10,955
Last time I checked, this technique did not work on all major screen reader and browser combinations. What screen readers have you tested?
dequelabs-axe-core
js
@@ -180,6 +180,8 @@ namespace Datadog.Trace.Configuration
             TraceBatchInterval = source?.GetInt32(ConfigurationKeys.SerializationBatchInterval) ??
                                  100;
+
+            AspnetRouteTemplateResourceNamesEnabled = IsFeatureFlagEnabled(ConfigurationKeys.FeatureFlags.AspnetRouteTemplateResourceNamesEnabled);
         }
 
         /// <summary>
1
using System; using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; using System.Text.RegularExpressions; using Datadog.Trace.PlatformHelpers; using Datadog.Trace.Util; using Datadog.Trace.Vendors.Serilog; namespace Datadog.Trace.Configuration { /// <summary> /// Contains Tracer settings. /// </summary> public class TracerSettings { /// <summary> /// The default host value for <see cref="AgentUri"/>. /// </summary> public const string DefaultAgentHost = "localhost"; /// <summary> /// The default port value for <see cref="AgentUri"/>. /// </summary> public const int DefaultAgentPort = 8126; /// <summary> /// Initializes a new instance of the <see cref="TracerSettings"/> class with default values. /// </summary> public TracerSettings() : this(null) { } /// <summary> /// Initializes a new instance of the <see cref="TracerSettings"/> class /// using the specified <see cref="IConfigurationSource"/> to initialize values. /// </summary> /// <param name="source">The <see cref="IConfigurationSource"/> to use when retrieving configuration values.</param> public TracerSettings(IConfigurationSource source) { Environment = source?.GetString(ConfigurationKeys.Environment); ServiceName = source?.GetString(ConfigurationKeys.ServiceName) ?? // backwards compatibility for names used in the past source?.GetString("DD_SERVICE_NAME"); ServiceVersion = source?.GetString(ConfigurationKeys.ServiceVersion); TraceEnabled = source?.GetBool(ConfigurationKeys.TraceEnabled) ?? // default value true; if (AzureAppServices.Metadata.IsRelevant && AzureAppServices.Metadata.IsUnsafeToTrace) { TraceEnabled = false; } var disabledIntegrationNames = source?.GetString(ConfigurationKeys.DisabledIntegrations) ?.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries) ?? Enumerable.Empty<string>(); DisabledIntegrationNames = new HashSet<string>(disabledIntegrationNames, StringComparer.OrdinalIgnoreCase); var adonetExcludedTypes = source?.GetString(ConfigurationKeys.AdoNetExcludedTypes) ?.Split(new[] { ';' }, StringSplitOptions.RemoveEmptyEntries) ?? Enumerable.Empty<string>(); AdoNetExcludedTypes = new HashSet<string>(adonetExcludedTypes, StringComparer.OrdinalIgnoreCase); Integrations = new IntegrationSettingsCollection(source); var agentHost = source?.GetString(ConfigurationKeys.AgentHost) ?? // backwards compatibility for names used in the past source?.GetString("DD_TRACE_AGENT_HOSTNAME") ?? source?.GetString("DATADOG_TRACE_AGENT_HOSTNAME") ?? // default value DefaultAgentHost; var agentPort = source?.GetInt32(ConfigurationKeys.AgentPort) ?? // backwards compatibility for names used in the past source?.GetInt32("DATADOG_TRACE_AGENT_PORT") ?? // default value DefaultAgentPort; var agentUri = source?.GetString(ConfigurationKeys.AgentUri) ?? // default value $"http://{agentHost}:{agentPort}"; AgentUri = new Uri(agentUri); TracesPipeName = source?.GetString(ConfigurationKeys.TracesPipeName); TracesPipeTimeoutMs = source?.GetInt32(ConfigurationKeys.TracesPipeTimeoutMs) #if DEBUG ?? 20_000; #else ?? 100; #endif TracesTransport = source?.GetString(ConfigurationKeys.TracesTransport); if (string.Equals(AgentUri.Host, "localhost", StringComparison.OrdinalIgnoreCase)) { // Replace localhost with 127.0.0.1 to avoid DNS resolution. // When ipv6 is enabled, localhost is first resolved to ::1, which fails // because the trace agent is only bound to ipv4. // This causes delays when sending traces. 
var builder = new UriBuilder(agentUri) { Host = "127.0.0.1" }; AgentUri = builder.Uri; } AnalyticsEnabled = source?.GetBool(ConfigurationKeys.GlobalAnalyticsEnabled) ?? // default value false; LogsInjectionEnabled = source?.GetBool(ConfigurationKeys.LogsInjectionEnabled) ?? // default value false; MaxTracesSubmittedPerSecond = source?.GetInt32(ConfigurationKeys.MaxTracesSubmittedPerSecond) ?? // default value 100; GlobalTags = source?.GetDictionary(ConfigurationKeys.GlobalTags) ?? // backwards compatibility for names used in the past source?.GetDictionary("DD_TRACE_GLOBAL_TAGS") ?? // default value (empty) new ConcurrentDictionary<string, string>(); // Filter out tags with empty keys or empty values, and trim whitespace GlobalTags = GlobalTags.Where(kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value)) .ToDictionary(kvp => kvp.Key.Trim(), kvp => kvp.Value.Trim()); HeaderTags = source?.GetDictionary(ConfigurationKeys.HeaderTags) ?? // default value (empty) new ConcurrentDictionary<string, string>(); // Filter out tags with empty keys or empty values, and trim whitespace HeaderTags = HeaderTags.Where(kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value)) .ToDictionary(kvp => kvp.Key.Trim(), kvp => kvp.Value.Trim()); var serviceNameMappings = source?.GetDictionary(ConfigurationKeys.ServiceNameMappings) ?.Where(kvp => !string.IsNullOrWhiteSpace(kvp.Key) && !string.IsNullOrWhiteSpace(kvp.Value)) ?.ToDictionary(kvp => kvp.Key.Trim(), kvp => kvp.Value.Trim()); ServiceNameMappings = new ServiceNames(serviceNameMappings); DogStatsdPort = source?.GetInt32(ConfigurationKeys.DogStatsdPort) ?? // default value 8125; TracerMetricsEnabled = source?.GetBool(ConfigurationKeys.TracerMetricsEnabled) ?? // default value false; RuntimeMetricsEnabled = source?.GetBool(ConfigurationKeys.RuntimeMetricsEnabled) ?? false; CustomSamplingRules = source?.GetString(ConfigurationKeys.CustomSamplingRules); GlobalSamplingRate = source?.GetDouble(ConfigurationKeys.GlobalSamplingRate); StartupDiagnosticLogEnabled = source?.GetBool(ConfigurationKeys.StartupDiagnosticLogEnabled) ?? // default value true; var httpServerErrorStatusCodes = source?.GetString(ConfigurationKeys.HttpServerErrorStatusCodes) ?? // Default value "500-599"; HttpServerErrorStatusCodes = ParseHttpCodesToArray(httpServerErrorStatusCodes); var httpClientErrorStatusCodes = source?.GetString(ConfigurationKeys.HttpClientErrorStatusCodes) ?? // Default value "400-499"; HttpClientErrorStatusCodes = ParseHttpCodesToArray(httpClientErrorStatusCodes); TraceBufferSize = source?.GetInt32(ConfigurationKeys.BufferSize) ?? 1024 * 1024 * 10; // 10MB TraceBatchInterval = source?.GetInt32(ConfigurationKeys.SerializationBatchInterval) ?? 100; } /// <summary> /// Gets or sets the default environment name applied to all spans. /// </summary> /// <seealso cref="ConfigurationKeys.Environment"/> public string Environment { get; set; } /// <summary> /// Gets or sets the service name applied to top-level spans and used to build derived service names. /// </summary> /// <seealso cref="ConfigurationKeys.ServiceName"/> public string ServiceName { get; set; } /// <summary> /// Gets or sets the version tag applied to all spans. /// </summary> /// <seealso cref="ConfigurationKeys.ServiceVersion"/> public string ServiceVersion { get; set; } /// <summary> /// Gets or sets a value indicating whether tracing is enabled. /// Default is <c>true</c>. 
/// </summary> /// <seealso cref="ConfigurationKeys.TraceEnabled"/> public bool TraceEnabled { get; set; } /// <summary> /// Gets or sets a value indicating whether debug is enabled for a tracer. /// This property is obsolete. Manage the debug setting through GlobalSettings. /// </summary> /// <seealso cref="GlobalSettings.DebugEnabled"/> [Obsolete] public bool DebugEnabled { get; set; } /// <summary> /// Gets or sets the names of disabled integrations. /// </summary> /// <seealso cref="ConfigurationKeys.DisabledIntegrations"/> public HashSet<string> DisabledIntegrationNames { get; set; } /// <summary> /// Gets or sets the AdoNet types to exclude from automatic instrumentation. /// </summary> /// <seealso cref="ConfigurationKeys.AdoNetExcludedTypes"/> public HashSet<string> AdoNetExcludedTypes { get; set; } /// <summary> /// Gets or sets the Uri where the Tracer can connect to the Agent. /// Default is <c>"http://localhost:8126"</c>. /// </summary> /// <seealso cref="ConfigurationKeys.AgentUri"/> /// <seealso cref="ConfigurationKeys.AgentHost"/> /// <seealso cref="ConfigurationKeys.AgentPort"/> public Uri AgentUri { get; set; } /// <summary> /// Gets or sets the key used to determine the transport for sending traces. /// Default is <c>null</c>, which will use the default path decided in <see cref="Agent.Api"/>. /// </summary> /// <seealso cref="ConfigurationKeys.TracesTransport"/> public string TracesTransport { get; set; } /// <summary> /// Gets or sets the windows pipe name where the Tracer can connect to the Agent. /// Default is <c>null</c>. /// </summary> /// <seealso cref="ConfigurationKeys.TracesPipeName"/> public string TracesPipeName { get; set; } /// <summary> /// Gets or sets the timeout in milliseconds for the windows named pipe requests. /// Default is <c>100</c>. /// </summary> /// <seealso cref="ConfigurationKeys.TracesPipeTimeoutMs"/> public int TracesPipeTimeoutMs { get; set; } /// <summary> /// Gets or sets the windows pipe name where the Tracer can send stats. /// Default is <c>null</c>. /// </summary> /// <seealso cref="ConfigurationKeys.MetricsPipeName"/> public string MetricsPipeName { get; set; } /// <summary> /// Gets or sets a value indicating whether default Analytics are enabled. /// Settings this value is a shortcut for setting /// <see cref="Configuration.IntegrationSettings.AnalyticsEnabled"/> on some predetermined integrations. /// See the documentation for more details. /// </summary> /// <seealso cref="ConfigurationKeys.GlobalAnalyticsEnabled"/> public bool AnalyticsEnabled { get; set; } /// <summary> /// Gets or sets a value indicating whether correlation identifiers are /// automatically injected into the logging context. /// Default is <c>false</c>. /// </summary> /// <seealso cref="ConfigurationKeys.LogsInjectionEnabled"/> public bool LogsInjectionEnabled { get; set; } /// <summary> /// Gets or sets a value indicating the maximum number of traces set to AutoKeep (p1) per second. /// Default is <c>100</c>. /// </summary> /// <seealso cref="ConfigurationKeys.MaxTracesSubmittedPerSecond"/> public int MaxTracesSubmittedPerSecond { get; set; } /// <summary> /// Gets or sets a value indicating custom sampling rules. /// </summary> /// <seealso cref="ConfigurationKeys.CustomSamplingRules"/> public string CustomSamplingRules { get; set; } /// <summary> /// Gets or sets a value indicating a global rate for sampling. /// </summary> /// <seealso cref="ConfigurationKeys.GlobalSamplingRate"/> public double? 
GlobalSamplingRate { get; set; } /// <summary> /// Gets a collection of <see cref="Integrations"/> keyed by integration name. /// </summary> public IntegrationSettingsCollection Integrations { get; } /// <summary> /// Gets or sets the global tags, which are applied to all <see cref="Span"/>s. /// </summary> public IDictionary<string, string> GlobalTags { get; set; } /// <summary> /// Gets or sets the map of header keys to tag names, which are applied to the root <see cref="Span"/> of incoming requests. /// </summary> public IDictionary<string, string> HeaderTags { get; set; } /// <summary> /// Gets or sets the port where the DogStatsd server is listening for connections. /// Default is <c>8125</c>. /// </summary> /// <seealso cref="ConfigurationKeys.DogStatsdPort"/> public int DogStatsdPort { get; set; } /// <summary> /// Gets or sets a value indicating whether internal metrics /// are enabled and sent to DogStatsd. /// </summary> public bool TracerMetricsEnabled { get; set; } /// <summary> /// Gets or sets a value indicating whether runtime metrics /// are enabled and sent to DogStatsd. /// </summary> public bool RuntimeMetricsEnabled { get; set; } /// <summary> /// Gets or sets a value indicating whether the use /// of System.Diagnostics.DiagnosticSource is enabled. /// Default is <c>true</c>. /// </summary> /// <remark> /// This value cannot be set in code. Instead, /// set it using the <c>DD_TRACE_DIAGNOSTIC_SOURCE_ENABLED</c> /// environment variable or in configuration files. /// </remark> public bool DiagnosticSourceEnabled { get => GlobalSettings.Source.DiagnosticSourceEnabled; set { } } /// <summary> /// Gets or sets a value indicating whether the diagnostic log at startup is enabled /// </summary> public bool StartupDiagnosticLogEnabled { get; set; } /// <summary> /// Gets or sets the HTTP status code that should be marked as errors for server integrations. /// </summary> /// <seealso cref="ConfigurationKeys.HttpServerErrorStatusCodes"/> internal bool[] HttpServerErrorStatusCodes { get; set; } /// <summary> /// Gets or sets the HTTP status code that should be marked as errors for client integrations. /// </summary> /// <seealso cref="ConfigurationKeys.HttpClientErrorStatusCodes"/> internal bool[] HttpClientErrorStatusCodes { get; set; } /// <summary> /// Gets configuration values for changing service names based on configuration /// </summary> internal ServiceNames ServiceNameMappings { get; } /// <summary> /// Gets or sets a value indicating the size in bytes of the trace buffer /// </summary> internal int TraceBufferSize { get; set; } /// <summary> /// Gets or sets a value indicating the batch interval for the serialization queue, in milliseconds /// </summary> internal int TraceBatchInterval { get; set; } /// <summary> /// Create a <see cref="TracerSettings"/> populated from the default sources /// returned by <see cref="CreateDefaultConfigurationSource"/>. /// </summary> /// <returns>A <see cref="TracerSettings"/> populated from the default sources.</returns> public static TracerSettings FromDefaultSources() { var source = CreateDefaultConfigurationSource(); return new TracerSettings(source); } /// <summary> /// Creates a <see cref="IConfigurationSource"/> by combining environment variables, /// AppSettings where available, and a local datadog.json file, if present. 
/// </summary> /// <returns>A new <see cref="IConfigurationSource"/> instance.</returns> public static CompositeConfigurationSource CreateDefaultConfigurationSource() { return GlobalSettings.CreateDefaultConfigurationSource(); } /// <summary> /// Sets the HTTP status code that should be marked as errors for client integrations. /// </summary> /// <seealso cref="ConfigurationKeys.HttpClientErrorStatusCodes"/> /// <param name="statusCodes">Status codes that should be marked as errors</param> public void SetHttpClientErrorStatusCodes(IEnumerable<int> statusCodes) { HttpClientErrorStatusCodes = ParseHttpCodesToArray(string.Join(",", statusCodes)); } /// <summary> /// Sets the HTTP status code that should be marked as errors for server integrations. /// </summary> /// <seealso cref="ConfigurationKeys.HttpServerErrorStatusCodes"/> /// <param name="statusCodes">Status codes that should be marked as errors</param> public void SetHttpServerErrorStatusCodes(IEnumerable<int> statusCodes) { HttpServerErrorStatusCodes = ParseHttpCodesToArray(string.Join(",", statusCodes)); } /// <summary> /// Sets the mappings to use for service names within a <see cref="Span"/> /// </summary> /// <param name="mappings">Mappings to use from original service name (e.g. <code>sql-server</code> or <code>graphql</code>) /// as the <see cref="KeyValuePair{TKey, TValue}.Key"/>) to replacement service names as <see cref="KeyValuePair{TKey, TValue}.Value"/>).</param> public void SetServiceNameMappings(IEnumerable<KeyValuePair<string, string>> mappings) { ServiceNameMappings.SetServiceNameMappings(mappings); } /// <summary> /// Populate the internal structures. Modifying the settings past this point is not supported /// </summary> internal void Freeze() { Integrations.SetDisabledIntegrations(DisabledIntegrationNames); } internal bool IsErrorStatusCode(int statusCode, bool serverStatusCode) { var source = serverStatusCode ? HttpServerErrorStatusCodes : HttpClientErrorStatusCodes; if (source == null) { return false; } if (statusCode >= source.Length) { return false; } return source[statusCode]; } internal bool IsIntegrationEnabled(IntegrationInfo integration, bool defaultValue = true) { if (TraceEnabled && !DomainMetadata.ShouldAvoidAppDomain()) { return Integrations[integration].Enabled ?? defaultValue; } return false; } internal bool IsIntegrationEnabled(string integrationName) { if (TraceEnabled && !DomainMetadata.ShouldAvoidAppDomain()) { bool? enabled = Integrations[integrationName].Enabled; return enabled != false; } return false; } internal double? GetIntegrationAnalyticsSampleRate(IntegrationInfo integration, bool enabledWithGlobalSetting) { var integrationSettings = Integrations[integration]; var analyticsEnabled = integrationSettings.AnalyticsEnabled ?? (enabledWithGlobalSetting && AnalyticsEnabled); return analyticsEnabled ? 
integrationSettings.AnalyticsSampleRate : (double?)null; } internal bool IsNetStandardFeatureFlagEnabled() { var value = EnvironmentHelpers.GetEnvironmentVariable("DD_TRACE_NETSTANDARD_ENABLED", string.Empty); return value == "1" || value == "true"; } internal bool[] ParseHttpCodesToArray(string httpStatusErrorCodes) { bool[] httpErrorCodesArray = new bool[600]; void TrySetValue(int index) { if (index >= 0 && index < httpErrorCodesArray.Length) { httpErrorCodesArray[index] = true; } } string[] configurationsArray = httpStatusErrorCodes.Replace(" ", string.Empty).Split(','); foreach (string statusConfiguration in configurationsArray) { int startStatus; // Checks that the value about to be used follows the `401-404` structure or single 3 digit number i.e. `401` else log the warning if (!Regex.IsMatch(statusConfiguration, @"^\d{3}-\d{3}$|^\d{3}$")) { Log.Warning("Wrong format '{0}' for DD_HTTP_SERVER/CLIENT_ERROR_STATUSES configuration.", statusConfiguration); } // If statusConfiguration equals a single value i.e. `401` parse the value and save to the array else if (int.TryParse(statusConfiguration, out startStatus)) { TrySetValue(startStatus); } else { string[] statusCodeLimitsRange = statusConfiguration.Split('-'); startStatus = int.Parse(statusCodeLimitsRange[0]); int endStatus = int.Parse(statusCodeLimitsRange[1]); if (endStatus < startStatus) { startStatus = endStatus; endStatus = int.Parse(statusCodeLimitsRange[0]); } for (int statusCode = startStatus; statusCode <= endStatus; statusCode++) { TrySetValue(statusCode); } } } return httpErrorCodesArray; } internal string GetServiceName(Tracer tracer, string serviceName) { return ServiceNameMappings.GetServiceName(tracer.DefaultServiceName, serviceName); } } }
1
19,401
Most tracer settings can be set in several ways: environment variables, an `app.config`/`web.config` file, or a JSON file (`~/datadog.json` by default). Most exceptions to this rule are settings read in native code (where we use env vars only). Currently, `DD_TRACE_ASPNET_ROUTE_TEMPLATE_RESOURCE_NAMES_ENABLED` will only work as an env var. Was this limitation intentional?
DataDog-dd-trace-dotnet
.cs
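For context on the review above: if the flag were wired through the shared configuration sources like most other tracer settings, users would expect to be able to set it in `datadog.json` (or `app.config`/`web.config`) rather than only as an environment variable. A hypothetical `datadog.json` entry is sketched below; whether such an entry actually takes effect is exactly what the review is questioning.

```json
{
  "DD_TRACE_ASPNET_ROUTE_TEMPLATE_RESOURCE_NAMES_ENABLED": "true"
}
```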
@@ -218,4 +218,10 @@ type Config struct {
 
 	// TaskMetadataBurstRate specifies the burst rate throttle for the task metadata endpoint
 	TaskMetadataBurstRate int
+
+	// NoIID when set to true, specifies that the agent should not register the instance
+	// with instance identity document. This is required in order to accomodate scenarios in
+	// which ECS agent tries to register the instance where the instance id document is
+	// not available or needed
+	NoIID bool
 }
1
// Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package config import ( "time" "github.com/aws/amazon-ecs-agent/agent/dockerclient" cnitypes "github.com/containernetworking/cni/pkg/types" ) // ImagePullBehaviorType is an enum variable type corresponding to different agent pull // behaviors including default, always, never and once. type ImagePullBehaviorType int8 type Config struct { // DEPRECATED // ClusterArn is the Name or full ARN of a Cluster to register into. It has // been deprecated (and will eventually be removed) in favor of Cluster ClusterArn string `deprecated:"Please use Cluster instead"` // Cluster can either be the Name or full ARN of a Cluster. This is the // cluster the agent should register this ContainerInstance into. If this // value is not set, it will default to "default" Cluster string `trim:"true"` // APIEndpoint is the endpoint, such as "ecs.us-east-1.amazonaws.com", to // make calls against. If this value is not set, it will default to the // endpoint for your current AWSRegion APIEndpoint string `trim:"true"` // DockerEndpoint is the address the agent will attempt to connect to the // Docker daemon at. This should have the same value as "DOCKER_HOST" // normally would to interact with the daemon. It defaults to // unix:///var/run/docker.sock DockerEndpoint string // AWSRegion is the region to run in (such as "us-east-1"). This value will // be inferred from the EC2 metadata service, but if it cannot be found this // will be fatal. AWSRegion string `missing:"fatal" trim:"true"` // ReservedPorts is an array of ports which should be registered as // unavailable. If not set, they default to [22,2375,2376,51678]. ReservedPorts []uint16 // ReservedPortsUDP is an array of UDP ports which should be registered as // unavailable. If not set, it defaults to []. ReservedPortsUDP []uint16 // DataDir is the directory data is saved to in order to preserve state // across agent restarts. // It is also used to keep the metadata of containers managed by the agent DataDir string // DataDirOnHost is the directory in the instance from which we mount // DataDir to the ecs-agent container and to agent managed containers DataDirOnHost string // Checkpoint configures whether data should be periodically to a checkpoint // file, in DataDir, such that on instance or agent restarts it will resume // as the same ContainerInstance. It defaults to false. Checkpoint bool // EngineAuthType configures what type of data is in EngineAuthData. // Supported types, right now, can be found in the dockerauth package: https://godoc.org/github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerauth EngineAuthType string `trim:"true"` // EngineAuthData contains authentication data. Please see the documentation // for EngineAuthType for more information. EngineAuthData *SensitiveRawMessage // UpdatesEnabled specifies whether updates should be applied to this agent. 
// Default true UpdatesEnabled bool // UpdateDownloadDir specifies where new agent versions should be placed // within the container in order for the external updating process to // correctly handle them. UpdateDownloadDir string // DisableMetrics configures whether task utilization metrics should be // sent to the ECS telemetry endpoint DisableMetrics bool // ReservedMemory specifies the amount of memory (in MB) to reserve for things // other than containers managed by ECS ReservedMemory uint16 // DockerStopTimeout specifies the amount of time before a SIGKILL is issued to // containers managed by ECS DockerStopTimeout time.Duration // ContainerStartTimeout specifies the amount of time to wait to start a container ContainerStartTimeout time.Duration // AvailableLoggingDrivers specifies the logging drivers available for use // with Docker. If not set, it defaults to ["json-file","none"]. AvailableLoggingDrivers []dockerclient.LoggingDriver // PrivilegedDisabled specified whether the Agent is capable of launching // tasks with privileged containers PrivilegedDisabled bool // SELinxuCapable specifies whether the Agent is capable of using SELinux // security options SELinuxCapable bool // AppArmorCapable specifies whether the Agent is capable of using AppArmor // security options AppArmorCapable bool // TaskCleanupWaitDuration specifies the time to wait after a task is stopped // until cleanup of task resources is started. TaskCleanupWaitDuration time.Duration // TaskIAMRoleEnabled specifies if the Agent is capable of launching // tasks with IAM Roles. TaskIAMRoleEnabled bool // TaskCPUMemLimit specifies if Agent can launch a task with a hierarchical cgroup TaskCPUMemLimit Conditional // CredentialsAuditLogFile specifies the path/filename of the audit log. CredentialsAuditLogFile string // CredentialsAuditLogEnabled specifies whether audit logging is disabled. CredentialsAuditLogDisabled bool // TaskIAMRoleEnabledForNetworkHost specifies if the Agent is capable of launching // tasks with IAM Roles when networkMode is set to 'host' TaskIAMRoleEnabledForNetworkHost bool // TaskENIEnabled specifies if the Agent is capable of launching task within // defined EC2 networks TaskENIEnabled bool // ImageCleanupDisabled specifies whether the Agent will periodically perform // automated image cleanup ImageCleanupDisabled bool // MinimumImageDeletionAge specifies the minimum time since it was pulled // before it can be deleted MinimumImageDeletionAge time.Duration // ImageCleanupInterval specifies the time to wait before performing the image // cleanup since last time it was executed ImageCleanupInterval time.Duration // NumImagesToDeletePerCycle specifies the num of image to delete every time // when Agent performs cleanup NumImagesToDeletePerCycle int // ImagePullBehavior specifies the agent's behavior for pulling image and loading // local Docker image cache ImagePullBehavior ImagePullBehaviorType // InstanceAttributes contains key/value pairs representing // attributes to be associated with this instance within the // ECS service and used to influence behavior such as launch // placement. InstanceAttributes map[string]string // Set if clients validate ssl certificates. Used mainly for testing AcceptInsecureCert bool `json:"-"` // CNIPluginsPath is the path for the cni plugins CNIPluginsPath string // PauseContainerTarballPath is the path to the pause container tarball PauseContainerTarballPath string // PauseContainerImageName is the name for the pause container image. 
// Setting this value to be different from the default will disable loading // the image from the tarball; the referenced image must already be loaded. PauseContainerImageName string // PauseContainerTag is the tag for the pause container image. // Setting this value to be different from the default will disable loading // the image from the tarball; the referenced image must already be loaded. PauseContainerTag string // AWSVPCBlockInstanceMetdata specifies if InstanceMetadata endpoint should be blocked // for tasks that are launched with network mode "awsvpc" when ECS_AWSVPC_BLOCK_IMDS=true AWSVPCBlockInstanceMetdata bool // OverrideAWSVPCLocalIPv4Address overrides the local IPv4 address chosen // for a task using the `awsvpc` networking mode. Using this configuration // will limit you to running one `awsvpc` task at a time. IPv4 addresses // must be specified in decimal-octet form and also specify the subnet // size (e.g., "169.254.172.42/22"). OverrideAWSVPCLocalIPv4Address *cnitypes.IPNet // AWSVPCAdditionalLocalRoutes allows the specification of routing table // entries that will be added in the task's network namespace via the // instance bridge interface rather than via the ENI. AWSVPCAdditionalLocalRoutes []cnitypes.IPNet // ContainerMetadataEnabled specifies if the agent should provide a metadata // file for containers. ContainerMetadataEnabled bool // OverrideAWSLogsExecutionRole is config option used to enable awslogs // driver authentication over the task's execution role OverrideAWSLogsExecutionRole bool // CgroupPath is the path expected by the agent, defaults to // '/sys/fs/cgroup' CgroupPath string // PlatformVariables consists of configuration variables specific to linux/windows PlatformVariables PlatformVariables // TaskMetadataSteadyStateRate specifies the steady state throttle for the task metadata endpoint TaskMetadataSteadyStateRate int // TaskMetadataBurstRate specifies the burst rate throttle for the task metadata endpoint TaskMetadataBurstRate int }
1
20,076
Would "DisableIID" be a better name? For example, we used DisableMetrics, ImageCleanupDisabled, ... etc. for other similar fields.
aws-amazon-ecs-agent
go
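To make the naming suggestion in the review above concrete, here is a minimal Go sketch of a `Disable...`-style boolean field together with the kind of environment-variable parsing one would typically pair with it. The struct, comments, and the `ECS_DISABLE_IID` variable name are illustrative assumptions, not the agent's actual configuration plumbing.

```go
// Hypothetical sketch only; not the agent's real config code.
package config

import "os"

// registrationConfig illustrates the "Disable..." naming style the review suggests,
// matching existing fields such as DisableMetrics.
type registrationConfig struct {
	// DisableIID, when true, tells the agent not to register the container
	// instance using the EC2 instance identity document.
	DisableIID bool
}

// fromEnvironment reads the assumed ECS_DISABLE_IID variable (illustrative name only).
func fromEnvironment() registrationConfig {
	return registrationConfig{
		DisableIID: os.Getenv("ECS_DISABLE_IID") == "true",
	}
}
```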
@@ -3908,7 +3908,7 @@ build_bb_ilist(dcontext_t *dcontext, build_bb_t *bb)
             BBPRINT(bb, 3, "reached end pc " PFX ", stopping\n", bb->stop_pc);
             break;
         }
-        if (total_instrs > DYNAMO_OPTION(max_bb_instrs)) {
+        if (total_instrs == DYNAMO_OPTION(max_bb_instrs) - 1) {
            /* this could be an enormous basic block, or it could
             * be some degenerate infinite-loop case like a call
             * to a function that calls exit() and then calls itself,
1
/* ********************************************************** * Copyright (c) 2011-2021 Google, Inc. All rights reserved. * Copyright (c) 2001-2010 VMware, Inc. All rights reserved. * **********************************************************/ /* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of VMware, Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ /* Copyright (c) 2003-2007 Determina Corp. */ /* Copyright (c) 2001-2003 Massachusetts Institute of Technology */ /* Copyright (c) 2001 Hewlett-Packard Company */ /* * interp.c - interpreter used for native trace selection */ #include "../globals.h" #include "../link.h" #include "../fragment.h" #include "../emit.h" #include "../dispatch.h" #include "../fcache.h" #include "../monitor.h" /* for trace_abort and monitor_data_t */ #include "arch.h" #include "instr.h" #include "instr_create_shared.h" #include "instrlist.h" #include "decode.h" #include "decode_fast.h" #include "disassemble.h" #include "instrument.h" #include "../hotpatch.h" #ifdef RETURN_AFTER_CALL # include "../rct.h" #endif #ifdef WINDOWS # include "ntdll.h" /* for EXCEPTION_REGISTRATION */ # include "../nudge.h" /* for generic_nudge_target() address */ #endif #include "../perscache.h" #include "../native_exec.h" #include "../jit_opt.h" #ifdef CHECK_RETURNS_SSE2 # include <setjmp.h> /* for warning when see libc setjmp */ #endif #ifdef VMX86_SERVER # include "vmkuw.h" /* VMKUW_SYSCALL_GATEWAY */ #endif #ifdef ANNOTATIONS # include "../annotations.h" #endif #ifdef AARCH64 # include "build_ldstex.h" #endif enum { DIRECT_XFER_LENGTH = 5 }; /* forward declarations */ static void process_nops_for_trace(dcontext_t *dcontext, instrlist_t *ilist, uint flags _IF_DEBUG(bool recreating)); static int fixup_last_cti(dcontext_t *dcontext, instrlist_t *trace, app_pc next_tag, uint next_flags, uint trace_flags, fragment_t *prev_f, linkstub_t *prev_l, bool record_translation, uint *num_exits_deleted /*OUT*/, /* If non-NULL, only looks inside trace between these two */ instr_t *start_instr, instr_t *end_instr); bool mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md); /* we use a branch limit of 1 to make it easier for the trace * creation mechanism to 
stitch basic blocks together */ #define BRANCH_LIMIT 1 /* we limit total bb size to handle cases like infinite loop or sequence * of calls. * also, we have a limit on fragment body sizes, which should be impossible * to break since x86 instrs are max 17 bytes and we only modify ctis. * Although...selfmod mangling does really expand fragments! * -selfmod_max_writes helps for selfmod bbs (case 7893/7909). * System call mangling is also large, for degenerate cases like tests/linux/infinite. * PR 215217: also client additions: we document and assert. * FIXME: need better way to know how big will get, b/c we can construct * cases that will trigger the size assertion! */ /* define replaced by -max_bb_instrs option */ /* exported so micro routines can assert whether held */ DECLARE_CXTSWPROT_VAR(mutex_t bb_building_lock, INIT_LOCK_FREE(bb_building_lock)); /* i#1111: we do not use the lock until the 2nd thread is created */ volatile bool bb_lock_start; static file_t bbdump_file = INVALID_FILE; #ifdef DEBUG DECLARE_NEVERPROT_VAR(uint debug_bb_count, 0); #endif /* initialization */ void interp_init() { if (INTERNAL_OPTION(bbdump_tags)) { bbdump_file = open_log_file("bbs", NULL, 0); ASSERT(bbdump_file != INVALID_FILE); } } #ifdef CUSTOM_TRACES_RET_REMOVAL # ifdef DEBUG /* don't bother with adding lock */ static int num_rets_removed; # endif #endif /* cleanup */ void interp_exit() { if (INTERNAL_OPTION(bbdump_tags)) { close_log_file(bbdump_file); } DELETE_LOCK(bb_building_lock); LOG(GLOBAL, LOG_INTERP | LOG_STATS, 1, "Total application code seen: %d KB\n", GLOBAL_STAT(app_code_seen) / 1024); #ifdef CUSTOM_TRACES_RET_REMOVAL # ifdef DEBUG LOG(GLOBAL, LOG_INTERP | LOG_STATS, 1, "Total rets removed: %d\n", num_rets_removed); # endif #endif } /**************************************************************************** **************************************************************************** * * B A S I C B L O C K B U I L D I N G */ /* we have a lot of data to pass around so we package it in this struct * so we can have separate routines for readability */ typedef struct { /* in */ app_pc start_pc; bool app_interp; /* building bb to interp app, as opposed to for pc * translation or figuring out what pages a bb touches? */ bool for_cache; /* normal to-be-executed build? */ bool record_vmlist; /* should vmareas be updated? */ bool mangle_ilist; /* should bb ilist be mangled? */ bool record_translation; /* store translation info for each instr_t? */ bool has_bb_building_lock; /* usually ==for_cache; used for aborting bb building */ bool checked_start_vmarea; /* caller called check_new_page_start() on start_pc */ file_t outf; /* send disassembly and notes to a file? * we use this mainly for dumping trace origins */ app_pc stop_pc; /* Optional: NULL for normal termination rules. * Only checked for full_decode. */ bool pass_to_client; /* pass to client, if a bb hook exists; * we store this up front to avoid race conditions * between full_decode setting and hook calling time. */ bool post_client; /* has the client already processed the bb? 
*/ bool for_trace; /* PR 299808: we tell client if building a trace */ /* in and out */ overlap_info_t *overlap_info; /* if non-null, records overlap information here; * caller must initialize region_start and region_end */ /* out */ instrlist_t *ilist; uint flags; void *vmlist; app_pc end_pc; bool native_exec; /* replace cur ilist with a native_exec version */ bool native_call; /* the gateway is a call */ instrlist_t **unmangled_ilist; /* PR 299808: clone ilist pre-mangling */ /* internal usage only */ bool full_decode; /* decode every instruction into a separate instr_t? */ bool follow_direct; /* elide unconditional branches? */ bool check_vm_area; /* whether to call check_thread_vm_area() */ uint num_elide_jmp; uint num_elide_call; app_pc last_page; app_pc cur_pc; app_pc instr_start; app_pc checked_end; /* end of current vmarea checked */ cache_pc exit_target; /* fall-through target of final instr */ uint exit_type; /* indirect branch type */ ibl_branch_type_t ibl_branch_type; /* indirect branch type as an IBL selector */ instr_t *instr; /* the current instr */ int eflags; app_pc pretend_pc; /* selfmod only: decode from separate pc */ #ifdef ARM dr_pred_type_t svc_pred; /* predicate for conditional svc */ #endif DEBUG_DECLARE(bool initialized;) } build_bb_t; /* forward decl */ static inline bool bb_process_syscall(dcontext_t *dcontext, build_bb_t *bb); static void init_build_bb(build_bb_t *bb, app_pc start_pc, bool app_interp, bool for_cache, bool mangle_ilist, bool record_translation, file_t outf, uint known_flags, overlap_info_t *overlap_info) { memset(bb, 0, sizeof(*bb)); #if defined(LINUX) && defined(X86_32) /* With SA_RESTART (i#2659) we end up interpreting the int 0x80 in vsyscall, * whose fall-through hits our hook. We avoid interpreting our own hook * by shifting it to the displaced pc. */ if (DYNAMO_OPTION(hook_vsyscall) && start_pc == vsyscall_sysenter_return_pc) start_pc = vsyscall_sysenter_displaced_pc; #endif bb->check_vm_area = true; bb->start_pc = start_pc; bb->app_interp = app_interp; bb->for_cache = for_cache; if (bb->for_cache) bb->record_vmlist = true; bb->mangle_ilist = mangle_ilist; bb->record_translation = record_translation; bb->outf = outf; bb->overlap_info = overlap_info; bb->follow_direct = !TEST(FRAG_SELFMOD_SANDBOXED, known_flags); bb->flags = known_flags; bb->ibl_branch_type = IBL_GENERIC; /* initialization only */ #ifdef ARM bb->svc_pred = DR_PRED_NONE; #endif DODEBUG(bb->initialized = true;); } static void reset_overlap_info(dcontext_t *dcontext, build_bb_t *bb) { bb->overlap_info->start_pc = bb->start_pc; bb->overlap_info->min_pc = bb->start_pc; bb->overlap_info->max_pc = bb->start_pc; bb->overlap_info->contiguous = true; bb->overlap_info->overlap = false; } static void update_overlap_info(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc, bool jmp) { if (new_pc < bb->overlap_info->min_pc) bb->overlap_info->min_pc = new_pc; if (new_pc > bb->overlap_info->max_pc) bb->overlap_info->max_pc = new_pc; /* we get called at end of all contiguous intervals, so ignore jmps */ LOG(THREAD, LOG_ALL, 5, "\t app_bb_overlaps " PFX ".." PFX " %s\n", bb->last_page, new_pc, jmp ? 
"jmp" : ""); if (!bb->overlap_info->overlap && !jmp) { /* contiguous interval: prev_pc..new_pc (open-ended) */ if (bb->last_page < bb->overlap_info->region_end && new_pc > bb->overlap_info->region_start) { LOG(THREAD_GET, LOG_ALL, 5, "\t it overlaps!\n"); bb->overlap_info->overlap = true; } } if (bb->overlap_info->contiguous && jmp) bb->overlap_info->contiguous = false; } #ifdef DEBUG # define BBPRINT(bb, level, ...) \ do { \ LOG(THREAD, LOG_INTERP, level, __VA_ARGS__); \ if (bb->outf != INVALID_FILE && bb->outf != (THREAD)) \ print_file(bb->outf, __VA_ARGS__); \ } while (0); #else # ifdef INTERNAL # define BBPRINT(bb, level, ...) \ do { \ if (bb->outf != INVALID_FILE) \ print_file(bb->outf, __VA_ARGS__); \ } while (0); # else # define BBPRINT(bb, level, ...) /* nothing */ # endif #endif #ifdef WINDOWS extern void intercept_load_dll(void); extern void intercept_unload_dll(void); # ifdef INTERNAL extern void DllMainThreadAttach(void); # endif #endif /* forward declarations */ static bool mangle_bb_ilist(dcontext_t *dcontext, build_bb_t *bb); static void build_native_exec_bb(dcontext_t *dcontext, build_bb_t *bb); static bool at_native_exec_gateway(dcontext_t *dcontext, app_pc start, bool *is_call _IF_DEBUG(bool xfer_target)); #ifdef DEBUG static void report_native_module(dcontext_t *dcontext, app_pc modpc); #endif /*************************************************************************** * Image entry */ static bool reached_image_entry = false; static INLINE_FORCED bool check_for_image_entry(app_pc bb_start) { if (!reached_image_entry && bb_start == get_image_entry()) { LOG(THREAD_GET, LOG_ALL, 1, "Reached image entry point " PFX "\n", bb_start); set_reached_image_entry(); return true; } return false; } void set_reached_image_entry() { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); reached_image_entry = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } bool reached_image_entry_yet() { return reached_image_entry; } /*************************************************************************** * Whether to inline or elide callees */ /* Return true if pc is a call target that should NOT be entered but should * still be mangled. */ static inline bool must_not_be_entered(app_pc pc) { return false #ifdef DR_APP_EXPORTS /* i#1237: DR will change dr_app_running_under_dynamorio return value * on seeing a bb starting at dr_app_running_under_dynamorio. */ || pc == (app_pc)dr_app_running_under_dynamorio #endif ; } /* Return true if pc is a call target that should NOT be inlined and left native. */ static inline bool leave_call_native(app_pc pc) { return ( #ifdef INTERNAL !dynamo_options.inline_calls #else 0 #endif #ifdef WINDOWS || pc == (app_pc)intercept_load_dll || pc == (app_pc)intercept_unload_dll /* we're guaranteed to have direct calls to the next routine since our * own DllMain calls it! */ # ifdef INTERNAL || pc == (app_pc)DllMainThreadAttach # endif /* check for nudge handling escape from cache */ || (pc == (app_pc)generic_nudge_handler) #else /* PR 200203: long-term we want to control loading of client * libs, but for now we have to let the loader call _fini() * in the client, which may end up calling __wrap_free(). 
 * It's simpler to let those be interpreted and make a native
 * call to the real heap routine here as this is a direct
 * call whereas we'd need native_exec for the others:
 */
        || pc == (app_pc)global_heap_free
#endif
    );
}

/* return true if pc is a direct jmp target that should NOT be elided and followed */
static inline bool
must_not_be_elided(app_pc pc)
{
#ifdef WINDOWS
    /* Allow only the return jump in the landing pad to be elided, as we
     * interpret the return path from trampolines. The forward jump leads to
     * the trampoline and shouldn't be elided. */
    if (is_on_interception_initial_route(pc))
        return true;
#endif
    return (0
#ifdef WINDOWS
            /* we insert trampolines by adding direct jmps to our interception code buffer
             * we don't want to interpret the code in that buffer, as it may swap to the
             * dstack and mess up a return-from-fcache.
             * N.B.: if you use this routine anywhere else, pay attention to the
             * hack for is_syscall_trampoline() in the use here!
             */
            || (is_in_interception_buffer(pc))
#else /* UNIX */
#endif
    );
}

#ifdef DR_APP_EXPORTS
/* This function allows automatically injected dynamo to ignore
 * dynamo API routines that would really mess things up
 */
static inline bool
must_escape_from(app_pc pc)
{
    /* if ever find ourselves at top of one of these, immediately issue
     * a ret instruction...haven't set up frame yet so stack fine, only
     * problem is return value, go ahead and overwrite xax, it's caller-saved
     * FIXME: is this ok?
     */
    /* Note that we can't just look for direct calls to these functions
     * because of stubs, etc. that end up doing indirect jumps to them!
     */
    bool res = false
# ifdef DR_APP_EXPORTS
        || (automatic_startup &&
            (pc == (app_pc)dynamorio_app_init || pc == (app_pc)dr_app_start ||
             pc == (app_pc)dynamo_thread_init || pc == (app_pc)dynamorio_app_exit ||
             /* dr_app_stop is a nop already */
             pc == (app_pc)dynamo_thread_exit))
# endif
        ;
# ifdef DEBUG
    if (res) {
# ifdef DR_APP_EXPORTS
        LOG(THREAD_GET, LOG_INTERP, 3, "must_escape_from: found ");
        if (pc == (app_pc)dynamorio_app_init)
            LOG(THREAD_GET, LOG_INTERP, 3, "dynamorio_app_init\n");
        else if (pc == (app_pc)dr_app_start)
            LOG(THREAD_GET, LOG_INTERP, 3, "dr_app_start\n");
        /* FIXME: are dynamo_thread_* still needed here? */
        else if (pc == (app_pc)dynamo_thread_init)
            LOG(THREAD_GET, LOG_INTERP, 3, "dynamo_thread_init\n");
        else if (pc == (app_pc)dynamorio_app_exit)
            LOG(THREAD_GET, LOG_INTERP, 3, "dynamorio_app_exit\n");
        else if (pc == (app_pc)dynamo_thread_exit)
            LOG(THREAD_GET, LOG_INTERP, 3, "dynamo_thread_exit\n");
# endif
    }
# endif
    return res;
}
#endif /* DR_APP_EXPORTS */

/* Adds bb->instr, which must be a direct call or jmp, to bb->ilist for native
 * execution. Makes sure its target is reachable from the code cache, which
 * is critical for jmps b/c they're native for our hooks of app code which may
 * not be reachable from the code cache. Also needed for calls b/c in the future
 * (i#774) the DR lib (and thus our leave_call_native() calls) won't be reachable
 * from the cache.
 */
static void
bb_add_native_direct_xfer(dcontext_t *dcontext, build_bb_t *bb, bool appended)
{
#if defined(X86) && defined(X64)
    /* i#922: we're going to run this jmp from our code cache so we have to
     * make sure it still reaches its target. We could try to check
     * reachability from the likely code cache slot, but these should be
     * rare enough that making them indirect won't matter and then we have
     * fewer reachability dependences.
* We do this here rather than in d_r_mangle() b/c we'd have a hard time * distinguishing native jmp/call due to DR's own operations from a * client's inserted meta jmp/call. */ /* Strategy: write target into xax (DR-reserved) slot and jmp through it. * Alternative would be to embed the target into the code stream. * We don't need to set translation b/c these are meta instrs and they * won't fault. */ ptr_uint_t tgt = (ptr_uint_t)opnd_get_pc(instr_get_target(bb->instr)); opnd_t tls_slot = opnd_create_sized_tls_slot(os_tls_offset(TLS_XAX_SLOT), OPSZ_4); instrlist_meta_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, tls_slot, OPND_CREATE_INT32((int)tgt))); opnd_set_disp(&tls_slot, opnd_get_disp(tls_slot) + 4); instrlist_meta_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, tls_slot, OPND_CREATE_INT32((int)(tgt >> 32)))); if (instr_is_ubr(bb->instr)) { instrlist_meta_append( bb->ilist, INSTR_CREATE_jmp_ind(dcontext, opnd_create_tls_slot(os_tls_offset(TLS_XAX_SLOT)))); bb->exit_type |= instr_branch_type(bb->instr); } else { ASSERT(instr_is_call_direct(bb->instr)); instrlist_meta_append( bb->ilist, INSTR_CREATE_call_ind(dcontext, opnd_create_tls_slot(os_tls_offset(TLS_XAX_SLOT)))); } if (appended) instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); bb->instr = NULL; #elif defined(ARM) ASSERT_NOT_IMPLEMENTED(false); /* i#1582 */ #else if (appended) { /* avoid assert about meta w/ translation but no restore_state callback */ instr_set_translation(bb->instr, NULL); } else instrlist_append(bb->ilist, bb->instr); /* Indicate that relative target must be * re-encoded, and that it is not an exit cti. * However, we must mangle this to ensure it reaches (i#992) * which we special-case in d_r_mangle(). */ instr_set_meta(bb->instr); instr_set_raw_bits_valid(bb->instr, false); #endif } /* Perform checks such as looking for dynamo stopping points and bad places * to be. We assume we only have to check after control transfer instructions, * i.e., we assume that all of these conditions are procedures that are only * entered by calling or jumping, never falling through. */ static inline bool check_for_stopping_point(dcontext_t *dcontext, build_bb_t *bb) { #ifdef DR_APP_EXPORTS if (must_escape_from(bb->cur_pc)) { /* x64 will zero-extend to rax, so we use eax here */ reg_id_t reg = IF_X86_ELSE(REG_EAX, DR_REG_R0); BBPRINT(bb, 3, "interp: emergency exit from " PFX "\n", bb->cur_pc); /* if ever find ourselves at top of one of these, immediately issue * a ret instruction...haven't set up frame yet so stack fine, only * problem is return value, go ahead and overwrite xax, it's * caller-saved. * FIXME: is this ok? */ /* move 0 into xax/r0 -- our functions return 0 to indicate success */ instrlist_append( bb->ilist, XINST_CREATE_load_int(dcontext, opnd_create_reg(reg), OPND_CREATE_INT32(0))); /* insert a ret instruction */ instrlist_append(bb->ilist, XINST_CREATE_return(dcontext)); /* should this be treated as a real return? */ bb->exit_type |= LINK_INDIRECT | LINK_RETURN; bb->exit_target = get_ibl_routine(dcontext, IBL_LINKED, DEFAULT_IBL_BB(), IBL_RETURN); return true; } #endif /* DR_APP_EXPORTS */ #ifdef CHECK_RETURNS_SSE2 if (bb->cur_pc == (app_pc)longjmp) { SYSLOG_INTERNAL_WARNING("encountered longjmp, which will cause ret mismatch!"); } #endif return is_stopping_point(dcontext, bb->cur_pc); } /* Arithmetic eflags analysis to see if sequence of instrs reads an * arithmetic flag prior to writing it. * Usage: first initialize status to 0 and eflags_6 to 0. 
* Then call this routine for each instr in sequence, assigning result to status. * eflags_6 holds flags written and read so far. * Uses these flags, defined in instr.h, as status values: * EFLAGS_WRITE_ARITH = writes all arith flags before reading any * EFLAGS_WRITE_OF = writes OF before reading it (x86-onlY) * EFLAGS_READ_ARITH = reads some of arith flags before writing * EFLAGS_READ_OF = reads OF before writing OF (x86-only) * 0 = no information yet * On ARM, Q and GE flags are ignored. */ static inline int eflags_analysis(instr_t *instr, int status, uint *eflags_6) { uint e6 = *eflags_6; /* local copy */ uint e6_w2r = EFLAGS_WRITE_TO_READ(e6); uint instr_eflags = instr_get_arith_flags(instr, DR_QUERY_DEFAULT); /* Keep going until result is non-zero, also keep going if * result is writes to OF to see if later writes to rest of flags * before reading any, and keep going if reads one of the 6 to see * if later writes to OF before reading it. */ if (instr_eflags == 0 || status == EFLAGS_WRITE_ARITH IF_X86(|| status == EFLAGS_READ_OF)) return status; /* we ignore interrupts */ if ((instr_eflags & EFLAGS_READ_ARITH) != 0 && (!instr_opcode_valid(instr) || !instr_is_interrupt(instr))) { /* store the flags we're reading */ e6 |= (instr_eflags & EFLAGS_READ_ARITH); *eflags_6 = e6; if ((e6_w2r | (instr_eflags & EFLAGS_READ_ARITH)) != e6_w2r) { /* we're reading a flag that has not been written yet */ status = EFLAGS_READ_ARITH; /* some read before all written */ LOG(THREAD_GET, LOG_INTERP, 4, "\treads flag before writing it!\n"); #ifdef X86 if ((instr_eflags & EFLAGS_READ_OF) != 0 && (e6 & EFLAGS_WRITE_OF) == 0) { status = EFLAGS_READ_OF; /* reads OF before writing! */ LOG(THREAD_GET, LOG_INTERP, 4, "\t reads OF prior to writing it!\n"); } #endif } } else if ((instr_eflags & EFLAGS_WRITE_ARITH) != 0) { /* store the flags we're writing */ e6 |= (instr_eflags & EFLAGS_WRITE_ARITH); *eflags_6 = e6; /* check if all written but none read yet */ if ((e6 & EFLAGS_WRITE_ARITH) == EFLAGS_WRITE_ARITH && (e6 & EFLAGS_READ_ARITH) == 0) { status = EFLAGS_WRITE_ARITH; /* all written before read */ LOG(THREAD_GET, LOG_INTERP, 4, "\twrote all 6 flags now!\n"); } #ifdef X86 /* check if at least OF was written but not read */ else if ((e6 & EFLAGS_WRITE_OF) != 0 && (e6 & EFLAGS_READ_OF) == 0) { status = EFLAGS_WRITE_OF; /* OF written before read */ LOG(THREAD_GET, LOG_INTERP, 4, "\twrote overflow flag before reading it!\n"); } #endif } return status; } /* check origins of code for several purposes: * 1) we need list of areas where this thread's fragments come * from, for faster flushing on munmaps * 2) also for faster flushing, each vmarea has a list of fragments * 3) we need to mark as read-only any writable region that * has a fragment come from it, to handle self-modifying code * 4) for PROGRAM_SHEPHERDING restricted code origins for security * 5) for restricted execution environments: not letting bb cross regions */ /* FIXME CASE 7380: since report security violation before execute off bad page, can be false positive due to: - a faulting instruction in middle of bb would have prevented getting there - ignorable syscall in middle - self-mod code would have ended bb sooner than bad page One solution is to have check_thread_vm_area() return false and have bb building stop at checked_end if a violation will occur when we get there. Then we only raise the violation once building a bb starting there. 
*/ static inline void check_new_page_start(dcontext_t *dcontext, build_bb_t *bb) { DEBUG_DECLARE(bool ok;) if (!bb->check_vm_area) return; DEBUG_DECLARE(ok =) check_thread_vm_area(dcontext, bb->start_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, false /*!xfer*/); ASSERT(ok); /* cannot return false on non-xfer */ bb->last_page = bb->start_pc; if (bb->overlap_info != NULL) reset_overlap_info(dcontext, bb); } /* Walk forward in straight line from prev_pc to new_pc. * FIXME: with checked_end we don't need to call this on every contig end * while bb building like we used to. Should revisit the overlap info and * walk_app_bb reasons for keeping those contig() calls and see if we can * optimize them away for bb building at least. * i#993: new_pc points to the last byte of the current instruction and is not * an open-ended endpoint. */ static inline bool check_new_page_contig(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc) { bool is_first_instr = (bb->instr_start == bb->start_pc); if (!bb->check_vm_area) return true; if (bb->checked_end == NULL) { ASSERT(new_pc == bb->start_pc); } else if (new_pc >= bb->checked_end) { if (!check_thread_vm_area(dcontext, new_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, /* i#989: We don't want to fall through to an * incompatible vmarea, so we treat fall * through like a transfer. We can't end the * bb before the first instruction, so we pass * false to forcibly merge in the vmarea * flags. */ !is_first_instr /*xfer*/)) { return false; } } if (bb->overlap_info != NULL) update_overlap_info(dcontext, bb, new_pc, false /*not jmp*/); DOLOG(4, LOG_INTERP, { if (PAGE_START(bb->last_page) != PAGE_START(new_pc)) LOG(THREAD, LOG_INTERP, 4, "page boundary crossed\n"); }); bb->last_page = new_pc; /* update even if not new page, for walk_app_bb */ return true; } /* Direct cti from prev_pc to new_pc */ static bool check_new_page_jmp(dcontext_t *dcontext, build_bb_t *bb, app_pc new_pc) { /* For tracking purposes, check the last byte of the cti. */ bool ok = check_new_page_contig(dcontext, bb, bb->cur_pc - 1); ASSERT(ok && "should have checked cur_pc-1 in decode loop"); if (!ok) /* Don't follow the jmp in release build. */ return false; /* cur sandboxing doesn't handle direct cti * not good enough to only check this at top of interp -- could walk contig * from non-selfmod to selfmod page, and then do a direct cti, which * check_thread_vm_area would allow (no flag changes on direct cti)! * also not good enough to put this check in check_thread_vm_area, as that * only checks across pages. */ if ((bb->flags & FRAG_SELFMOD_SANDBOXED) != 0) return false; if (PAGE_START(bb->last_page) != PAGE_START(new_pc)) LOG(THREAD, LOG_INTERP, 4, "page boundary crossed\n"); /* do not walk into a native exec dll (we assume not currently there, * though could happen if bypass a gateway -- even then this is a feature * to allow getting back to native ASAP) * FIXME: we could assume that such direct calls only * occur from DGC, and rely on check_thread_vm_area to disallow, * as an (unsafe) optimization */ if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_dircalls) && !vmvector_empty(native_exec_areas) && is_native_pc(new_pc)) return false; /* i#805: If we're crossing a module boundary between two modules that are * and aren't on null_instrument_list, don't elide the jmp. 
* XXX i#884: if we haven't yet executed from the 2nd module, the client * won't receive the module load event yet and we might include code * from it here. It would be tricky to solve that, and it should only happen * if the client turns on elision, so we leave it. */ if ((!!os_module_get_flag(bb->cur_pc, MODULE_NULL_INSTRUMENT)) != (!!os_module_get_flag(new_pc, MODULE_NULL_INSTRUMENT))) return false; if (!bb->check_vm_area) return true; /* need to check this even if an intra-page jmp b/c we allow sub-page vm regions */ if (!check_thread_vm_area(dcontext, new_pc, bb->start_pc, (bb->record_vmlist ? &bb->vmlist : NULL), &bb->flags, &bb->checked_end, true /*xfer*/)) return false; if (bb->overlap_info != NULL) update_overlap_info(dcontext, bb, new_pc, true /*jmp*/); bb->flags |= FRAG_HAS_DIRECT_CTI; bb->last_page = new_pc; /* update even if not new page, for walk_app_bb */ return true; } static inline void bb_process_single_step(dcontext_t *dcontext, build_bb_t *bb) { LOG(THREAD, LOG_INTERP, 2, "interp: single step exception bb at " PFX "\n", bb->instr_start); /* FIXME i#2144 : handling a rep string operation. * In this case, we should test if only one iteration is done * before the single step exception. */ instrlist_append(bb->ilist, bb->instr); instr_set_translation(bb->instr, bb->instr_start); /* Mark instruction as special exit. */ instr_branch_set_special_exit(bb->instr, true); bb->exit_type |= LINK_SPECIAL_EXIT; /* Make this bb thread-private and a trace barrier. */ bb->flags &= ~FRAG_SHARED; bb->flags |= FRAG_CANNOT_BE_TRACE; } static inline void bb_process_invalid_instr(dcontext_t *dcontext, build_bb_t *bb) { /* invalid instr: end bb BEFORE the instr, we'll throw exception if we * reach the instr itself */ LOG(THREAD, LOG_INTERP, 2, "interp: invalid instr at " PFX "\n", bb->instr_start); /* This routine is called by more than just bb builder, also used * for recreating state, so check bb->app_interp parameter to find out * if building a real app bb to be executed */ if (bb->app_interp && bb->instr_start == bb->start_pc) { /* This is first instr in bb so it will be executed for sure and * we need to generate an invalid instruction exception. * A benefit of being first instr is that the state is easy * to translate. */ /* Copying the invalid bytes and having the processor generate the exception * would help on Windows where the kernel splits invalid instructions into * different cases (an invalid lock prefix and other distinctions, when the * underlying processor has a single interrupt 6), and it is hard to * duplicate Windows' behavior in our forged exception. However, we are not * certain that this instruction will raise a fault on the processor. It * might not if our decoder has a bug, or a new processor has added new * opcodes, or just due to processor variations in undefined gray areas. * Trying to copy without knowing the length of the instruction is a recipe * for disaster: it can lead to executing junk and even missing our exit cti * (i#3939). */ /* TODO i#1000: Give clients a chance to see this instruction for analysis, * and to change it. That's not easy to do though when we don't know what * it is. But it's confusing for the client to get the illegal instr fault * having never seen the problematic instr in a bb event. */ /* XXX i#57: provide a runtime option to specify new instruction formats to * avoid this app exception for new opcodes. 
*/ ASSERT(dcontext->bb_build_info == bb); bb_build_abort(dcontext, true /*clean vm area*/, true /*unlock*/); /* XXX: we use illegal instruction here, even though we * know windows uses different exception codes for different * types of invalid instructions (for ex. STATUS_INVALID_LOCK * _SEQUENCE for lock prefix on a jmp instruction). */ if (TEST(DUMPCORE_FORGE_ILLEGAL_INST, DYNAMO_OPTION(dumpcore_mask))) os_dump_core("Warning: Encountered Illegal Instruction"); os_forge_exception(bb->instr_start, ILLEGAL_INSTRUCTION_EXCEPTION); ASSERT_NOT_REACHED(); } else { instr_destroy(dcontext, bb->instr); bb->instr = NULL; } } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* returns true to indicate "elide and continue" and false to indicate "end bb now" * should be used both for converted indirect jumps and * FIXME: for direct jumps by bb_process_ubr */ static inline bool follow_direct_jump(dcontext_t *dcontext, build_bb_t *bb, app_pc target) { if (bb->follow_direct && !must_not_be_entered(target) && bb->num_elide_jmp < DYNAMO_OPTION(max_elide_jmp) && (DYNAMO_OPTION(elide_back_jmps) || bb->cur_pc <= target)) { if (check_new_page_jmp(dcontext, bb, target)) { /* Elide unconditional branch and follow target */ bb->num_elide_jmp++; STATS_INC(total_elided_jmps); STATS_TRACK_MAX(max_elided_jmps, bb->num_elide_jmp); bb->cur_pc = target; BBPRINT(bb, 4, " continuing at target " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } else { BBPRINT(bb, 3, " NOT following jmp from " PFX " to " PFX "\n", bb->instr_start, target); } } else { BBPRINT(bb, 3, " NOT attempting to follow jump from " PFX " to " PFX "\n", bb->instr_start, target); } return false; /* stop bb */ } #endif /* X86 */ /* returns true to indicate "elide and continue" and false to indicate "end bb now" */ static inline bool bb_process_ubr(dcontext_t *dcontext, build_bb_t *bb) { app_pc tgt = (byte *)opnd_get_pc(instr_get_target(bb->instr)); BBPRINT(bb, 4, "interp: direct jump at " PFX "\n", bb->instr_start); if (must_not_be_elided(tgt)) { #ifdef WINDOWS byte *wrapper_start; if (is_syscall_trampoline(tgt, &wrapper_start)) { /* HACK to avoid entering the syscall trampoline that is meant * only for native syscalls -- we replace the jmp with the * original app mov immed that it replaced */ BBPRINT(bb, 3, "interp: replacing syscall trampoline @" PFX " w/ orig mov @" PFX "\n", bb->instr_start, wrapper_start); instr_reset(dcontext, bb->instr); /* leave bb->cur_pc unchanged */ decode(dcontext, wrapper_start, bb->instr); /* ASSUMPTION: syscall trampoline puts hooked instruction * (usually mov_imm but can be lea if hooked_deeper) here */ ASSERT(instr_get_opcode(bb->instr) == OP_mov_imm || (instr_get_opcode(bb->instr) == OP_lea && DYNAMO_OPTION(native_exec_hook_conflict) == HOOKED_TRAMPOLINE_HOOK_DEEPER)); instrlist_append(bb->ilist, bb->instr); /* translation should point to the trampoline at the * original application address */ if (bb->record_translation) instr_set_translation(bb->instr, bb->instr_start); if (instr_get_opcode(bb->instr) == OP_lea) { app_pc translation = bb->instr_start + instr_length(dcontext, bb->instr); ASSERT_CURIOSITY(instr_length(dcontext, bb->instr) == 4); /* we hooked deep need to add the int 2e instruction */ /* can't use create_syscall_instr because of case 5217 hack */ ASSERT(get_syscall_method() == SYSCALL_METHOD_INT); bb->instr = INSTR_CREATE_int(dcontext, opnd_create_immed_int((char)0x2e, OPSZ_1)); if (bb->record_translation) instr_set_translation(bb->instr, translation); ASSERT(instr_is_syscall(bb->instr) && 
instr_get_opcode(bb->instr) == OP_int); instrlist_append(bb->ilist, bb->instr); return bb_process_syscall(dcontext, bb); } return true; /* keep bb going */ } #endif BBPRINT(bb, 3, "interp: NOT following jmp to " PFX "\n", tgt); /* add instruction to instruction list */ bb_add_native_direct_xfer(dcontext, bb, false /*!appended*/); /* Case 8711: coarse-grain can't handle non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); return false; /* end bb now */ } else { if (bb->follow_direct && !must_not_be_entered(tgt) && bb->num_elide_jmp < DYNAMO_OPTION(max_elide_jmp) && (DYNAMO_OPTION(elide_back_jmps) || bb->cur_pc <= tgt)) { if (check_new_page_jmp(dcontext, bb, tgt)) { /* Elide unconditional branch and follow target */ bb->num_elide_jmp++; STATS_INC(total_elided_jmps); STATS_TRACK_MAX(max_elided_jmps, bb->num_elide_jmp); bb->cur_pc = tgt; BBPRINT(bb, 4, " continuing at target " PFX "\n", bb->cur_pc); /* pretend never saw this ubr: delete instr, then continue */ instr_destroy(dcontext, bb->instr); bb->instr = NULL; return true; /* keep bb going */ } else { BBPRINT(bb, 3, " NOT following direct jmp from " PFX " to " PFX "\n", bb->instr_start, tgt); } } /* End this bb now */ bb->exit_target = opnd_get_pc(instr_get_target(bb->instr)); instrlist_append(bb->ilist, bb->instr); return false; /* end bb */ } return true; /* keep bb going */ } #ifdef X86 /* returns true if call is elided, * and false if not following due to hitting a limit or other reason */ static bool follow_direct_call(dcontext_t *dcontext, build_bb_t *bb, app_pc callee) { /* FIXME: This code should be reused in bb_process_convertible_indcall() * and in bb_process_call_direct() */ if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { if (check_new_page_jmp(dcontext, bb, callee)) { bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going in callee */ } else { BBPRINT(bb, 3, " NOT following direct (or converted) call from " PFX " to " PFX "\n", bb->instr_start, callee); } } else { BBPRINT(bb, 3, " NOT attempting to follow call from " PFX " to " PFX "\n", bb->instr_start, callee); } return false; /* stop bb */ } #endif /* X86 */ static inline void bb_stop_prior_to_instr(dcontext_t *dcontext, build_bb_t *bb, bool appended) { if (appended) instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); bb->instr = NULL; bb->cur_pc = bb->instr_start; } /* returns true to indicate "elide and continue" and false to indicate "end bb now" */ static inline bool bb_process_call_direct(dcontext_t *dcontext, build_bb_t *bb) { byte *callee = (byte *)opnd_get_pc(instr_get_target(bb->instr)); #ifdef CUSTOM_TRACES_RET_REMOVAL if (callee == bb->instr_start + 5) { LOG(THREAD, LOG_INTERP, 4, "found call to next instruction\n"); } else dcontext->num_calls++; #endif STATS_INC(num_all_calls); BBPRINT(bb, 4, "interp: direct call at " PFX "\n", bb->instr_start); if (leave_call_native(callee)) { BBPRINT(bb, 3, "interp: NOT inlining or mangling call to " PFX "\n", callee); /* Case 8711: coarse-grain can't handle non-exit cti. * If we allow this fragment to be coarse we must kill the freeze * nudge thread! 
*/ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); bb_add_native_direct_xfer(dcontext, bb, true /*appended*/); return true; /* keep bb going, w/o inlining call */ } else { if (DYNAMO_OPTION(coarse_split_calls) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have call be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); return false; /* stop bb */ } else { /* single-call fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); } } /* FIXME: use follow_direct_call() */ if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { if (check_new_page_jmp(dcontext, bb, callee)) { bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } } BBPRINT(bb, 3, " NOT following direct call from " PFX " to " PFX "\n", bb->instr_start, callee); /* End this bb now */ if (instr_is_cbr(bb->instr)) { /* Treat as cbr, not call */ instr_exit_branch_set_type(bb->instr, instr_branch_type(bb->instr)); } else { bb->exit_target = callee; } return false; /* end bb now */ } return true; /* keep bb going */ } #ifdef WINDOWS /* We check if the instrs call, mov, and sysenter are * "call (%xdx); mov %xsp -> %xdx" or "call %xdx; mov %xsp -> %xdx" * and "sysenter". */ bool instr_is_call_sysenter_pattern(instr_t *call, instr_t *mov, instr_t *sysenter) { instr_t *instr; if (call == NULL || mov == NULL || sysenter == NULL) return false; if (instr_is_meta(call) || instr_is_meta(mov) || instr_is_meta(sysenter)) return false; if (instr_get_next(call) != mov || instr_get_next(mov) != sysenter) return false; /* check sysenter */ if (instr_get_opcode(sysenter) != OP_sysenter) return false; /* FIXME Relax the pattern matching on the "mov; call" pair so that small * changes in the register dataflow and call construct are tolerated. */ /* Did we find a "mov %xsp -> %xdx"? */ instr = mov; if (!(instr != NULL && instr_get_opcode(instr) == OP_mov_ld && instr_num_srcs(instr) == 1 && instr_num_dsts(instr) == 1 && opnd_is_reg(instr_get_dst(instr, 0)) && opnd_get_reg(instr_get_dst(instr, 0)) == REG_XDX && opnd_is_reg(instr_get_src(instr, 0)) && opnd_get_reg(instr_get_src(instr, 0)) == REG_XSP)) { return false; } /* Did we find a "call (%xdx) or "call %xdx" that's already marked * for ind->direct call conversion? */ instr = call; if (!(instr != NULL && TEST(INSTR_IND_CALL_DIRECT, instr->flags) && instr_is_call_indirect(instr) && /* The 2nd src operand should always be %xsp. */ opnd_is_reg(instr_get_src(instr, 1)) && opnd_get_reg(instr_get_src(instr, 1)) == REG_XSP && /* Match 'call (%xdx)' for post-SP2. */ ((opnd_is_near_base_disp(instr_get_src(instr, 0)) && opnd_get_base(instr_get_src(instr, 0)) == REG_XDX && opnd_get_disp(instr_get_src(instr, 0)) == 0) || /* Match 'call %xdx' for pre-SP2. */ (opnd_is_reg(instr_get_src(instr, 0)) && opnd_get_reg(instr_get_src(instr, 0)) == REG_XDX)))) { return false; } return true; } /* Walk up from the bb->instr and verify that the preceding instructions * match the pattern that we expect to precede a sysenter. 
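 * (The ntdll sysenter wrappers move %xsp into %xdx right before the sysenter, so
 * the return address of that preceding call is a usable post-syscall continuation;
 * see the ignore_syscalls handling in bb_process_ignorable_syscall.)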
 */
static instr_t *
bb_verify_sysenter_pattern(dcontext_t *dcontext, build_bb_t *bb)
{
    /* Walk back up 2 instructions and verify that there's a
     * "call (%xdx); mov %xsp -> %xdx" or "call %xdx; mov %xsp -> %xdx"
     * just prior to the sysenter.
     * We use "xsp" and "xdx" to be ready for x64 sysenter though we don't
     * expect to see it.
     */
    instr_t *mov, *call;
    mov = instr_get_prev_expanded(dcontext, bb->ilist, bb->instr);
    if (mov == NULL)
        return NULL;
    call = instr_get_prev_expanded(dcontext, bb->ilist, mov);
    if (call == NULL)
        return NULL;
    if (!instr_is_call_sysenter_pattern(call, mov, bb->instr)) {
        BBPRINT(bb, 3, "bb_verify_sysenter_pattern -- pattern didn't match\n");
        return NULL;
    }
    return call;
}

/* Only used for the Borland SEH exemption. */
/* FIXME - we can't really tell a push from a pop since both are typically a
 * mov to fs:[0], but double processing doesn't hurt. */
/* NOTE we don't see dynamic SEH frame pushes, we only see the first SEH push
 * per mov -> fs:[0] instruction in the app. So we don't see modified in place
 * handler addresses (see at_Borland_SEH_rct_exemption()) or handler addresses
 * that are passed into a shared routine that sets up the frame (not yet seen,
 * note that MS dlls that have a _SEH_prolog hardcode the handler address in
 * the _SEH_prolog routine, only the data is passed in). */
static void
bb_process_SEH_push(dcontext_t *dcontext, build_bb_t *bb, void *value)
{
    if (value == NULL || value == (void *)PTR_UINT_MINUS_1) {
        /* could be popping off the last frame (leaving -1) of the SEH stack */
        STATS_INC(num_endlist_SEH_write);
        ASSERT_CURIOSITY(value != NULL);
        return;
    }
    LOG(THREAD, LOG_INTERP, 3, "App moving " PFX " to fs:[0]\n", value);
# ifdef RETURN_AFTER_CALL
    if (DYNAMO_OPTION(borland_SEH_rct)) {
        /* xref case 5752, the Borland compiler SEH implementation uses a push
         * imm ret motif for fall through to the finally of a try finally block
         * (very similar to what the Microsoft NT at_SEH_rct_exception() is
         * doing). The layout will always look like this :
         *     push e: (imm32) (e should be in the .E/.F table)
         *     a:
         *     ...
         *     b: ret
         *     c: jmp rel32 (c should be in the .E/.F table)
         *     d: jmp a: (rel8/32)
         *     ... (usually nothing)
         *     e:
         * (where ret at b is targeting e, or a valid after call). The
         * exception dispatcher calls c (the SEH frame has c as the handler)
         * which jmps to the exception handler which, in turn, calls d to
         * execute the finally block. Fall through is as shown above. So,
         * we see a .E violation for the handler's call to d and a .C violation
         * for the fall through case of the ret @ b targeting e. We may also
         * see a .E violation for a call to a as sometimes the handler computes
         * the target of the jmp @ d and passes that to a different exception
         * handler.
         *
         * For try-except we see the following layout :
         *     I've only seen jmp ind in the case that led to needing
         *     at_Borland_SEH_rct_exemption() to be added, not that
         *     it makes any difference.
         *     [ jmp z: (rel8/32) || (rarely) ret || (very rarely) jmp ind]
         *     x: jmp rel32 (x should be in the .E/.F table)
         *     y:
         *     ...
         *     call rel32
         *     [z: ... || ret ]
         * Though there may be other optimized layouts (the ret instead of the
         * jmp z: is one such) so we may not want to rely on anything other
         * than x y. The exception dispatcher calls x (the SEH frame has x as
         * the handler) which jmps to the exception handler which, in turn,
         * jmps to y to execute the except block. We see a .F violation from
         * the handler's jmp to y.
         * at_Borland_SEH_rct_exemption() covers a
         * case where the address of x (and thus y) in an existing SEH frame
         * is changed in place instead of popping and pushing a new frame.
         *
         * All addresses (rel and otherwise) should be in the same module. So
         * we need to recognize the pattern and add d:/y: to the .E/.F table
         * as well as a: (sometimes the handler calculates the target of d and
         * passes that up to a higher level routine, though I don't see the
         * point) and add e: to the .C table.
         *
         * It would be preferable to handle these exemptions reactively at
         * the violation point, but unfortunately, by the time we get to the
         * violation the SEH frame information has been popped off the stack
         * and is lost, so we have to do it pre-emptively here (pattern
         * matching at violation time has proven too difficult in the face of
         * certain compiler optimizations). See at_Borland_SEH_rct_exemption()
         * in callback.c, that could handle all ind branches to y and ind calls
         * to d (see below) at an acceptable level of security if we desired.
         * Handling the ret @ b to e reactively would require the ability to
         * recreate the exact src cti (so we can use the addr of the ret to
         * pattern match) at the violation point (something that can't always
         * currently be done, reset flushing etc.). Handling the ind call to
         * a (which I've never actually seen, though I've seen the address
         * computed and it looks like it could likely be hit) reactively is
         * more tricky. Prob. the only way to handle that is to allow .E/.F
         * transitions to any address after a push imm32 of an address in the
         * same module, but that might be too permissive. FIXME - should still
         * revisit doing the exemptions reactively at some point, esp. once we
         * can reliably get the src cti.
         */
        extern bool seen_Borland_SEH; /* set for callback.c */
        /* First read in the SEH frame, this is the observed structure and
         * the first two fields (which are all that we use) are constrained by
         * ntdll exception dispatcher (see EXCEPTION_REGISTRATION declaration
         * in ntdll.h). */
        /* FIXME - could just use EXCEPTION_REGISTRATION period since all we
         * need is the handler address and it would allow simpler curiosity
         * [see 8181] below. If, as is expected, other options make use of
         * this routine we'll probably have one shared get of the SEH frame
         * anyways. */
        typedef struct _borland_seh_frame_t {
            EXCEPTION_REGISTRATION reg;
            reg_t xbp; /* not used by us */
        } borland_seh_frame_t;
        borland_seh_frame_t frame; /* will hold [b,e] or [x-1,y] */
        byte target_buf[RET_0_LENGTH + 2 * JMP_LONG_LENGTH];
        app_pc handler_jmp_target = NULL;
        if (!d_r_safe_read(value, sizeof(frame), &frame)) {
            /* We already checked for NULL and -1 above so this should be
             * a valid SEH frame. Xref 8181, borland_seh_frame_t struct is
             * bigger than EXCEPTION_REGISTRATION (which is all that is
             * required) so verify smaller size is readable.
*/ ASSERT_CURIOSITY( sizeof(EXCEPTION_REGISTRATION) < sizeof(frame) && d_r_safe_read(value, sizeof(EXCEPTION_REGISTRATION), &frame)); goto post_borland; } /* frame.reg.handler is c or y, read extra prior bytes to look for b */ if (!d_r_safe_read((app_pc)frame.reg.handler - RET_0_LENGTH, sizeof(target_buf), target_buf)) { goto post_borland; } if (is_jmp_rel32(&target_buf[RET_0_LENGTH], (app_pc)frame.reg.handler, &handler_jmp_target)) { /* we have a possible match, now do the more expensive checking */ app_pc base; LOG(THREAD, LOG_INTERP, 3, "Read possible borland SEH frame @" PFX "\n\t" "next=" PFX " handler=" PFX " xbp=" PFX "\n\t", value, frame.reg.prev, frame.reg.handler, frame.xbp); DOLOG(3, LOG_INTERP, { dump_buffer_as_bytes(THREAD, target_buf, sizeof(target_buf), 0); }); /* optimize check if we've already processed this frame once */ if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED) && rct_ind_branch_target_lookup( dcontext, (app_pc)frame.reg.handler + JMP_LONG_LENGTH)) { /* we already processed this SEH frame once, this is prob. a * frame pop, no need to continue */ STATS_INC(num_borland_SEH_dup_frame); LOG(THREAD, LOG_INTERP, 3, "Processing duplicate Borland SEH frame\n"); goto post_borland; } base = get_module_base((app_pc)frame.reg.handler); STATS_INC(num_borland_SEH_initial_match); /* Perf opt, we use the cheaper get_allocation_base() below instead * of get_module_base(). We are checking the result against a * known module base (base) so no need to duplicate the is module * check. FIXME - the checks prob. aren't even necessary given the * later is_in_code_section checks. Xref case 8171. */ /* FIXME - (perf) we could cache the region from the first * is_in_code_section() call and check against that before falling * back on is_in_code_section in case of multiple code sections. */ if (base != NULL && get_allocation_base(handler_jmp_target) == base && get_allocation_base(bb->instr_start) == base && /* FIXME - with -rct_analyze_at_load we should be able to * verify that frame->handler (x: c:) is on the .E/.F * table already. We could also try to match known pre x: * post y: patterns. 
*/ is_in_code_section(base, bb->instr_start, NULL, NULL) && is_in_code_section(base, handler_jmp_target, NULL, NULL) && is_range_in_code_section(base, (app_pc)frame.reg.handler, (app_pc)frame.reg.handler + JMP_LONG_LENGTH + 1, NULL, NULL)) { app_pc finally_target; byte push_imm_buf[PUSH_IMM32_LENGTH]; DEBUG_DECLARE(bool ok;) /* we have a match, add handler+JMP_LONG_LENGTH (y: d:) * to .E/.F table */ STATS_INC(num_borland_SEH_try_match); LOG(THREAD, LOG_INTERP, 2, "Found Borland SEH frame adding " PFX " to .E/.F table\n", (app_pc)frame.reg.handler + JMP_LONG_LENGTH); if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED)) { d_r_mutex_lock(&rct_module_lock); rct_add_valid_ind_branch_target( dcontext, (app_pc)frame.reg.handler + JMP_LONG_LENGTH); d_r_mutex_unlock(&rct_module_lock); } /* we set this as an enabler for another exemption in * callback .C, see notes there */ if (!seen_Borland_SEH) { SELF_UNPROTECT_DATASEC(DATASEC_RARELY_PROT); seen_Borland_SEH = true; SELF_PROTECT_DATASEC(DATASEC_RARELY_PROT); } /* case 8648: used to decide which RCT entries to persist */ DEBUG_DECLARE(ok =) os_module_set_flag(base, MODULE_HAS_BORLAND_SEH); ASSERT(ok); /* look for .C addresses for try finally */ if (target_buf[0] == RAW_OPCODE_ret && (is_jmp_rel32(&target_buf[RET_0_LENGTH + JMP_LONG_LENGTH], (app_pc)frame.reg.handler + JMP_LONG_LENGTH, &finally_target) || is_jmp_rel8(&target_buf[RET_0_LENGTH + JMP_LONG_LENGTH], (app_pc)frame.reg.handler + JMP_LONG_LENGTH, &finally_target)) && d_r_safe_read(finally_target - sizeof(push_imm_buf), sizeof(push_imm_buf), push_imm_buf) && push_imm_buf[0] == RAW_OPCODE_push_imm32) { app_pc push_val = *(app_pc *)&push_imm_buf[1]; /* do a few more, expensive, sanity checks */ /* FIXME - (perf) see earlier note on get_allocation_base() * and is_in_code_section() usage. */ if (get_allocation_base(finally_target) == base && is_in_code_section(base, finally_target, NULL, NULL) && get_allocation_base(push_val) == base && /* FIXME - could also check that push_val is in * .E/.F table, at least for -rct_analyze_at_load */ is_in_code_section(base, push_val, NULL, NULL)) { /* Full match, add push_val (e:) to the .C table * and finally_target (a:) to the .E/.F table */ STATS_INC(num_borland_SEH_finally_match); LOG(THREAD, LOG_INTERP, 2, "Found Borland SEH finally frame adding " PFX " to" " .C table and " PFX " to .E/.F table\n", push_val, finally_target); if ((DYNAMO_OPTION(rct_ind_jump) != OPTION_DISABLED || DYNAMO_OPTION(rct_ind_call) != OPTION_DISABLED)) { d_r_mutex_lock(&rct_module_lock); rct_add_valid_ind_branch_target(dcontext, finally_target); d_r_mutex_unlock(&rct_module_lock); } if (DYNAMO_OPTION(ret_after_call)) { fragment_add_after_call(dcontext, push_val); } } else { ASSERT_CURIOSITY(false && "partial borland seh finally match"); } } } } } post_borland: # endif /* RETURN_AFTER_CALL */ return; } /* helper routine for bb_process_fs_ref * return true if bb should be continued, false if it shouldn't */ static bool bb_process_fs_ref_opnd(dcontext_t *dcontext, build_bb_t *bb, opnd_t dst, bool *is_to_fs0) { ASSERT(is_to_fs0 != NULL); *is_to_fs0 = false; if (opnd_is_far_base_disp(dst) && /* FIXME - check size? */ opnd_get_segment(dst) == SEG_FS) { /* is a write to fs:[*] */ if (bb->instr_start != bb->start_pc) { /* Not first instruction in the bb, end bb before this * instruction, so we can see it as the first instruction of a * new bb where we can use the register state. 
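             * (The fs:[0] check and the stored value are computed from the
             * mcontext, which only matches the app state when this instr starts
             * the bb; see the bb->app_interp handling in bb_process_fs_ref.)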
             */
            /* As is, always ending the bb here has a mixed effect on mem usage
             * with default options. We do end up with slightly more bb's
             * (and associated bookkeeping costs), but frequently with MS dlls
             * we reduce code cache duplication from jmp/call elision
             * (_SEH_[Pro,Epi]log otherwise ends up frequently duplicated for
             * instance). */
            /* FIXME - we must stop the bb here even if there's already
             * a bb built for the next instruction, as we have to have
             * reproducible bb building for recreate app state. We should
             * only get here through code duplication (typically jmp/call
             * inlining, though can also be through multiple entry points into
             * the same block of non cti instructions). */
            bb_stop_prior_to_instr(dcontext, bb, false /*not appended yet*/);
            return false; /* stop bb */
        }
        /* Only process the push if building a new bb for cache, can't check
         * this any earlier since have to preserve bb building/ending behavior
         * even when not for cache (for recreation etc.). */
        if (bb->app_interp) {
            /* check is write to fs:[0] */
            /* XXX: this won't identify all memory references (need to switch to
             * instr_compute_address_ex_priv() in order to handle VSIB) but the
             * current usage is just to identify the Borland pattern so that's ok. */
            if (opnd_compute_address_priv(dst, get_mcontext(dcontext)) == NULL) {
                /* we have new mov to fs:[0] */
                *is_to_fs0 = true;
            }
        }
    }
    return true;
}

/* While currently only used for Borland SEH exemptions, this analysis could
 * also be helpful for other SEH tasks (xref case 5824). */
static bool
bb_process_fs_ref(dcontext_t *dcontext, build_bb_t *bb)
{
    ASSERT(DYNAMO_OPTION(process_SEH_push) &&
           instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS));
    /* If this is the first instruction of a bb for the cache we
     * want to fully decode it, check if it's pushing an SEH frame
     * and, if so, pass it to the SEH checking routines (currently
     * just used for the Borland SEH rct handling). If this is not
     * the first instruction of the bb then we want to stop the bb
     * just before this instruction so that when we do process this
     * instruction it will be the first in the bb (allowing us to
     * use the register state). */
    if (!bb->full_decode) {
        instr_decode(dcontext, bb->instr);
        /* is possible this is an invalid instr that made it through the fast
         * decode, FIXME is there a better way to handle this? */
        if (!instr_valid(bb->instr)) {
            ASSERT_NOT_TESTED();
            if (bb->cur_pc == NULL)
                bb->cur_pc = bb->instr_start;
            bb_process_invalid_instr(dcontext, bb);
            return false; /* stop bb */
        }
        ASSERT(instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS));
    }
    /* expect to see only simple mov's to fs:[0] for new SEH frames
     * FIXME - might we see other types we'd want to intercept?
     * do we want to process pop instructions (usually just for removing
     * a frame)? */
    if (instr_get_opcode(bb->instr) == OP_mov_st) {
        bool is_to_fs0;
        opnd_t dst = instr_get_dst(bb->instr, 0);
        if (!bb_process_fs_ref_opnd(dcontext, bb, dst, &is_to_fs0))
            return false; /* end bb */
        /* Only process the push if building a new bb for cache, can't check
         * this any earlier since have to preserve bb building/ending behavior
         * even when not for cache (for recreation etc.).
*/ if (bb->app_interp) { if (is_to_fs0) { ptr_int_t value = 0; opnd_t src = instr_get_src(bb->instr, 0); if (opnd_is_immed_int(src)) { value = opnd_get_immed_int(src); } else if (opnd_is_reg(src)) { value = reg_get_value_priv(opnd_get_reg(src), get_mcontext(dcontext)); } else { ASSERT_NOT_REACHED(); } STATS_INC(num_SEH_pushes_processed); LOG(THREAD, LOG_INTERP, 3, "found mov to fs:[0] @ " PFX "\n", bb->instr_start); bb_process_SEH_push(dcontext, bb, (void *)value); } else { STATS_INC(num_fs_movs_not_SEH); } } } # if defined(DEBUG) && defined(INTERNAL) else if (INTERNAL_OPTION(check_for_SEH_push)) { /* Debug build Sanity check that we aren't missing SEH frame pushes */ int i; int num_dsts = instr_num_dsts(bb->instr); for (i = 0; i < num_dsts; i++) { bool is_to_fs0; opnd_t dst = instr_get_dst(bb->instr, i); if (!bb_process_fs_ref_opnd(dcontext, bb, dst, &is_to_fs0)) { STATS_INC(num_process_SEH_bb_early_terminate_debug); return false; /* end bb */ } /* common case is pop instructions to fs:[0] when popping an * SEH frame stored on tos */ if (is_to_fs0) { if (instr_get_opcode(bb->instr) == OP_pop) { LOG(THREAD, LOG_INTERP, 4, "found pop to fs:[0] @ " PFX "\n", bb->instr_start); STATS_INC(num_process_SEH_pop_fs0); } else { /* an unexpected SEH frame push */ LOG(THREAD, LOG_INTERP, 1, "found unexpected write to fs:[0] @" PFX "\n", bb->instr_start); DOLOG(1, LOG_INTERP, { d_r_loginst(dcontext, 1, bb->instr, ""); }); ASSERT_CURIOSITY(!is_to_fs0); } } } } # endif return true; /* continue bb */ } #endif /* win32 */ #if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86) /* The basic strategy for mangling mov_seg instruction is: * For mov fs/gs => reg/[mem], simply mangle it to write * the app's fs/gs selector value into dst. * For mov reg/mem => fs/gs, we make it as the first instruction * of bb, and mark that bb not linked and has mov_seg instr, * and change that instruction to be a nop. * Then whenever before entering code cache, we check if that's the bb * has mov_seg. If yes, we will update the information we maintained * about the app's fs/gs. */ /* check if the basic block building should continue on a mov_seg instr. */ static bool bb_process_mov_seg(dcontext_t *dcontext, build_bb_t *bb) { reg_id_t seg; if (!INTERNAL_OPTION(mangle_app_seg)) return true; /* continue bb */ /* if it is a read, we only need mangle the instruction. */ ASSERT(instr_num_srcs(bb->instr) == 1); if (opnd_is_reg(instr_get_src(bb->instr, 0)) && reg_is_segment(opnd_get_reg(instr_get_src(bb->instr, 0)))) return true; /* continue bb */ /* it is an update, we need set to be the first instr of bb */ ASSERT(instr_num_dsts(bb->instr) == 1); ASSERT(opnd_is_reg(instr_get_dst(bb->instr, 0))); seg = opnd_get_reg(instr_get_dst(bb->instr, 0)); ASSERT(reg_is_segment(seg)); /* we only need handle fs/gs */ if (seg != SEG_GS && seg != SEG_FS) return true; /* continue bb */ /* if no private loader, we only need mangle the non-tls seg */ if (seg == IF_X64_ELSE(SEG_FS, SEG_FS) && !INTERNAL_OPTION(private_loader)) return true; /* continue bb */ if (bb->instr_start == bb->start_pc) { /* the first instruction, we can continue build bb. */ /* this bb cannot be part of trace! */ bb->flags |= FRAG_CANNOT_BE_TRACE; bb->flags |= FRAG_HAS_MOV_SEG; return true; /* continue bb */ } LOG(THREAD, LOG_INTERP, 3, "ending bb before mov_seg\n"); /* Set cur_pc back to the start of this instruction and delete this * instruction from the bb ilist. 
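     * The mov-to-seg will then start its own bb, where the first-instruction path
     * above marks it FRAG_HAS_MOV_SEG, so the app's fs/gs value can be refreshed
     * before entering the cache.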
*/ bb->cur_pc = instr_get_raw_bits(bb->instr); instrlist_remove(bb->ilist, bb->instr); instr_destroy(dcontext, bb->instr); /* Set instr to NULL in order to get translation of exit cti correct. */ bb->instr = NULL; /* this block must be the last one in a trace * breaking traces here shouldn't be a perf issue b/c this is so rare, * it should happen only once per thread on setting up tls. */ bb->flags |= FRAG_MUST_END_TRACE; return false; /* stop bb here */ } #endif /* UNIX && X86 */ /* Returns true to indicate that ignorable syscall processing is completed * with *continue_bb indicating if the bb should be continued or not. * When returning false, continue_bb isn't pertinent. */ static bool bb_process_ignorable_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum, bool *continue_bb) { STATS_INC(ignorable_syscalls); BBPRINT(bb, 3, "found ignorable system call 0x%04x\n", sysnum); #ifdef WINDOWS if (get_syscall_method() != SYSCALL_METHOD_SYSENTER) { DOCHECK(1, { if (get_syscall_method() == SYSCALL_METHOD_WOW64) ASSERT_NOT_TESTED(); }); if (continue_bb != NULL) *continue_bb = true; return true; } else { /* Can we continue interp after the sysenter at the instruction * after the call to sysenter? */ instr_t *call = bb_verify_sysenter_pattern(dcontext, bb); if (call != NULL) { /* If we're continuing code discovery at the after-call address, * change the cur_pc to continue at the after-call addr. This is * safe since the preceding call is in the fragment and * %xsp/(%xsp) hasn't changed since the call. Obviously, we assume * that the sysenter breaks control flow in fashion such any * instruction that follows it isn't reached by DR. */ if (DYNAMO_OPTION(ignore_syscalls_follow_sysenter)) { bb->cur_pc = instr_get_raw_bits(call) + instr_length(dcontext, call); if (continue_bb != NULL) *continue_bb = true; return true; } else { /* End this bb now. We set the exit target so that control * skips the vsyscall 'ret' that's executed natively after the * syscall and ends up at the correct place. */ /* FIXME Assigning exit_target causes the fragment to end * with a direct exit stub to the after-call address, which * is fine. If bb->exit_target < bb->start_pc, the future * fragment for exit_target is marked as a trace head which * isn't intended. A potentially undesirable side effect * is that exit_target's fragment can't be included in * trace for start_pc. */ bb->exit_target = instr_get_raw_bits(call) + instr_length(dcontext, call); if (continue_bb != NULL) *continue_bb = false; return true; } } STATS_INC(ignorable_syscalls_failed_sysenter_pattern); /* Pattern match failed but the syscall is ignorable so maybe we * can try shared syscall? */ /* Decrement the stat to prevent double counting. We rarely expect to hit * this case. */ STATS_DEC(ignorable_syscalls); return false; } #elif defined(MACOS) if (instr_get_opcode(bb->instr) == OP_sysenter) { /* To continue after the sysenter we need to go to the ret ibl, as user-mode * sysenter wrappers put the retaddr into edx as the post-kernel continuation. 
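         * (Hence the exit below is typed as an indirect return and sent to the
         * return IBL rather than falling through to the next instruction.)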
*/ bb->exit_type |= LINK_INDIRECT | LINK_RETURN; bb->ibl_branch_type = IBL_RETURN; bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); LOG(THREAD, LOG_INTERP, 4, "sysenter exit target = " PFX "\n", bb->exit_target); if (continue_bb != NULL) *continue_bb = false; } else if (continue_bb != NULL) *continue_bb = true; return true; #else if (continue_bb != NULL) *continue_bb = true; return true; #endif } #ifdef WINDOWS /* Process a syscall that is executed via shared syscall. */ static void bb_process_shared_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum) { ASSERT(DYNAMO_OPTION(shared_syscalls)); DODEBUG({ if (ignorable_system_call(sysnum, bb->instr, NULL)) STATS_INC(ignorable_syscalls); else STATS_INC(optimizable_syscalls); }); BBPRINT(bb, 3, "found %soptimizable system call 0x%04x\n", INTERNAL_OPTION(shared_eq_ignore) ? "ignorable-" : "", sysnum); LOG(THREAD, LOG_INTERP, 3, "ending bb at syscall & NOT removing the interrupt itself\n"); /* Mark the instruction as pointing to shared syscall */ bb->instr->flags |= INSTR_SHARED_SYSCALL; /* this block must be the last one in a trace */ bb->flags |= FRAG_MUST_END_TRACE; /* we redirect all optimizable syscalls to a single shared piece of code. * Once a fragment reaches the shared syscall code, it can be safely * deleted, for example, if the thread is interrupted for a callback and * DR needs to delete fragments for cache management. * * Note that w/shared syscall, syscalls can be executed from TWO * places -- shared_syscall and do_syscall. */ bb->exit_target = shared_syscall_routine(dcontext); /* make sure translation for ending jmp ends up right, mangle will * remove this instruction, so set to NULL so translation does the * right thing */ bb->instr = NULL; } #endif /* WINDOWS */ #ifdef ARM /* This routine walks back to find the IT instr for the current IT block * and the position of instr in the current IT block, and returns whether * instr is the last instruction in the block. 
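 * For example, for the Thumb sequence "ITTE NE; addne ...; addne ...; addeq ..."
 * the block count is 3 and the final addeq is the last instr in the block.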
*/ static bool instr_is_last_in_it_block(instr_t *instr, instr_t **it_out, uint *pos_out) { instr_t *it; int num_instrs; ASSERT(instr != NULL && instr_get_isa_mode(instr) == DR_ISA_ARM_THUMB && instr_is_predicated(instr) && instr_is_app(instr)); /* walk backward to find the IT instruction */ for (it = instr_get_prev(instr), num_instrs = 1; /* meta and app instrs are treated identically here */ it != NULL && num_instrs <= 4 /* max 4 instr in an IT block */; it = instr_get_prev(it)) { if (instr_is_label(it)) continue; if (instr_get_opcode(it) == OP_it) break; num_instrs++; } ASSERT(it != NULL && instr_get_opcode(it) == OP_it); ASSERT(num_instrs <= instr_it_block_get_count(it)); if (it_out != NULL) *it_out = it; if (pos_out != NULL) *pos_out = num_instrs - 1; /* pos starts from 0 */ if (num_instrs == instr_it_block_get_count(it)) return true; return false; } static void adjust_it_instr_for_split(dcontext_t *dcontext, instr_t *it, uint pos) { dr_pred_type_t block_pred[IT_BLOCK_MAX_INSTRS]; uint i, block_count = instr_it_block_get_count(it); byte firstcond[2], mask[2]; DEBUG_DECLARE(bool ok;) ASSERT(pos < instr_it_block_get_count(it) - 1); for (i = 0; i < block_count; i++) block_pred[i] = instr_it_block_get_pred(it, i); DOCHECK(CHKLVL_ASSERTS, { instr_t *instr; for (instr = instr_get_next_app(it), i = 0; instr != NULL; instr = instr_get_next_app(instr)) { ASSERT(instr_is_predicated(instr) && i <= pos); ASSERT(block_pred[i++] == instr_get_predicate(instr)); } }); DEBUG_DECLARE(ok =) instr_it_block_compute_immediates( block_pred[0], (pos > 0) ? block_pred[1] : DR_PRED_NONE, (pos > 1) ? block_pred[2] : DR_PRED_NONE, DR_PRED_NONE, /* at most 3 preds */ &firstcond[0], &mask[0]); ASSERT(ok); DOCHECK(CHKLVL_ASSERTS, { DEBUG_DECLARE(ok =) instr_it_block_compute_immediates( block_pred[pos + 1], (block_count > pos + 2) ? block_pred[pos + 2] : DR_PRED_NONE, (block_count > pos + 3) ? block_pred[pos + 3] : DR_PRED_NONE, DR_PRED_NONE, /* at most 3 preds */ &firstcond[1], &mask[1]); ASSERT(ok); }); /* firstcond should be unchanged */ ASSERT(opnd_get_immed_int(instr_get_src(it, 0)) == firstcond[0]); instr_set_src(it, 1, OPND_CREATE_INT(mask[0])); LOG(THREAD, LOG_INTERP, 3, "ending bb in an IT block & adjusting the IT instruction\n"); /* FIXME i#1669: NYI on passing split it block info to next bb */ ASSERT_NOT_IMPLEMENTED(false); } #endif /* ARM */ static bool bb_process_non_ignorable_syscall(dcontext_t *dcontext, build_bb_t *bb, int sysnum) { BBPRINT(bb, 3, "found non-ignorable system call 0x%04x\n", sysnum); STATS_INC(non_ignorable_syscalls); bb->exit_type |= LINK_NI_SYSCALL; /* destroy the interrupt instruction */ LOG(THREAD, LOG_INTERP, 3, "ending bb at syscall & removing the interrupt itself\n"); /* Indicate that this is a non-ignorable syscall so mangle will remove */ /* FIXME i#1551: maybe we should union int80 and svc as both are inline syscall? 
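     * (the code below tags an int 0x80/svc exit with LINK_NI_SYSCALL_INT and
     * INSTR_NI_SYSCALL_INT, while other syscall encodings just get INSTR_NI_SYSCALL,
     * presumably so later mangling/dispatch can tell which form is being removed)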
*/ #ifdef UNIX if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { # if defined(MACOS) && defined(X86) int num = instr_get_interrupt_number(bb->instr); if (num == 0x81 || num == 0x82) { bb->exit_type |= LINK_SPECIAL_EXIT; bb->instr->flags |= INSTR_BRANCH_SPECIAL_EXIT; } else { ASSERT(num == 0x80); # endif /* MACOS && X86 */ bb->exit_type |= LINK_NI_SYSCALL_INT; bb->instr->flags |= INSTR_NI_SYSCALL_INT; # ifdef MACOS } # endif } else #endif bb->instr->flags |= INSTR_NI_SYSCALL; #ifdef ARM /* we assume all conditional syscalls are treated as non-ignorable */ if (instr_is_predicated(bb->instr)) { instr_t *it; uint pos; ASSERT(instr_is_syscall(bb->instr)); bb->svc_pred = instr_get_predicate(bb->instr); if (instr_get_isa_mode(bb->instr) == DR_ISA_ARM_THUMB && !instr_is_last_in_it_block(bb->instr, &it, &pos)) { /* FIXME i#1669: we violate the transparency and clients will see * modified IT instr. We should adjust the IT instr at mangling * stage after client instrumentation, but that is complex. */ adjust_it_instr_for_split(dcontext, it, pos); } } #endif /* Set instr to NULL in order to get translation of exit cti correct. */ bb->instr = NULL; /* this block must be the last one in a trace */ bb->flags |= FRAG_MUST_END_TRACE; return false; /* end bb now */ } /* returns true to indicate "continue bb" and false to indicate "end bb now" */ static inline bool bb_process_syscall(dcontext_t *dcontext, build_bb_t *bb) { int sysnum; /* PR 307284: for simplicity do syscall/int processing post-client. * We give up on inlining but we can still use ignorable/shared syscalls * and trace continuation. */ if (bb->pass_to_client && !bb->post_client) return false; #ifdef DGC_DIAGNOSTICS if (TEST(FRAG_DYNGEN, bb->flags) && !is_dyngen_vsyscall(bb->instr_start)) { LOG(THREAD, LOG_INTERP, 1, "WARNING: syscall @ " PFX " in dyngen code!\n", bb->instr_start); } #endif BBPRINT(bb, 4, "interp: syscall @ " PFX "\n", bb->instr_start); check_syscall_method(dcontext, bb->instr); bb->flags |= FRAG_HAS_SYSCALL; /* if we can identify syscall number and it is an ignorable syscall, * we let bb keep going, else we end bb and flag it */ sysnum = find_syscall_num(dcontext, bb->ilist, bb->instr); #ifdef VMX86_SERVER DOSTATS({ if (instr_get_opcode(bb->instr) == OP_int && instr_get_interrupt_number(bb->instr) == VMKUW_SYSCALL_GATEWAY) { STATS_INC(vmkuw_syscall_sites); LOG(THREAD, LOG_SYSCALLS, 2, "vmkuw system call site: #=%d\n", sysnum); } }); #endif BBPRINT(bb, 3, "syscall # is %d\n", sysnum); if (sysnum != -1 && instrument_filter_syscall(dcontext, sysnum)) { BBPRINT(bb, 3, "client asking to intercept => pretending syscall # %d is -1\n", sysnum); sysnum = -1; } #ifdef ARM if (sysnum != -1 && instr_is_predicated(bb->instr)) { BBPRINT(bb, 3, "conditional system calls cannot be inlined => " "pretending syscall # %d is -1\n", sysnum); sysnum = -1; } #endif if (sysnum != -1 && DYNAMO_OPTION(ignore_syscalls) && ignorable_system_call(sysnum, bb->instr, NULL) #ifdef X86 /* PR 288101: On Linux we do not yet support inlined sysenter instrs as we * do not have in-cache support for the post-sysenter continuation: we rely * for now on very simple sysenter handling where d_r_dispatch uses asynch_target * to know where to go next. 
*/ IF_LINUX(&&instr_get_opcode(bb->instr) != OP_sysenter) #endif /* X86 */ ) { bool continue_bb; if (bb_process_ignorable_syscall(dcontext, bb, sysnum, &continue_bb)) { if (!DYNAMO_OPTION(inline_ignored_syscalls)) continue_bb = false; return continue_bb; } } #ifdef WINDOWS if (sysnum != -1 && DYNAMO_OPTION(shared_syscalls) && optimizable_system_call(sysnum)) { bb_process_shared_syscall(dcontext, bb, sysnum); return false; } #endif /* Fall thru and handle as a non-ignorable syscall. */ return bb_process_non_ignorable_syscall(dcontext, bb, sysnum); } /* Case 3922: for wow64 we treat "call *fs:0xc0" as a system call. * Only sets continue_bb if it returns true. */ static bool bb_process_indcall_syscall(dcontext_t *dcontext, build_bb_t *bb, bool *continue_bb) { ASSERT(continue_bb != NULL); #ifdef WINDOWS if (instr_is_wow64_syscall(bb->instr)) { /* we could check the preceding instrs but we don't bother */ *continue_bb = bb_process_syscall(dcontext, bb); return true; } #endif return false; } /* returns true to indicate "continue bb" and false to indicate "end bb now" */ static inline bool bb_process_interrupt(dcontext_t *dcontext, build_bb_t *bb) { #if defined(DEBUG) || defined(INTERNAL) || defined(WINDOWS) int num = instr_get_interrupt_number(bb->instr); #endif /* PR 307284: for simplicity do syscall/int processing post-client. * We give up on inlining but we can still use ignorable/shared syscalls * and trace continuation. * PR 550752: we cannot end at int 0x2d: we live w/ client consequences */ if (bb->pass_to_client && !bb->post_client IF_WINDOWS(&&num != 0x2d)) return false; BBPRINT(bb, 3, "int 0x%x @ " PFX "\n", num, bb->instr_start); #ifdef WINDOWS if (num == 0x2b) { /* interrupt 0x2B signals return from callback */ /* end block here and come back to dynamo to perform interrupt */ bb->exit_type |= LINK_CALLBACK_RETURN; BBPRINT(bb, 3, "ending bb at cb ret & removing the interrupt itself\n"); /* Set instr to NULL in order to get translation of exit cti * correct. mangle will destroy the instruction */ bb->instr = NULL; bb->flags |= FRAG_MUST_END_TRACE; STATS_INC(num_int2b); return false; } else { SYSLOG_INTERNAL_INFO_ONCE("non-syscall, non-int2b 0x%x @ " PFX " from " PFX, num, bb->instr_start, bb->start_pc); } #endif /* WINDOWS */ return true; } /* If the current instr in the BB is an indirect call that can be converted into a * direct call, process it and return true, else, return false. * FIXME PR 288327: put in linux call* to vsyscall page */ static bool bb_process_convertible_indcall(dcontext_t *dcontext, build_bb_t *bb) { #ifdef X86 /* We perform several levels of checking, each increasingly more stringent * and expensive, with a false return should any fail. */ instr_t *instr; opnd_t src0; instr_t *call_instr; int call_src_reg; app_pc callee; bool vsyscall = false; /* Check if this BB can be extended and the instr is a (near) indirect call */ if (instr_get_opcode(bb->instr) != OP_call_ind) return false; /* Check if we have a "mov <imm> -> %reg; call %reg" or a * "mov <imm> -> %reg; call (%reg)" pair. First check for the call. */ /* The 'if' conditions are broken up to make the code more readable * while #ifdef-ing the WINDOWS case. It's still ugly though. */ instr = bb->instr; if (!( # ifdef WINDOWS /* Match 'call (%xdx)' for a post-SP2 indirect call to sysenter. */ (opnd_is_near_base_disp(instr_get_src(instr, 0)) && opnd_get_base(instr_get_src(instr, 0)) == REG_XDX && opnd_get_disp(instr_get_src(instr, 0)) == 0) || # endif /* Match 'call %reg'. 
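            * e.g. (sketch): b8 xx xx xx xx  mov $imm32 -> %eax   followed by
            *                ff d0           call %eax
            * (the preceding mov itself is verified a few lines below)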
*/ opnd_is_reg(instr_get_src(instr, 0)))) return false; /* If there's no CTI in the BB, we can check if there are 5+ preceding * bytes and if they could hold a "mov" instruction. */ if (!TEST(FRAG_HAS_DIRECT_CTI, bb->flags) && bb->instr_start - 5 >= bb->start_pc) { byte opcode = *((byte *)bb->instr_start - 5); /* Check the opcode. Do we see a "mov ... -> %reg"? Valid opcodes are in * the 0xb8-0xbf range (Intel IA-32 ISA ref, v.2) and specify the * destination register, i.e., 0xb8 means that %xax is the destination. */ if (opcode < 0xb8 || opcode > 0xbf) return false; } /* Check the previous instruction -- is it really a "mov"? */ src0 = instr_get_src(instr, 0); call_instr = instr; instr = instr_get_prev_expanded(dcontext, bb->ilist, bb->instr); call_src_reg = opnd_is_near_base_disp(src0) ? opnd_get_base(src0) : opnd_get_reg(src0); if (instr == NULL || instr_get_opcode(instr) != OP_mov_imm || opnd_get_reg(instr_get_dst(instr, 0)) != call_src_reg) return false; /* For the general case, we don't try to optimize a call * thru memory -- just check that the call uses a register. */ callee = NULL; if (opnd_is_reg(src0)) { /* Extract the target address. */ callee = (app_pc)opnd_get_immed_int(instr_get_src(instr, 0)); # ifdef WINDOWS # ifdef PROGRAM_SHEPHERDING /* FIXME - is checking for on vsyscall page better or is checking == to * VSYSCALL_BOOTSTRAP_ADDR? Both are hacky. */ if (is_dyngen_vsyscall((app_pc)opnd_get_immed_int(instr_get_src(instr, 0)))) { LOG(THREAD, LOG_INTERP, 4, "Pre-SP2 style indirect call " "to sysenter found at " PFX "\n", bb->instr_start); STATS_INC(num_sysenter_indcalls); vsyscall = true; ASSERT(opnd_get_immed_int(instr_get_src(instr, 0)) == (ptr_int_t)VSYSCALL_BOOTSTRAP_ADDR); ASSERT(!use_ki_syscall_routines()); /* double check our determination */ } else # endif # endif STATS_INC(num_convertible_indcalls); } # ifdef WINDOWS /* Match the "call (%xdx)" to sysenter case for SP2-patched os's. Memory at * address VSYSCALL_BOOTSTRAP_ADDR (0x7ffe0300) holds the address of * KiFastSystemCall or (FIXME - not handled) on older platforms KiIntSystemCall. * FIXME It's unsavory to hard-code 0x7ffe0300, but the constant has little * context in an SP2 os. It's a hold-over from pre-SP2. */ else if (get_syscall_method() == SYSCALL_METHOD_SYSENTER && call_src_reg == REG_XDX && opnd_get_immed_int(instr_get_src(instr, 0)) == (ptr_int_t)VSYSCALL_BOOTSTRAP_ADDR) { /* Extract the target address. We expect that the memory read using the * value in the immediate field is ok as it's the vsyscall page * which 1) cannot be made unreadable and 2) cannot be made writable so * the stored value will not change. Of course, it's possible that the * os could change the page contents. */ callee = (app_pc) * ((ptr_uint_t *)opnd_get_immed_int(instr_get_src(instr, 0))); if (get_app_sysenter_addr() == NULL) { /* For the first call* we've yet to decode an app syscall, yet we * cannot have later recreations have differing behavior, so we must * handle that case (even though it doesn't matter performance-wise * as the first call* is usually in runtime init code that's * executed once). So we do a raw byte compare to: * ntdll!KiFastSystemCall: * 7c82ed50 8bd4 mov xdx,xsp * 7c82ed52 0f34 sysenter */ uint raw; if (!d_r_safe_read(callee, sizeof(raw), &raw) || raw != 0x340fd48b) callee = NULL; } else { /* The callee should be a 2 byte "mov %xsp -> %xdx" followed by the * sysenter -- check the sysenter's address as 2 bytes past the callee. 
*/ if (callee + 2 != get_app_sysenter_addr()) callee = NULL; } vsyscall = (callee != NULL); ASSERT(use_ki_syscall_routines()); /* double check our determination */ DODEBUG({ if (callee == NULL) ASSERT_CURIOSITY(false && "call* to vsyscall unexpected mismatch"); else { LOG(THREAD, LOG_INTERP, 4, "Post-SP2 style indirect call " "to sysenter found at " PFX "\n", bb->instr_start); STATS_INC(num_sysenter_indcalls); } }); } # endif /* Check if register dataflow matched and we were able to extract * the callee address. */ if (callee == NULL) return false; if (vsyscall) { /* Case 8917: abandon coarse-grainness in favor of performance */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_indcall); } LOG(THREAD, LOG_INTERP, 4, "interp: possible convertible" " indirect call from " PFX " to " PFX "\n", bb->instr_start, callee); if (leave_call_native(callee) || must_not_be_entered(callee)) { BBPRINT(bb, 3, " NOT inlining indirect call to " PFX "\n", callee); /* Case 8711: coarse-grain can't handle non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); ASSERT_CURIOSITY_ONCE(!vsyscall && "leaving call* to vsyscall"); /* no need for bb_add_native_direct_xfer() b/c it's already indirect */ return true; /* keep bb going, w/o inlining call */ } if (bb->follow_direct && !must_not_be_entered(callee) && bb->num_elide_call < DYNAMO_OPTION(max_elide_call) && (DYNAMO_OPTION(elide_back_calls) || bb->cur_pc <= callee)) { /* FIXME This is identical to the code for evaluating a * direct call's callee. If such code appears in another * (3rd) place, we should outline it. * FIXME: use follow_direct_call() */ if (vsyscall) { /* As a flag to allow our xfer from now-non-coarse to coarse * (for vsyscall-in-ntdll) we pre-emptively mark as has-syscall. */ ASSERT(!TEST(FRAG_HAS_SYSCALL, bb->flags)); bb->flags |= FRAG_HAS_SYSCALL; } if (check_new_page_jmp(dcontext, bb, callee)) { if (vsyscall) /* Restore */ bb->flags &= ~FRAG_HAS_SYSCALL; bb->num_elide_call++; STATS_INC(total_elided_calls); STATS_TRACK_MAX(max_elided_calls, bb->num_elide_call); bb->cur_pc = callee; /* FIXME: when using follow_direct_call don't forget to set this */ call_instr->flags |= INSTR_IND_CALL_DIRECT; BBPRINT(bb, 4, " continuing in callee at " PFX "\n", bb->cur_pc); return true; /* keep bb going */ } if (vsyscall) { /* Case 8917: Restore, just in case, though we certainly expect to have * this flag set as soon as we decode a few more instrs and hit the * syscall itself -- but for pre-sp2 we currently could be elsewhere on * the same page, so let's be safe here. */ bb->flags &= ~FRAG_HAS_SYSCALL; } } /* FIXME: we're also not converting to a direct call - was this intended? 
*/ BBPRINT(bb, 3, " NOT following indirect call from " PFX " to " PFX "\n", bb->instr_start, callee); DODEBUG({ if (vsyscall) { DO_ONCE({ /* Case 9095: don't complain so loudly if user asked for no elision */ if (DYNAMO_OPTION(max_elide_call) <= 2) SYSLOG_INTERNAL_WARNING("leaving call* to vsyscall"); else ASSERT_CURIOSITY(false && "leaving call* to vsyscall"); }); } }); ; #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86 */ return false; /* stop bb */ } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* if we make the IAT sections unreadable we will need to map to proper location */ static inline app_pc read_from_IAT(app_pc iat_reference) { /* FIXME: we should have looked up where the real IAT should be at * the time of checking whether is_in_IAT */ return *(app_pc *)iat_reference; } /* returns whether target is an IAT of a module that we convert. Note * users still have to check the referred to value to verify targeting * a native module. */ static bool is_targeting_convertible_IAT(dcontext_t *dcontext, instr_t *instr, app_pc *iat_reference /* OUT */) { /* FIXME: we could give up on optimizing a particular module, * if too many writes to its IAT are found, * even 1 may be too much to handle! */ /* We only allow constant address, * any registers used for effective address calculation * can not be guaranteed to be constant dynamically. */ /* FIXME: yet a 'call %reg' if that value is an export would be a * good sign that we should go backwards and look for a possible * mov IAT[func] -> %reg and then optimize that as well - case 1948 */ app_pc memory_reference = NULL; opnd_t opnd = instr_get_target(instr); LOG(THREAD, LOG_INTERP, 4, "is_targeting_convertible_IAT: "); /* A typical example of a proper call * ff 15 8810807c call dword ptr [kernel32+0x1088 (7c801088)] * where * [7c801088] = 7c90f04c ntdll!RtlAnsiStringToUnicodeString * * The ModR/M byte for a displacement only with no SIB should be * 15 for CALL, 25 for JMP, (no far versions for IAT) */ if (opnd_is_near_base_disp(opnd)) { /* FIXME PR 253930: pattern-match x64 IAT calls */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); memory_reference = (app_pc)(ptr_uint_t)opnd_get_disp(opnd); /* now should check all other fields */ if (opnd_get_base(opnd) != REG_NULL || opnd_get_index(opnd) != REG_NULL) { /* this is not a pure memory reference, can't be IAT */ return false; } ASSERT(opnd_get_scale(opnd) == 0); } else { return false; } LOG(THREAD, LOG_INTERP, 3, "is_targeting_convertible_IAT: memory_reference " PFX "\n", memory_reference); /* FIXME: if we'd need some more additional structures those can * be looked up in a separate hashtable based on the IAT base, or * we'd have to extend the vmareas with custom fields */ ASSERT(DYNAMO_OPTION(IAT_convert)); if (vmvector_overlap(IAT_areas, memory_reference, memory_reference + 1)) { /* IAT has to be in the same module as current instruction, * but even in the unlikely reference by address from another * module there is really no problem, so not worth checking */ ASSERT_CURIOSITY(get_module_base(instr->bytes) == get_module_base(memory_reference)); /* FIXME: now that we know it is in IAT/GOT, * we have to READ the contents and return that * safely to the caller so they can convert accordingly */ /* FIXME: we would want to add the IAT section to the vmareas * of a region that has a converted block. 
Then on a write to * IAT we can flush efficiently only blocks affected by a * particular module, for a first hack though flushing * everything on a hooker will do. */ *iat_reference = memory_reference; return true; } else { /* plain global function * e.g. ntdll!RtlUnicodeStringToAnsiString+0x4c: * ff15c009917c call dword ptr [ntdll!RtlAllocateStringRoutine (7c9109c0)] */ return false; } } #endif /* X86 */ /* If the current instr in the BB is an indirect call through IAT that * can be converted into a direct call, process it and return true, * else, return false. */ static bool bb_process_IAT_convertible_indjmp(dcontext_t *dcontext, build_bb_t *bb, bool *elide_continue) { #ifdef X86 app_pc iat_reference; app_pc target; ASSERT(DYNAMO_OPTION(IAT_convert)); /* Check if the instr is a (near) indirect jump */ if (instr_get_opcode(bb->instr) != OP_jmp_ind) { ASSERT_CURIOSITY(false && "far ind jump"); return false; /* not matching, stop bb */ } if (!is_targeting_convertible_IAT(dcontext, bb->instr, &iat_reference)) { DOSTATS({ if (EXIT_IS_IND_JMP_PLT(bb->exit_type)) { /* see how often we mark as likely a PLT a JMP which in * fact is not going through IAT */ STATS_INC(num_indirect_jumps_PLT_not_IAT); LOG(THREAD, LOG_INTERP, 3, "bb_process_IAT_convertible_indjmp: indirect jmp not PLT instr=" PFX "\n", bb->instr->bytes); } }); return false; /* not matching, stop bb */ } target = read_from_IAT(iat_reference); DOLOG(4, LOG_INTERP, { char name[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(target, name, sizeof(name), false); LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: target=" PFX " %s\n", target, name); }); STATS_INC(num_indirect_jumps_IAT); DOSTATS({ if (!EXIT_IS_IND_JMP_PLT(bb->exit_type)) { /* count any other known uses for an indirect jump to go * through the IAT other than PLT uses, although a block * reaching max_elide_call would prevent the above * match */ STATS_INC(num_indirect_jumps_IAT_not_PLT); /* FIXME: case 6459 for further inquiry */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: indirect jmp not PLT target=" PFX "\n", target); } }); if (must_not_be_elided(target)) { ASSERT_NOT_TESTED(); BBPRINT(bb, 3, " NOT inlining indirect jmp to must_not_be_elided " PFX "\n", target); return false; /* do not convert indirect jump, will stop bb */ } /* Verify not targeting native exec DLLs, note that the IATs of * any module may have imported a native DLL. Note it may be * possible to optimize with a range check on IAT subregions, but * this check isn't much slower. */ /* IAT_elide should definitely not touch native_exec modules. * * FIXME: we also prevent IAT_convert from optimizing imports in * native_exec_list DLLs, although we could let that convert to a * direct jump and require native_exec_dircalls to be always on to * intercept those jmps. 
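     * (sketch: e.g. a PLT-style 'jmp *IAT[foo]' whose slot currently points into a
     * native_exec module must stay indirect here, since a converted direct jmp would
     * only be intercepted if native_exec_dircalls were guaranteed to be on)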
*/ if (DYNAMO_OPTION(native_exec) && is_native_pc(target)) { BBPRINT(bb, 3, " NOT inlining indirect jump to native exec module " PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_native); return false; /* do not convert indirect jump, stop bb */ } /* mangle mostly as such as direct jumps would be mangled in * bb_process_ubr(dcontext, bb) but note bb->instr has already * been appended so has to reverse some of its actions */ /* pretend never saw an indirect JMP, we'll either add a new direct JMP or we'll just continue in target */ instrlist_remove(bb->ilist, bb->instr); /* bb->instr has been appended already */ instr_destroy(dcontext, bb->instr); bb->instr = NULL; if (DYNAMO_OPTION(IAT_elide)) { /* try to elide just as a direct jmp would have been elided */ /* We could have used follow_direct_call instead since * commonly this really is a disguised CALL*. Yet for PLT use * of the form of CALL PLT[foo]; JMP* IAT[foo] we would have * already counted the CALL. If we have tail call elimination * that converts a CALL* into a JMP* it is also OK to treat as * a JMP instead of a CALL just as if sharing tails. */ if (follow_direct_jump(dcontext, bb, target)) { LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: eliding jmp* target=" PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_elided); *elide_continue = true; /* do not stop bb */ return true; /* converted indirect to direct */ } } /* otherwise convert to direct jump without eliding */ /* we set bb->instr to NULL so unlike bb_process_ubr * we get the final exit_target added by build_bb_ilist * FIXME: case 85: which will work only when we're using bb->mangle_ilist * FIXME: what are callers supposed to see when we do NOT mangle? */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indjmp: converting jmp* target=" PFX "\n", target); STATS_INC(num_indirect_jumps_IAT_converted); /* end basic block with a direct JMP to target */ bb->exit_target = target; *elide_continue = false; /* matching, but should stop bb */ return true; /* matching */ #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; #endif /* X86/ARM */ } /* Returns true if the current instr in the BB is an indirect call * through IAT that can be converted into a direct call, process it * and sets elide_continue. Otherwise function return false. * OUT elide_continue is set when bb building should continue in target, * and not set when bb building should be stopped. */ static bool bb_process_IAT_convertible_indcall(dcontext_t *dcontext, build_bb_t *bb, bool *elide_continue) { #ifdef X86 app_pc iat_reference; app_pc target; ASSERT(DYNAMO_OPTION(IAT_convert)); /* FIXME: the code structure is the same as * bb_process_IAT_convertible_indjmp, could fuse the two */ /* We perform several levels of checking, each increasingly more stringent * and expensive, with a false return should any fail. 
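     * (roughly: 1) the instr is a near OP_call_ind, 2) its target is a convertible
     * IAT slot, 3) the slot's current value is safe to enter (not leave_call_native /
     * must_not_be_entered and not a native_exec module); then we either elide into
     * the callee or convert the call* into a direct call)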
*/ /* Check if the instr is a (near) indirect call */ if (instr_get_opcode(bb->instr) != OP_call_ind) { ASSERT_CURIOSITY(false && "far call"); return false; /* not matching, stop bb */ } if (!is_targeting_convertible_IAT(dcontext, bb->instr, &iat_reference)) { return false; /* not matching, stop bb */ } target = read_from_IAT(iat_reference); DOLOG(4, LOG_INTERP, { char name[MAXIMUM_SYMBOL_LENGTH]; print_symbolic_address(target, name, sizeof(name), false); LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: target=" PFX " %s\n", target, name); }); STATS_INC(num_indirect_calls_IAT); /* mangle mostly as such as direct calls are mangled with * bb_process_call_direct(dcontext, bb) */ if (leave_call_native(target) || must_not_be_entered(target)) { ASSERT_NOT_TESTED(); BBPRINT(bb, 3, " NOT inlining indirect call to leave_call_native " PFX "\n", target); return false; /* do not convert indirect call, stop bb */ } /* Verify not targeting native exec DLLs, note that the IATs of * any module may have imported a native DLL. Note it may be * possible to optimize with a range check on IAT subregions, but * this check isn't much slower. */ if (DYNAMO_OPTION(native_exec) && is_native_pc(target)) { BBPRINT(bb, 3, " NOT inlining indirect call to native exec module " PFX "\n", target); STATS_INC(num_indirect_calls_IAT_native); return false; /* do not convert indirect call, stop bb */ } /* mangle_indirect_call and calculate return address as of * bb->instr and will remove bb->instr * FIXME: it would have been * better to replace in instrlist with a direct call and have * mangle_{in,}direct_call use other than the raw bytes, but this for now does the * job. */ bb->instr->flags |= INSTR_IND_CALL_DIRECT; if (DYNAMO_OPTION(IAT_elide)) { /* try to elide just as a direct call would have been elided */ if (follow_direct_call(dcontext, bb, target)) { LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: eliding call* flags=0x%08x " "target=" PFX "\n", bb->instr->flags, target); STATS_INC(num_indirect_calls_IAT_elided); *elide_continue = true; /* do not stop bb */ return true; /* converted indirect to direct */ } } /* otherwise convert to direct call without eliding */ LOG(THREAD, LOG_INTERP, 4, "bb_process_IAT_convertible_indcall: converting call* flags=0x%08x target=" PFX "\n", bb->instr->flags, target); STATS_INC(num_indirect_calls_IAT_converted); /* bb->instr has been appended already, and will get removed by * mangle_indirect_call. We don't need to set to NULL, since this * instr is a CTI and the final jump's translation target should * still be the original indirect call. */ bb->exit_target = target; /* end basic block with a direct CALL to target. With default * options it should get mangled to a PUSH; JMP */ *elide_continue = false; /* matching, but should stop bb */ return true; /* converted indirect to direct */ #elif defined(AARCHXX) /* FIXME i#1551, i#1569: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(false); return false; #endif /* X86/ARM */ } /* Called on instructions that save the FPU state */ static void bb_process_float_pc(dcontext_t *dcontext, build_bb_t *bb) { /* i#698: for instructions that save the floating-point state * (e.g., fxsave), we go back to d_r_dispatch to translate the fp pc. * We rule out being in a trace (and thus a potential alternative * would be to use a FRAG_ flag). 
These are rare instructions so that * shouldn't have a significant perf impact: except we've been hitting * libm code that uses fnstenv and is not rare, so we have non-inlined * translation under an option for now. */ if (DYNAMO_OPTION(translate_fpu_pc)) { bb->exit_type |= LINK_SPECIAL_EXIT; bb->flags |= FRAG_CANNOT_BE_TRACE; } /* If we inline the pc update, we can't persist. Simplest to keep fine-grained. */ bb->flags &= ~FRAG_COARSE_GRAIN; } static bool instr_will_be_exit_cti(instr_t *inst) { /* can't use instr_is_exit_cti() on pre-mangled instrs */ return (instr_is_app(inst) && instr_is_cti(inst) && (!instr_is_near_call_direct(inst) || !leave_call_native(instr_get_branch_target_pc(inst))) /* PR 239470: ignore wow64 syscall, which is an ind call */ IF_WINDOWS(&&!instr_is_wow64_syscall(inst))); } /* PR 215217: check syscall restrictions */ static bool client_check_syscall(instrlist_t *ilist, instr_t *inst, bool *found_syscall, bool *found_int) { int op_int = IF_X86_ELSE(OP_int, OP_svc); /* We do consider the wow64 call* a syscall here (it is both * a syscall and a call*: PR 240258). */ if (instr_is_syscall(inst) || instr_get_opcode(inst) == op_int) { if (instr_is_syscall(inst) && found_syscall != NULL) *found_syscall = true; /* Xref PR 313869 - we should be ignoring int 3 here. */ if (instr_get_opcode(inst) == op_int && found_int != NULL) *found_int = true; /* For linux an ignorable syscall is not a problem. Our * pre-syscall-exit jmp is added post client mangling so should * be robust. * FIXME: now that we have -no_inline_ignored_syscalls should * we assert on ignorable also? Probably we'd have to have * an exception for the middle of a trace? */ if (IF_UNIX(TEST(INSTR_NI_SYSCALL, inst->flags)) /* PR 243391: only block-ending interrupt 2b matters */ IF_WINDOWS(instr_is_syscall(inst) || ((instr_get_opcode(inst) == OP_int && instr_get_interrupt_number(inst) == 0x2b)))) { /* This check means we shouldn't hit the exit_type flags * check below but we leave it in place in case we add * other flags in future */ if (inst != instrlist_last(ilist)) { CLIENT_ASSERT(false, "a syscall or interrupt must terminate the block"); return false; } /* should we forcibly delete the subsequent instrs? * or the client has to deal w/ bad behavior in release build? */ } } return true; } /* Pass bb to client, and afterward check for criteria we require and rescan for * eflags and other flags that might have changed. * Returns true normally; returns false to indicate "go native". */ static bool client_process_bb(dcontext_t *dcontext, build_bb_t *bb) { dr_emit_flags_t emitflags = DR_EMIT_DEFAULT; instr_t *inst; bool found_exit_cti = false; bool found_syscall = false; bool found_int = false; #ifdef ANNOTATIONS app_pc trailing_annotation_pc = NULL, instrumentation_pc = NULL; bool found_instrumentation_pc = false; instr_t *annotation_label = NULL; #endif instr_t *last_app_instr = NULL; /* This routine is called by more than just bb builder, also used * for recreating state, so only call if caller requested it * (usually that coincides w/ bb->app_interp being set, but not * when recreating state on a fault (PR 214962)). * FIXME: hot patches shouldn't be injected during state recreations; * does predicating on bb->app_interp take care of this issue? */ if (!bb->pass_to_client) return true; /* i#995: DR may build a bb with one invalid instruction, which won't be * passed to cliennt. * FIXME: i#1000, we should present the bb to the client. * i#1000-c#1: the bb->ilist could be empty. 
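     * (the early returns below skip instrumentation for an empty ilist and for a
     * single-instr bb whose only instr lacks a valid opcode (level 0 or invalid),
     * built just to raise the decode fault)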
*/ if (instrlist_first(bb->ilist) == NULL) return true; if (!instr_opcode_valid(instrlist_first(bb->ilist)) && /* For -fast_client_decode we can have level 0 instrs so check * to ensure this is a single-instr bb that was built just to * raise the fault for us. * XXX i#1000: shouldn't we pass this to the client? It might not handle an * invalid instr properly though. */ instrlist_first(bb->ilist) == instrlist_last(bb->ilist)) { return true; } /* Call the bb creation callback(s) */ if (!instrument_basic_block(dcontext, /* DrMem#1735: pass app pc, not selfmod copy pc */ (bb->pretend_pc == NULL ? bb->start_pc : bb->pretend_pc), bb->ilist, bb->for_trace, !bb->app_interp, &emitflags)) { /* although no callback was called we must process syscalls/ints (PR 307284) */ } if (bb->for_cache && TEST(DR_EMIT_GO_NATIVE, emitflags)) { LOG(THREAD, LOG_INTERP, 2, "client requested that we go native\n"); SYSLOG_INTERNAL_INFO("thread " TIDFMT " is going native at client request", d_r_get_thread_id()); /* we leverage the existing native_exec mechanism */ dcontext->native_exec_postsyscall = bb->start_pc; dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL; /* dynamo_thread_not_under_dynamo() will be called in dispatch_enter_native(). */ return false; } bb->post_client = true; /* FIXME: instrumentor may totally mess us up -- our flags * or syscall info might be wrong. xref PR 215217 */ /* PR 215217, PR 240265: * We need to check for client changes that require a new exit * target. We can't practically analyze the instrlist to decipher * the exit, so we'll search backwards and require that the last * cti is the exit cti. Typically, the last instruction in the * block should be the exit. Post-mbr and post-syscall positions * are particularly fragile, as our mangling code sets state up for * the exit that could be messed up by instrs inserted after the * mbr/syscall. We thus disallow such instrs (except for * dr_insert_mbr_instrumentation()). xref cases 10503, 10782, 10784 * * Here's what we support: * - more than one exit cti; all but the last must be a ubr * - an exit cbr or call must be the final instr in the block * - only one mbr; must be the final instr in the block and the exit target * - clients can't change the exit of blocks ending in a syscall * (or int), and the syscall must be the final instr in the block; * client can, however, remove the syscall and then add a different exit * - client can't add a translation target that's outside of the original * source code bounds, or else our cache consistency breaks down * (the one exception to this is that a jump can translate to its target) */ /* we set to NULL to have a default of fall-through */ bb->exit_target = NULL; bb->exit_type = 0; /* N.B.: we're walking backward */ for (inst = instrlist_last(bb->ilist); inst != NULL; inst = instr_get_prev(inst)) { if (!instr_opcode_valid(inst)) continue; if (instr_is_cti(inst) && inst != instrlist_last(bb->ilist)) { /* PR 213005: coarse_units can't handle added ctis (meta or not) * since decode_fragment(), used for state recreation, can't * distinguish from exit cti. * i#665: we now support intra-fragment meta ctis * to make persistence usable for clients */ if (!opnd_is_instr(instr_get_target(inst)) || instr_is_app(inst)) { bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } } if (instr_is_meta(inst)) { #ifdef ANNOTATIONS /* Save the trailing_annotation_pc in case a client truncated the bb there. 
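             * (the label's data area carries both the annotation's app pc and its
             * instrumentation pc; they are used further below (i#1613) to either
             * extend the fall-through over the annotation or drop the annotation)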
*/ if (is_annotation_label(inst) && last_app_instr == NULL) { dr_instr_label_data_t *label_data = instr_get_label_data_area(inst); trailing_annotation_pc = GET_ANNOTATION_APP_PC(label_data); instrumentation_pc = GET_ANNOTATION_INSTRUMENTATION_PC(label_data); annotation_label = inst; } #endif continue; } #ifdef X86 if (!d_r_is_avx512_code_in_use()) { if (ZMM_ENABLED()) { if (instr_may_write_zmm_or_opmask_register(inst)) { LOG(THREAD, LOG_INTERP, 2, "Detected AVX-512 code in use\n"); d_r_set_avx512_code_in_use(true, NULL); proc_set_num_simd_saved(MCXT_NUM_SIMD_SLOTS); } } } #endif #ifdef ANNOTATIONS if (instrumentation_pc != NULL && !found_instrumentation_pc && instr_get_translation(inst) == instrumentation_pc) found_instrumentation_pc = true; #endif /* in case bb was truncated, find last non-meta fall-through */ if (last_app_instr == NULL) last_app_instr = inst; /* PR 215217: client should not add new source code regions, else our * cache consistency (both page prot and selfmod) will fail */ ASSERT(!bb->for_cache || bb->vmlist != NULL); /* For selfmod recreation we don't check vmareas so we don't have vmlist. * We live w/o the checks there. */ CLIENT_ASSERT( !bb->for_cache || vm_list_overlaps(dcontext, bb->vmlist, instr_get_translation(inst), instr_get_translation(inst) + 1) || (instr_is_ubr(inst) && opnd_is_pc(instr_get_target(inst)) && instr_get_translation(inst) == opnd_get_pc(instr_get_target(inst))) /* the displaced code and jmp return from intercept buffer * has translation fields set to hooked app routine */ IF_WINDOWS(|| dr_fragment_app_pc(bb->start_pc) != bb->start_pc), "block's app sources (instr_set_translation() targets) " "must remain within original bounds"); #ifdef AARCH64 if (instr_get_opcode(inst) == OP_isb) { CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "OP_isb must be last instruction in block"); } #endif /* PR 307284: we didn't process syscalls and ints pre-client * so do so now to get bb->flags and bb->exit_type set */ if (instr_is_syscall(inst) || instr_get_opcode(inst) == IF_X86_ELSE(OP_int, OP_svc)) { instr_t *tmp = bb->instr; bb->instr = inst; if (instr_is_syscall(bb->instr)) bb_process_syscall(dcontext, bb); else if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { /* non-syscall int */ bb_process_interrupt(dcontext, bb); } if (inst != instrlist_last(bb->ilist)) bb->instr = tmp; } /* ensure syscall/int2b terminates block */ client_check_syscall(bb->ilist, inst, &found_syscall, &found_int); if (instr_will_be_exit_cti(inst)) { if (!found_exit_cti) { /* We're about to clobber the exit_type and could lose any * special flags set above, even if the client doesn't change * the exit target. We undo such flags after this ilist walk * to support client removal of syscalls/ints. * EXIT_IS_IND_JMP_PLT() is used for -IAT_{convert,elide}, which * is off by default for CI; it's also used for native_exec, * but we're not sure if we want to support that with CI. 
* xref case 10846 and i#198 */ CLIENT_ASSERT( !TEST(~(LINK_DIRECT | LINK_INDIRECT | LINK_CALL | LINK_RETURN | LINK_JMP | LINK_NI_SYSCALL_ALL | LINK_SPECIAL_EXIT IF_WINDOWS(| LINK_CALLBACK_RETURN)), bb->exit_type) && !EXIT_IS_IND_JMP_PLT(bb->exit_type), "client unsupported block exit type internal error"); found_exit_cti = true; bb->instr = inst; if ((instr_is_near_ubr(inst) || instr_is_near_call_direct(inst)) /* conditional OP_bl needs the cbr code below */ IF_ARM(&&!instr_is_cbr(inst))) { CLIENT_ASSERT(instr_is_near_ubr(inst) || inst == instrlist_last(bb->ilist) || /* for elision we assume calls are followed * by their callee target code */ DYNAMO_OPTION(max_elide_call) > 0, "an exit call must terminate the block"); /* a ubr need not be the final instr */ if (inst == last_app_instr) { bb->exit_target = instr_get_branch_target_pc(inst); bb->exit_type = instr_branch_type(inst); } } else if (instr_is_mbr(inst) || instr_is_far_cti(inst) IF_ARM(/* mode-switch direct is treated as indirect */ || instr_get_opcode(inst) == OP_blx)) { CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "an exit mbr or far cti must terminate the block"); bb->exit_type = instr_branch_type(inst); #ifdef ARM if (instr_get_opcode(inst) == OP_blx) bb->ibl_branch_type = IBL_INDCALL; else #endif bb->ibl_branch_type = get_ibl_branch_type(inst); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); } else { ASSERT(instr_is_cbr(inst)); CLIENT_ASSERT(inst == instrlist_last(bb->ilist), "an exit cbr must terminate the block"); /* A null exit target specifies a cbr (see below). */ bb->exit_target = NULL; bb->exit_type = 0; instr_exit_branch_set_type(bb->instr, instr_branch_type(inst)); } /* since we're walking backward, at the first exit cti * we can check for post-cti code */ if (inst != instrlist_last(bb->ilist)) { if (TEST(FRAG_COARSE_GRAIN, bb->flags)) { /* PR 213005: coarse can't handle code beyond ctis */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } /* decode_fragment can't handle code beyond ctis */ if (!instr_is_near_call_direct(inst) || DYNAMO_OPTION(max_elide_call) == 0) bb->flags |= FRAG_CANNOT_BE_TRACE; } } /* Case 10784: Clients can confound trace building when they * introduce more than one exit cti; we'll just disable traces * for these fragments. */ else { CLIENT_ASSERT(instr_is_near_ubr(inst) || (instr_is_near_call_direct(inst) && /* for elision we assume calls are followed * by their callee target code */ DYNAMO_OPTION(max_elide_call) > 0), "a second exit cti must be a ubr"); if (!instr_is_near_call_direct(inst) || DYNAMO_OPTION(max_elide_call) == 0) bb->flags |= FRAG_CANNOT_BE_TRACE; /* our cti check above should have already turned off coarse */ ASSERT(!TEST(FRAG_COARSE_GRAIN, bb->flags)); } } } /* To handle the client modifying syscall numbers we cannot inline * syscalls in the middle of a bb. */ ASSERT(!DYNAMO_OPTION(inline_ignored_syscalls)); ASSERT((TEST(FRAG_HAS_SYSCALL, bb->flags) && found_syscall) || (!TEST(FRAG_HAS_SYSCALL, bb->flags) && !found_syscall)); IF_WINDOWS(ASSERT(!TEST(LINK_CALLBACK_RETURN, bb->exit_type) || found_int)); /* Note that we do NOT remove, or set, FRAG_HAS_DIRECT_CTI based on * client modifications: setting it for a selfmod fragment could * result in an infinite loop, and it is mainly used for elision, which we * are not doing for client ctis. Clients are not supposed add new * app source regions (PR 215217). */ /* Client might have truncated: re-set fall-through, accounting for annotations. 
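     * (sketch: bb->cur_pc still points past the originally-decoded last instr, so we
     * recompute the fall-through from the translation of the last remaining app
     * instr, adjusting for a trailing annotation or a hook-occluded pc as needed)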
*/ if (last_app_instr != NULL) { bool adjusted_cur_pc = false; app_pc xl8 = instr_get_translation(last_app_instr); #ifdef ANNOTATIONS if (annotation_label != NULL) { if (found_instrumentation_pc) { /* i#1613: if the last app instruction precedes an annotation, extend the * translation footprint of `bb` to include the annotation (such that * the next bb starts after the annotation, avoiding duplication). */ bb->cur_pc = trailing_annotation_pc; adjusted_cur_pc = true; LOG(THREAD, LOG_INTERP, 3, "BB ends immediately prior to an annotation. " "Setting `bb->cur_pc` (for fall-through) to " PFX " so that the " "annotation will be included.\n", bb->cur_pc); } else { /* i#1613: the client removed the app instruction prior to an annotation. * We infer that the client wants to skip the annotation. Remove it now. */ instr_t *annotation_next = instr_get_next(annotation_label); instrlist_remove(bb->ilist, annotation_label); instr_destroy(dcontext, annotation_label); if (is_annotation_return_placeholder(annotation_next)) { instrlist_remove(bb->ilist, annotation_next); instr_destroy(dcontext, annotation_next); } } } #endif #if defined(WINDOWS) && !defined(STANDALONE_DECODER) /* i#1632: if the last app instruction was taken from an intercept because it was * occluded by the corresponding hook, `bb->cur_pc` should point to the original * app pc (where that instruction was copied from). Cannot use `decode_next_pc()` * on the original app pc because it is now in the middle of the hook. */ if (!adjusted_cur_pc && could_be_hook_occluded_pc(xl8)) { app_pc intercept_pc = get_intercept_pc_from_app_pc( xl8, true /* occlusions only */, false /* exclude start */); if (intercept_pc != NULL) { app_pc next_intercept_pc = decode_next_pc(dcontext, intercept_pc); bb->cur_pc = xl8 + (next_intercept_pc - intercept_pc); adjusted_cur_pc = true; LOG(THREAD, LOG_INTERP, 3, "BB ends in the middle of an intercept. " "Offsetting `bb->cur_pc` (for fall-through) to " PFX " in parallel " "to intercept instr at " PFX "\n", intercept_pc, bb->cur_pc); } } #endif /* We do not take instr_length of what the client put in, but rather * the length of the translation target */ if (!adjusted_cur_pc) { bb->cur_pc = decode_next_pc(dcontext, xl8); LOG(THREAD, LOG_INTERP, 3, "setting cur_pc (for fall-through) to " PFX "\n", bb->cur_pc); } /* don't set bb->instr if last instr is still syscall/int. * FIXME: I'm not 100% convinced the logic here covers everything * build_bb_ilist does. * FIXME: what about if last instr was invalid, or if client adds * some invalid instrs: xref bb_process_invalid_instr() */ if (bb->instr != NULL || (!found_int && !found_syscall)) bb->instr = last_app_instr; } else bb->instr = NULL; /* no app instrs left */ /* PR 215217: re-scan for accurate eflags. * FIXME: should we not do eflags tracking while decoding, then, and always * do it afterward? 
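     * (forward_eflags_analysis() below recomputes bb->eflags over the possibly
     * client-modified ilist, since the value tracked while decoding may be stale)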
*/ /* for -fast_client_decode, we don't support the client changing the app code */ if (!INTERNAL_OPTION(fast_client_decode)) { bb->eflags = forward_eflags_analysis(dcontext, bb->ilist, instrlist_first(bb->ilist)); } if (TEST(DR_EMIT_STORE_TRANSLATIONS, emitflags)) { /* PR 214962: let client request storage instead of recreation */ bb->flags |= FRAG_HAS_TRANSLATION_INFO; /* if we didn't have record on from start, can't store translation info */ CLIENT_ASSERT(!INTERNAL_OPTION(fast_client_decode), "-fast_client_decode not compatible with " "DR_EMIT_STORE_TRANSLATIONS"); ASSERT(bb->record_translation && bb->full_decode); } if (DYNAMO_OPTION(coarse_enable_freeze)) { /* If we're not persisting, ignore the presence or absence of the flag * so we avoid undoing savings from -opt_memory with a tool that * doesn't support persistence. */ if (!TEST(DR_EMIT_PERSISTABLE, emitflags)) { bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_client); } } if (TEST(DR_EMIT_MUST_END_TRACE, emitflags)) { /* i#848: let client terminate traces */ bb->flags |= FRAG_MUST_END_TRACE; } return true; } #ifdef DR_APP_EXPORTS static void mangle_pre_client(dcontext_t *dcontext, build_bb_t *bb) { if (bb->start_pc == (app_pc)dr_app_running_under_dynamorio) { /* i#1237: set return value to be true in dr_app_running_under_dynamorio */ instr_t *ret = instrlist_last(bb->ilist); instr_t *mov = instr_get_prev(ret); LOG(THREAD, LOG_INTERP, 3, "Found dr_app_running_under_dynamorio\n"); ASSERT(ret != NULL && instr_is_return(ret) && mov != NULL && IF_X86(instr_get_opcode(mov) == OP_mov_imm &&) IF_ARM(instr_get_opcode(mov) == OP_mov && OPND_IS_IMMED_INT(instr_get_src(mov, 0)) &&) IF_AARCH64(instr_get_opcode(mov) == OP_movz &&)( bb->start_pc == instr_get_raw_bits(mov) || /* the translation field might be NULL */ bb->start_pc == instr_get_translation(mov))); /* i#1998: ensure the instr is Level 3+ */ instr_decode(dcontext, mov); instr_set_src(mov, 0, OPND_CREATE_INT32(1)); } } #endif /* DR_APP_EXPORTS */ /* This routine is called from build_bb_ilist when the number of instructions reaches or * exceeds max_bb_instr. It checks if bb is safe to stop after instruction stop_after. * On ARM, we do not stop bb building in the middle of an IT block unless there is a * conditional syscall. */ static bool bb_safe_to_stop(dcontext_t *dcontext, instrlist_t *ilist, instr_t *stop_after) { #ifdef ARM ASSERT(ilist != NULL && instrlist_last(ilist) != NULL); /* only thumb mode could have IT blocks */ if (dr_get_isa_mode(dcontext) != DR_ISA_ARM_THUMB) return true; if (stop_after == NULL) stop_after = instrlist_last_app(ilist); if (instr_get_opcode(stop_after) == OP_it) return false; if (!instr_is_predicated(stop_after)) return true; if (instr_is_cti(stop_after) /* must be the last instr if in IT block */ || /* we do not stop in the middle of an IT block unless it is a syscall */ instr_is_syscall(stop_after) || instr_is_interrupt(stop_after)) return true; return instr_is_last_in_it_block(stop_after, NULL, NULL); #endif /* ARM */ return true; } /* Interprets the application's instructions until the end of a basic * block is found, and prepares the resulting instrlist for creation of * a fragment, but does not create the fragment, just returns the instrlist. * Caller is responsible for freeing the list and its instrs! * * Input parameters in bb control aspects of creation: * If app_interp is true, this is considered real app code. 
* If pass_to_client is true, * calls instrument routine on bb->ilist before mangling * If mangle_ilist is true, mangles the ilist, else leaves it in app form * If record_vmlist is true, updates the vmareas data structures * If for_cache is true, bb building lock is assumed to be held. * record_vmlist should also be true. * Caller must set and later clear dcontext->bb_build_info. * For !for_cache, build_bb_ilist() sets and clears it, making the * assumption that the caller is doing no other reading from the region. * If record_translation is true, records translation for inserted instrs * If outf != NULL, does full disassembly with comments to outf * If overlap_info != NULL, records overlap information for the block in * the overlap_info (caller must fill in region_start and region_end). * * FIXME: now that we have better control over following direct ctis, * should we have adaptive mechanism to decided whether to follow direct * ctis, since some bmarks are better doing so (gap, vortex, wupwise) * and others are worse (apsi, perlbmk)? */ static void build_bb_ilist(dcontext_t *dcontext, build_bb_t *bb) { /* Design decision: we will not try to identify branches that target * instructions in this basic block, when we take those branches we will * just make a new basic block and duplicate part of this one */ int total_branches = 0; uint total_instrs = 0; /* maximum number of instructions for current basic block */ uint cur_max_bb_instrs = DYNAMO_OPTION(max_bb_instrs); uint total_writes = 0; /* only used for selfmod */ instr_t *non_cti; /* used if !full_decode */ byte *non_cti_start_pc; /* used if !full_decode */ uint eflags_6 = 0; /* holds arith eflags written so far (in read slots) */ #ifdef HOT_PATCHING_INTERFACE bool hotp_should_inject = false, hotp_injected = false; #endif app_pc page_start_pc = (app_pc)NULL; bool bb_build_nested = false; /* Caller will free objects allocated here so we must use the passed-in * dcontext for allocation; we need separate var for non-global dcontext. */ dcontext_t *my_dcontext = get_thread_private_dcontext(); DEBUG_DECLARE(bool regenerated = false;) bool stop_bb_on_fallthrough = false; ASSERT(bb->initialized); /* note that it's ok for bb->start_pc to be NULL as our check_new_page_start * will catch it */ /* vmlist must start out empty (or N/A) */ ASSERT(bb->vmlist == NULL || !bb->record_vmlist || bb->checked_start_vmarea); ASSERT(!bb->for_cache || bb->record_vmlist); /* for_cache assumes record_vmlist */ #ifdef CUSTOM_TRACES_RET_REMOVAL my_dcontext->num_calls = 0; my_dcontext->num_rets = 0; #endif /* Support bb abort on decode fault */ if (my_dcontext != NULL) { if (bb->for_cache) { /* Caller should have set! */ ASSERT(bb == (build_bb_t *)my_dcontext->bb_build_info); } else if (my_dcontext->bb_build_info == NULL) { my_dcontext->bb_build_info = (void *)bb; } else { /* For nested we leave the original, which should be the only vmlist, * and we give up on freeing dangling instr_t and instrlist_t from this * decode. * We need the original's for_cache so we know to free the bb_building_lock. * FIXME: use TRY to handle decode exceptions locally? Shouldn't have * violation remediations on a !for_cache build. 
*/ ASSERT(bb->vmlist == NULL && !bb->for_cache && ((build_bb_t *)my_dcontext->bb_build_info)->for_cache); /* FIXME: add nested as a field so we can have stat on nested faults */ bb_build_nested = true; } } else ASSERT(dynamo_exited); if ((bb->record_translation && !INTERNAL_OPTION(fast_client_decode)) || !bb->for_cache /* to split riprel, need to decode every instr */ /* in x86_to_x64, need to translate every x86 instr */ IF_X64(|| DYNAMO_OPTION(coarse_split_riprel) || DYNAMO_OPTION(x86_to_x64)) || INTERNAL_OPTION(full_decode) /* We separate rseq regions into their own blocks to make this check easier. */ IF_LINUX(|| (!vmvector_empty(d_r_rseq_areas) && vmvector_overlap(d_r_rseq_areas, bb->start_pc, bb->start_pc + 1)))) bb->full_decode = true; else { #ifdef CHECK_RETURNS_SSE2 bb->full_decode = true; #endif } LOG(THREAD, LOG_INTERP, 3, "\ninterp%s: ", IF_X86_64_ELSE(X64_MODE_DC(dcontext) ? "" : " (x86 mode)", "")); BBPRINT(bb, 3, "start_pc = " PFX "\n", bb->start_pc); DOSTATS({ if (bb->app_interp) { if (fragment_lookup_deleted(dcontext, bb->start_pc)) { /* this will look up private 1st, so yes we will get * dup stats if multiple threads have regnerated the * same private tag, or if a shared tag is deleted and * multiple privates created */ regenerated = true; STATS_INC(num_fragments_deja_vu); } } }); /* start converting instructions into IR */ if (!bb->checked_start_vmarea) check_new_page_start(dcontext, bb); #if defined(WINDOWS) && !defined(STANDALONE_DECODER) /* i#1632: if `bb->start_pc` points into the middle of a DR intercept hook, change * it so instructions are taken from the intercept instead (note that * `instr_set_translation` will hide this adjustment from the client). N.B.: this * must follow `check_new_page_start()` (above) or `bb.vmlist` will be wrong. */ if (could_be_hook_occluded_pc(bb->start_pc)) { app_pc intercept_pc = get_intercept_pc_from_app_pc( bb->start_pc, true /* occlusions only */, true /* exclude start pc */); if (intercept_pc != NULL) { LOG(THREAD, LOG_INTERP, 3, "Changing start_pc from hook-occluded app pc " PFX " to intercept pc " PFX "\n", bb->start_pc, intercept_pc); bb->start_pc = intercept_pc; } } #endif bb->cur_pc = bb->start_pc; /* for translation in case we break out of loop before decoding any * instructions, (i.e. check_for_stopping_point()) */ bb->instr_start = bb->cur_pc; /* create instrlist after check_new_page_start to avoid memory leak * on unreadable memory -- though we now properly clean up and won't leak * on unreadable on any check_thread_vm_area call */ bb->ilist = instrlist_create(dcontext); bb->instr = NULL; /* avoid discrepancy in finding invalid instructions between fast decode * and the full decode of sandboxing by doing full decode up front */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { bb->full_decode = true; bb->follow_direct = false; } if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) { bb->full_decode = true; bb->record_translation = true; } if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->start_pc) { /* Decodes only one instruction because of single step exception. 
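         * (i.e., cur_max_bb_instrs is capped at 1 so the decode loop below builds a
         * single-instruction block at the single-stepped pc)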
*/ cur_max_bb_instrs = 1; } KSTART(bb_decoding); while (true) { if (check_for_stopping_point(dcontext, bb)) { BBPRINT(bb, 3, "interp: found DynamoRIO stopping point at " PFX "\n", bb->cur_pc); break; } /* fill in a new instr structure and update bb->cur_pc */ bb->instr = instr_create(dcontext); /* if !full_decode: * All we need to decode are control-transfer instructions * For efficiency, put all non-cti into a single instr_t structure */ non_cti_start_pc = bb->cur_pc; do { /* If the thread's vmareas aren't being added to, indicate the * page that's being decoded. */ if (!bb->record_vmlist && page_start_pc != (app_pc)PAGE_START(bb->cur_pc)) { page_start_pc = (app_pc)PAGE_START(bb->cur_pc); set_thread_decode_page_start(my_dcontext == NULL ? dcontext : my_dcontext, page_start_pc); } bb->instr_start = bb->cur_pc; if (bb->full_decode) { /* only going through this do loop once! */ bb->cur_pc = IF_AARCH64_ELSE(decode_with_ldstex, decode)(dcontext, bb->cur_pc, bb->instr); if (bb->record_translation) instr_set_translation(bb->instr, bb->instr_start); } else { /* must reset, may go through loop multiple times */ instr_reset(dcontext, bb->instr); bb->cur_pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, bb->cur_pc, bb->instr); #if defined(ANNOTATIONS) && !(defined(X64) && defined(WINDOWS)) /* Quickly check whether this may be a Valgrind annotation. */ if (is_encoded_valgrind_annotation_tail(bb->instr_start)) { /* Might be an annotation, so try the (slower) full check. */ if (is_encoded_valgrind_annotation(bb->instr_start, bb->start_pc, (app_pc)PAGE_START(bb->cur_pc))) { /* Valgrind annotation needs full decode; clean up and repeat. */ KSTOP(bb_decoding); instr_destroy(dcontext, bb->instr); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } bb->full_decode = true; build_bb_ilist(dcontext, bb); return; } } #endif } ASSERT(!bb->check_vm_area || bb->checked_end != NULL); if (bb->check_vm_area && bb->cur_pc != NULL && bb->cur_pc - 1 >= bb->checked_end) { /* We're beyond the vmarea allowed -- so check again. * Ideally we'd want to check BEFORE we decode from the * subsequent page, as it could be inaccessible, but not worth * the time estimating the size from a variable number of bytes * before the page boundary. Instead we rely on other * mechanisms to handle faults while decoding, which we need * anyway to handle racy unmaps by the app. */ uint old_flags = bb->flags; DEBUG_DECLARE(bool is_first_instr = (bb->instr_start == bb->start_pc)); if (!check_new_page_contig(dcontext, bb, bb->cur_pc - 1)) { /* i#989: Stop bb building before falling through to an * incompatible vmarea. */ ASSERT(!is_first_instr); bb->cur_pc = NULL; stop_bb_on_fallthrough = true; break; } if (!TEST(FRAG_SELFMOD_SANDBOXED, old_flags) && TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { /* Restart the decode loop with full_decode and * !follow_direct, which are needed for sandboxing. This * can't happen more than once because sandboxing is now on. */ ASSERT(is_first_instr); bb->full_decode = true; bb->follow_direct = false; bb->cur_pc = bb->instr_start; instr_reset(dcontext, bb->instr); continue; } } total_instrs++; DOELOG(3, LOG_INTERP, { disassemble_with_bytes(dcontext, bb->instr_start, THREAD); }); if (bb->outf != INVALID_FILE) disassemble_with_bytes(dcontext, bb->instr_start, bb->outf); if (!instr_valid(bb->instr)) break; /* before eflags analysis! 
*/ #ifdef X86 /* If the next instruction at bb->cur_pc fires a debug register, * then we should stop this basic block before getting to it. */ if (my_dcontext != NULL && debug_register_fire_on_addr(bb->instr_start)) { stop_bb_on_fallthrough = true; break; } if (!d_r_is_avx512_code_in_use()) { if (ZMM_ENABLED()) { if (instr_get_prefix_flag(bb->instr, PREFIX_EVEX)) { /* For AVX-512 detection in bb builder, we're checking only * for the prefix flag, which for example can be set by * decode_cti. In client_process_bb, post-client instructions * are checked with instr_may_write_zmm_register. */ LOG(THREAD, LOG_INTERP, 2, "Detected AVX-512 code in use\n"); d_r_set_avx512_code_in_use(true, instr_get_app_pc(bb->instr)); proc_set_num_simd_saved(MCXT_NUM_SIMD_SLOTS); } } } #endif /* Eflags analysis: * We do this even if -unsafe_ignore_eflags_prefix b/c it doesn't cost that * much and we can use the analysis to detect any bb that reads a flag * prior to writing it. */ if (bb->eflags != EFLAGS_WRITE_ARITH IF_X86(&&bb->eflags != EFLAGS_READ_OF)) bb->eflags = eflags_analysis(bb->instr, bb->eflags, &eflags_6); /* stop decoding at an invalid instr (tested above) or a cti *(== opcode valid) or a possible SEH frame push (if * -process_SEH_push). */ #ifdef WINDOWS if (DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)) { STATS_INC(num_bb_build_fs); break; } #endif #ifdef X64 if (instr_has_rel_addr_reference(bb->instr)) { /* PR 215397: we need to split these out for re-relativization */ break; } #endif #if defined(UNIX) && defined(X86) if (INTERNAL_OPTION(mangle_app_seg) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS | PREFIX_SEG_GS)) { /* These segment prefix flags are not persistent and are * only used as hints just after decoding. * They are not accurate later and can be misleading. * This can only be used right after decoding for quick check, * and a walk of operands should be performed to look for * actual far mem refs. */ /* i#107, mangle reference with segment register */ /* we up-decode the instr when !full_decode to make sure it will * pass the instr_opcode_valid check in mangle and be mangled. */ instr_get_opcode(bb->instr); break; } #endif /* i#107, opcode mov_seg will be set in decode_cti, * so instr_opcode_valid(bb->instr) is true, and terminates the loop. */ } while (!instr_opcode_valid(bb->instr) && total_instrs <= cur_max_bb_instrs); if (bb->cur_pc == NULL) { /* invalid instr or vmarea change: reset bb->cur_pc, will end bb * after updating stats */ bb->cur_pc = bb->instr_start; } /* We need the translation when mangling calls and jecxz/loop*. * May as well set it for all cti's since there's * really no extra overhead in doing so. Note that we go * through the above loop only once for cti's, so it's safe * to set the translation here. */ if (instr_opcode_valid(bb->instr) && (instr_is_cti(bb->instr) || bb->record_translation)) instr_set_translation(bb->instr, bb->instr_start); #ifdef HOT_PATCHING_INTERFACE /* If this lookup succeeds then the current bb needs to be patched. * In hotp_inject(), address lookup will be done for each instruction * pc in this bb and patching will be done if an exact match is found. * * Hot patching should be done only for app interp and recreating * pc, not for reproducing app code. Hence we use mangle_ilist. * See case 5981. 
* * FIXME: this lookup can further be reduced by determining whether or * not the current bb's module needs patching via check_new_page* */ if (DYNAMO_OPTION(hot_patching) && bb->mangle_ilist && !hotp_should_inject) { /* case 8780: we may hold the lock; FIXME: figure out if this can * be avoided - messy to hold hotp_vul_table lock like this for * unnecessary operations. */ bool owns_hotp_lock = self_owns_write_lock(hotp_get_lock()); if (hotp_does_region_need_patch(non_cti_start_pc, bb->cur_pc, owns_hotp_lock)) { BBPRINT(bb, 2, "hotpatch match in " PFX ": " PFX "-" PFX "\n", bb->start_pc, non_cti_start_pc, bb->cur_pc); hotp_should_inject = true; /* Don't elide if we are going to hot patch this bb because * the patch point can be a direct cti; eliding would result * in the patch not being applied. See case 5901. * FIXME: we could make this more efficient by only turning * off follow_direct if the instr is direct cti. */ bb->follow_direct = false; DOSTATS({ if TEST(FRAG_HAS_DIRECT_CTI, bb->flags) STATS_INC(hotp_num_frag_direct_cti); }); } } #endif if (bb->full_decode) { if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) && instr_valid(bb->instr) && instr_writes_memory(bb->instr)) { /* to allow tailing non-writes, end prior to the write beyond the max */ total_writes++; if (total_writes > DYNAMO_OPTION(selfmod_max_writes)) { BBPRINT(bb, 3, "reached selfmod write limit %d, stopping\n", DYNAMO_OPTION(selfmod_max_writes)); STATS_INC(num_max_selfmod_writes_enforced); bb_stop_prior_to_instr(dcontext, bb, false /*not added to bb->ilist*/); break; } } } else if (bb->instr_start != non_cti_start_pc) { /* instr now holds the cti, so create an instr_t for the non-cti */ non_cti = instr_create(dcontext); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint(bb->instr_start - non_cti_start_pc))); instr_set_raw_bits(non_cti, non_cti_start_pc, (uint)(bb->instr_start - non_cti_start_pc)); if (bb->record_translation) instr_set_translation(non_cti, non_cti_start_pc); /* add non-cti instructions to instruction list */ instrlist_append(bb->ilist, non_cti); } DOSTATS({ /* This routine is also called for recreating state, we only want * to count app code when we build new bbs, which is indicated by * the bb->app_interp parameter */ if (bb->app_interp && !regenerated) { /* avoid double-counting for adaptive working set */ /* FIXME - ubr ellision leads to double couting. We also * double count when we have multiple entry points into the * same block of cti free instructinos. */ STATS_ADD(app_code_seen, (bb->cur_pc - non_cti_start_pc)); LOG(THREAD, LOG_INTERP, 5, "adding %d bytes to total app code seen\n", bb->cur_pc - non_cti_start_pc); } }); if (!instr_valid(bb->instr)) { bb_process_invalid_instr(dcontext, bb); break; } if (stop_bb_on_fallthrough) { bb_stop_prior_to_instr(dcontext, bb, false /*not appended*/); break; } #ifdef ANNOTATIONS # if !(defined(X64) && defined(WINDOWS)) /* Quickly check whether this may be a Valgrind annotation. */ if (is_decoded_valgrind_annotation_tail(bb->instr)) { /* Might be an annotation, so try the (slower) full check. */ if (is_encoded_valgrind_annotation(bb->instr_start, bb->start_pc, (app_pc)PAGE_START(bb->cur_pc))) { instrument_valgrind_annotation(dcontext, bb->ilist, bb->instr, bb->instr_start, bb->cur_pc, total_instrs); continue; } } else /* Top-level annotation recognition is unambiguous (xchg vs. jmp). 
*/ # endif if (is_annotation_jump_over_dead_code(bb->instr)) { instr_t *substitution = NULL; if (instrument_annotation( dcontext, &bb->cur_pc, &substitution _IF_WINDOWS_X64(bb->cur_pc < bb->checked_end))) { instr_destroy(dcontext, bb->instr); if (substitution == NULL) continue; /* ignore annotation if no handlers are registered */ else bb->instr = substitution; } } #endif #ifdef WINDOWS if (DYNAMO_OPTION(process_SEH_push) && instr_get_prefix_flag(bb->instr, PREFIX_SEG_FS)) { DEBUG_DECLARE(ssize_t dbl_count = bb->cur_pc - bb->instr_start); if (!bb_process_fs_ref(dcontext, bb)) { DOSTATS({ if (bb->app_interp) { LOG(THREAD, LOG_INTERP, 3, "stopping bb at fs-using instr @ " PFX "\n", bb->instr_start); STATS_INC(num_process_SEH_bb_early_terminate); /* don't double count the fs instruction itself * since we removed it from this bb */ if (!regenerated) STATS_ADD(app_code_seen, -dbl_count); } }); break; } } #else # if defined(X86) && defined(LINUX) if (instr_get_prefix_flag(bb->instr, (SEG_TLS == SEG_GS) ? PREFIX_SEG_GS : PREFIX_SEG_FS) /* __errno_location is interpreted when global, though it's hidden in TOT */ IF_UNIX(&&!is_in_dynamo_dll(bb->instr_start)) && /* i#107 allows DR/APP using the same segment register. */ !INTERNAL_OPTION(mangle_app_seg)) { CLIENT_ASSERT(false, "no support for app using DR's segment w/o -mangle_app_seg"); ASSERT_BUG_NUM(205276, false); } # endif /* X86 */ #endif /* WINDOWS */ if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->instr_start) { bb_process_single_step(dcontext, bb); /* Stops basic block right now. */ break; } /* far direct is treated as indirect (i#823) */ if (instr_is_near_ubr(bb->instr)) { if (bb_process_ubr(dcontext, bb)) continue; else { if (bb->instr != NULL) /* else, bb_process_ubr() set exit_type */ bb->exit_type |= instr_branch_type(bb->instr); break; } } else instrlist_append(bb->ilist, bb->instr); #ifdef RETURN_AFTER_CALL if (bb->app_interp && dynamo_options.ret_after_call) { if (instr_is_call(bb->instr)) { /* add after call instruction to valid return targets */ add_return_target(dcontext, bb->instr_start, bb->instr); } } #endif /* RETURN_AFTER_CALL */ #ifdef X64 /* must be prior to mbr check since mbr location could be rip-rel */ if (DYNAMO_OPTION(coarse_split_riprel) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags) && instr_has_rel_addr_reference(bb->instr)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have ref be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); break; /* stop bb */ } else { /* single-instr fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_riprel); } } #endif if (instr_is_near_call_direct(bb->instr)) { if (!bb_process_call_direct(dcontext, bb)) { if (bb->instr != NULL) bb->exit_type |= instr_branch_type(bb->instr); break; } } else if (instr_is_mbr(bb->instr) /* including indirect calls */ IF_X86( /* far direct is treated as indirect (i#823) */ || instr_get_opcode(bb->instr) == OP_jmp_far || instr_get_opcode(bb->instr) == OP_call_far) IF_ARM(/* mode-switch direct is treated as indirect */ || instr_get_opcode(bb->instr) == OP_blx)) { /* Manage the case where we don't need to perform 'normal' * indirect branch processing. 
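 * An indirect call treated as a syscall, converted to a direct call
 * (-indcall2direct), or routed through the IAT (-IAT_convert), as well as an
 * IAT-convertible indirect jump, bypasses the generic IBL handling below.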
*/ bool normal_indirect_processing = true; bool elide_and_continue_if_converted = true; if (instr_is_return(bb->instr)) { bb->ibl_branch_type = IBL_RETURN; STATS_INC(num_returns); } else if (instr_is_call_indirect(bb->instr)) { STATS_INC(num_all_calls); STATS_INC(num_indirect_calls); if (DYNAMO_OPTION(coarse_split_calls) && DYNAMO_OPTION(coarse_units) && TEST(FRAG_COARSE_GRAIN, bb->flags)) { if (instrlist_first(bb->ilist) != bb->instr) { /* have call be in its own bb */ bb_stop_prior_to_instr(dcontext, bb, true /*appended already*/); break; /* stop bb */ } else { /* single-call fine-grained bb */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_cti); } } /* If the indirect call can be converted into a direct one, * bypass normal indirect call processing. * First, check for a call* that we treat as a syscall. */ if (bb_process_indcall_syscall(dcontext, bb, &elide_and_continue_if_converted)) { normal_indirect_processing = false; } else if (DYNAMO_OPTION(indcall2direct) && bb_process_convertible_indcall(dcontext, bb)) { normal_indirect_processing = false; elide_and_continue_if_converted = true; } else if (DYNAMO_OPTION(IAT_convert) && bb_process_IAT_convertible_indcall( dcontext, bb, &elide_and_continue_if_converted)) { normal_indirect_processing = false; } else bb->ibl_branch_type = IBL_INDCALL; #ifdef X86 } else if (instr_get_opcode(bb->instr) == OP_jmp_far) { /* far direct is treated as indirect (i#823) */ bb->ibl_branch_type = IBL_INDJMP; } else if (instr_get_opcode(bb->instr) == OP_call_far) { /* far direct is treated as indirect (i#823) */ bb->ibl_branch_type = IBL_INDCALL; #elif defined(ARM) } else if (instr_get_opcode(bb->instr) == OP_blx) { /* mode-changing direct call is treated as indirect */ bb->ibl_branch_type = IBL_INDCALL; #endif /* X86 */ } else { /* indirect jump */ /* was prev instr a direct call? if so, this is a PLT-style ind call */ instr_t *prev = instr_get_prev(bb->instr); if (prev != NULL && instr_opcode_valid(prev) && instr_is_call_direct(prev)) { bb->exit_type |= INSTR_IND_JMP_PLT_EXIT; /* just because we have a CALL to JMP* makes it only a _likely_ PLT call, we still have to make sure it goes through IAT - see case 4269 */ STATS_INC(num_indirect_jumps_likely_PLT); } elide_and_continue_if_converted = true; if (DYNAMO_OPTION(IAT_convert) && bb_process_IAT_convertible_indjmp(dcontext, bb, &elide_and_continue_if_converted)) { /* Clear the IND_JMP_PLT_EXIT flag since we've converted * the PLT to a direct transition (and possibly elided). * Xref case 7867 for why leaving this flag in the eliding * case can cause later failures. 
*/ bb->exit_type &= ~INSTR_CALL_EXIT; /* leave just JMP */ normal_indirect_processing = false; } else /* FIXME: this can always be set */ bb->ibl_branch_type = IBL_INDJMP; STATS_INC(num_indirect_jumps); } #ifdef CUSTOM_TRACES_RET_REMOVAL if (instr_is_return(bb->instr)) my_dcontext->num_rets++; else if (instr_is_call_indirect(bb->instr)) my_dcontext->num_calls++; #endif /* set exit type since this instruction will get mangled */ if (normal_indirect_processing) { bb->exit_type |= instr_branch_type(bb->instr); bb->exit_target = get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type); LOG(THREAD, LOG_INTERP, 4, "mbr exit target = " PFX "\n", bb->exit_target); break; } else { /* decide whether to stop bb here */ if (!elide_and_continue_if_converted) break; /* fall through for -max_bb_instrs check */ } } else if (instr_is_cti(bb->instr) && (!instr_is_call(bb->instr) || instr_is_cbr(bb->instr))) { total_branches++; if (total_branches >= BRANCH_LIMIT) { /* set type of 1st exit cti for cbr (bb->exit_type is for fall-through) */ instr_exit_branch_set_type(bb->instr, instr_branch_type(bb->instr)); break; } } else if (instr_is_syscall(bb->instr)) { if (!bb_process_syscall(dcontext, bb)) break; } /* end syscall */ else if (instr_get_opcode(bb->instr) == IF_X86_ELSE(OP_int, OP_svc)) { /* non-syscall int */ if (!bb_process_interrupt(dcontext, bb)) break; } #ifdef AARCH64 /* OP_isb, when mangled, has a potential side exit. */ else if (instr_get_opcode(bb->instr) == OP_isb) break; #endif #if 0 /*i#1313, i#1314*/ else if (instr_get_opcode(bb->instr) == OP_getsec) { /* XXX i#1313: if we support CPL0 in the future we'll need to * dynamically handle the leaf functions here, which can change eip * and other state. We'll need OP_getsec in decode_cti(). */ } else if (instr_get_opcode(bb->instr) == OP_xend || instr_get_opcode(bb->instr) == OP_xabort) { /* XXX i#1314: support OP_xend failing and setting eip to the * fallback pc recorded by OP_xbegin. We'll need both in decode_cti(). */ } #endif #ifdef CHECK_RETURNS_SSE2 /* There are SSE and SSE2 instrs that operate on MMX instead of XMM, but * we perform a simple coarse-grain check here. */ else if (instr_is_sse_or_sse2(bb->instr)) { FATAL_USAGE_ERROR(CHECK_RETURNS_SSE2_XMM_USED, 2, get_application_name(), get_application_pid()); } #endif #if defined(UNIX) && !defined(DGC_DIAGNOSTICS) && defined(X86) else if (instr_get_opcode(bb->instr) == OP_mov_seg) { if (!bb_process_mov_seg(dcontext, bb)) break; } #endif else if (instr_saves_float_pc(bb->instr)) { bb_process_float_pc(dcontext, bb); break; } if (bb->cur_pc == bb->stop_pc) { /* We only check stop_pc for full_decode, so not in inner loop. */ BBPRINT(bb, 3, "reached end pc " PFX ", stopping\n", bb->stop_pc); break; } if (total_instrs > DYNAMO_OPTION(max_bb_instrs)) { /* this could be an enormous basic block, or it could * be some degenerate infinite-loop case like a call * to a function that calls exit() and then calls itself, * so just end it here, we'll pick up where we left off * if it's legit */ BBPRINT(bb, 3, "reached -max_bb_instrs(%d): %d, ", DYNAMO_OPTION(max_bb_instrs), total_instrs); if (bb_safe_to_stop(dcontext, bb->ilist, NULL)) { BBPRINT(bb, 3, "stopping\n"); STATS_INC(num_max_bb_instrs_enforced); break; } else { /* XXX i#1669: cannot stop bb now, what's the best way to handle? * We can either roll-back and find previous safe stop point, or * simply extend the bb with a few more instructions. 
* We can always lower the -max_bb_instrs to offset the additional * instructions. In contrast, roll-back seems complex and * potentially problematic. */ BBPRINT(bb, 3, "cannot stop, continuing\n"); } } } /* end of while (true) */ KSTOP(bb_decoding); #ifdef DEBUG_MEMORY /* make sure anyone who destroyed also set to NULL */ ASSERT(bb->instr == NULL || (bb->instr->bytes != (byte *)HEAP_UNALLOCATED_PTR_UINT && bb->instr->bytes != (byte *)HEAP_ALLOCATED_PTR_UINT && bb->instr->bytes != (byte *)HEAP_PAD_PTR_UINT)); #endif if (!check_new_page_contig(dcontext, bb, bb->cur_pc - 1)) { ASSERT(false && "Should have checked cur_pc-1 in decode loop"); } bb->end_pc = bb->cur_pc; BBPRINT(bb, 3, "end_pc = " PFX "\n\n", bb->end_pc); /* We could put this in check_new_page_jmp where it already checks * for native_exec overlap, but selfmod ubrs don't even call that routine */ if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_callcall) && !vmvector_empty(native_exec_areas) && bb->app_interp && bb->instr != NULL && (instr_is_near_ubr(bb->instr) || instr_is_near_call_direct(bb->instr)) && instrlist_first(bb->ilist) == instrlist_last(bb->ilist)) { /* Case 4564/3558: handle .NET COM method table where a call* targets * a call to a native_exec dll -- we need to put the gateway at the * call* to avoid retaddr mangling of the method table call. * As a side effect we can also handle call*, jmp. * We don't actually verify or care that it was specifically a call*, * whatever at_native_exec_gateway() requires to assure itself that we're * at a return-address-clobberable point. */ app_pc tgt = opnd_get_pc(instr_get_target(bb->instr)); if (is_native_pc(tgt) && at_native_exec_gateway(dcontext, tgt, &bb->native_call _IF_DEBUG(true /*xfer tgt*/))) { /* replace this ilist w/ a native exec one */ LOG(THREAD, LOG_INTERP, 2, "direct xfer @gateway @" PFX " to native_exec module " PFX "\n", bb->start_pc, tgt); bb->native_exec = true; /* add this ubr/call to the native_exec_list, both as an optimization * for future entrances and b/c .NET changes its method table call * from targeting a native_exec image to instead target DGC directly, * thwarting our gateway! * FIXME: if heap region de-allocated, we'll remove, but what if re-used * w/o going through syscalls? Just written over w/ something else? * We'll keep it on native_exec_list... */ ASSERT(bb->end_pc == bb->start_pc + DIRECT_XFER_LENGTH); vmvector_add(native_exec_areas, bb->start_pc, bb->end_pc, NULL); DODEBUG({ report_native_module(dcontext, tgt); }); STATS_INC(num_native_module_entrances_callcall); return; } } #ifdef UNIX /* XXX: i#1247: After a call to a native module throught plt, DR * loses control of the app b/c of _dl_runtime_resolve */ int ret_imm; if (DYNAMO_OPTION(native_exec) && DYNAMO_OPTION(native_exec_opt) && bb->app_interp && bb->instr != NULL && instr_is_return(bb->instr) && at_dl_runtime_resolve_ret(dcontext, bb->start_pc, &ret_imm)) { dr_insert_clean_call(dcontext, bb->ilist, bb->instr, (void *)native_module_at_runtime_resolve_ret, false, 2, opnd_create_reg(REG_XSP), OPND_CREATE_INT32(ret_imm)); } #endif STATS_TRACK_MAX(max_instrs_in_a_bb, total_instrs); if (stop_bb_on_fallthrough && TEST(FRAG_HAS_DIRECT_CTI, bb->flags)) { /* If we followed a direct cti to an instruction straddling a vmarea * boundary, we can't actually do the elision. See the * sandbox_last_byte() test case in security-common/sandbox.c. Restart * bb building without follow_direct. Alternatively, we could check the * vmareas of the targeted instruction before performing elision. 
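 * The rebuild below also clears FRAG_HAS_DIRECT_CTI and resets the exit
 * state so the second pass starts cleanly with elision turned off.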
*/ /* FIXME: a better assert is needed because this can trigger if * hot patching turns off follow_direct, the current bb was elided * earlier and is marked as selfmod. hotp_num_frag_direct_cti will * track this for now. */ ASSERT(bb->follow_direct); /* else, infinite loop possible */ BBPRINT(bb, 2, "*** must rebuild bb to avoid following direct cti to " "incompatible vmarea\n"); STATS_INC(num_bb_end_early); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } /* Remove FRAG_HAS_DIRECT_CTI, since we're turning off follow_direct. * Try to keep the known flags. We stopped the bb before merging in any * incompatible flags. */ bb->flags &= ~FRAG_HAS_DIRECT_CTI; bb->follow_direct = false; bb->exit_type = 0; /* i#577 */ bb->exit_target = NULL; /* i#928 */ /* overlap info will be reset by check_new_page_start */ build_bb_ilist(dcontext, bb); return; } if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { ASSERT(bb->full_decode); ASSERT(!bb->follow_direct); ASSERT(!TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); } #ifdef HOT_PATCHING_INTERFACE /* CAUTION: This can't be moved below client interface as the basic block * can be changed by the client. This will mess up hot patching. * The same is true for mangling. */ if (hotp_should_inject) { ASSERT(DYNAMO_OPTION(hot_patching)); hotp_injected = hotp_inject(dcontext, bb->ilist); /* Fix for 5272. Hot patch injection uses dr clean call api which * accesses dcontext fields directly, so the injected bbs can't be * shared until that is changed or the clean call mechanism is replaced * with bb termination to execute hot patchces. * Case 9995 assumes that hotp fragments are fine-grained, which we * achieve today by being private; if we make shared we must explicitly * prevent from being coarse-grained. */ if (hotp_injected) { bb->flags &= ~FRAG_SHARED; bb->flags |= FRAG_CANNOT_BE_TRACE; } } #endif /* Until we're more confident in our decoder/encoder consistency this is * at the default debug build -checklevel 2. 
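 * (The consistency check below is currently enabled only for ARM builds.)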
*/ IF_ARM(DOCHECK(2, check_encode_decode_consistency(dcontext, bb->ilist);)); #ifdef DR_APP_EXPORTS /* changes by DR that are visible to clients */ mangle_pre_client(dcontext, bb); #endif /* DR_APP_EXPORTS */ #ifdef DEBUG /* This is a special debugging feature */ if (bb->for_cache && INTERNAL_OPTION(go_native_at_bb_count) > 0 && debug_bb_count++ >= INTERNAL_OPTION(go_native_at_bb_count)) { SYSLOG_INTERNAL_INFO("thread " TIDFMT " is going native @%d bbs to " PFX, d_r_get_thread_id(), debug_bb_count - 1, bb->start_pc); /* we leverage the existing native_exec mechanism */ dcontext->native_exec_postsyscall = bb->start_pc; dcontext->next_tag = BACK_TO_NATIVE_AFTER_SYSCALL; dynamo_thread_not_under_dynamo(dcontext); IF_UNIX(os_swap_context(dcontext, true /*to app*/, DR_STATE_GO_NATIVE)); /* i#1921: for now we do not support re-attach, so remove handlers */ os_process_not_under_dynamorio(dcontext); bb_build_abort(dcontext, true /*free vmlist*/, false /*don't unlock*/); return; } #endif if (!client_process_bb(dcontext, bb)) { bb_build_abort(dcontext, true /*free vmlist*/, false /*don't unlock*/); return; } /* i#620: provide API to set fall-through and retaddr targets at end of bb */ if (instrlist_get_return_target(bb->ilist) != NULL || instrlist_get_fall_through_target(bb->ilist) != NULL) { CLIENT_ASSERT(instr_is_cbr(instrlist_last(bb->ilist)) || instr_is_call(instrlist_last(bb->ilist)), "instr_set_return_target/instr_set_fall_through_target" " can only be used in a bb ending with call/cbr"); /* the bb cannot be added to a trace */ bb->flags |= FRAG_CANNOT_BE_TRACE; } if (bb->unmangled_ilist != NULL) *bb->unmangled_ilist = instrlist_clone(dcontext, bb->ilist); if (bb->instr != NULL && instr_opcode_valid(bb->instr) && instr_is_far_cti(bb->instr)) { /* Simplify far_ibl (i#823) vs trace_cmp ibl as well as * cross-mode direct stubs varying in a trace by disallowing * far cti in middle of trace */ bb->flags |= FRAG_MUST_END_TRACE; /* Simplify coarse by not requiring extra prefix stubs */ bb->flags &= ~FRAG_COARSE_GRAIN; } /* create a final instruction that will jump to the exit stub * corresponding to the fall-through of the conditional branch or * the target of the final indirect branch (the indirect branch itself * will get mangled into a non-cti) */ if (bb->exit_target == NULL) { /* not set by ind branch, etc. */ /* fall-through pc */ /* i#620: provide API to set fall-through target at end of bb */ bb->exit_target = instrlist_get_fall_through_target(bb->ilist); if (bb->exit_target == NULL) bb->exit_target = (cache_pc)bb->cur_pc; else { LOG(THREAD, LOG_INTERP, 3, "set fall-throught target " PFX " by client\n", bb->exit_target); } if (bb->instr != NULL && instr_opcode_valid(bb->instr) && instr_is_cbr(bb->instr) && (int)(bb->exit_target - bb->start_pc) <= SHRT_MAX && (int)(bb->exit_target - bb->start_pc) >= SHRT_MIN && /* rule out jecxz, etc. */ !instr_is_cti_loop(bb->instr)) bb->flags |= FRAG_CBR_FALLTHROUGH_SHORT; } /* we share all basic blocks except selfmod (since want no-synch quick deletion) * or syscall-containing ones (to bound delay on threads exiting shared cache, * for cache management, both consistency and capacity) * bbs injected with hot patches are also not shared (see case 5272). 
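 * Blocks built for a single-stepped address are also kept private: see the
 * single_step_addr check below.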
*/ if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) && !TEST(FRAG_TEMP_PRIVATE, bb->flags) #ifdef HOT_PATCHING_INTERFACE && !hotp_injected #endif && (my_dcontext == NULL || my_dcontext->single_step_addr != bb->instr_start)) { /* If the fragment doesn't have a syscall or contains a * non-ignorable one -- meaning that the frag will exit the cache * to execute the syscall -- it can be shared. * We don't support ignorable syscalls in shared fragments, as they * don't set at_syscall and so are incompatible w/ -syscalls_synch_flush. */ if (!TEST(FRAG_HAS_SYSCALL, bb->flags) || TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type) || TEST(LINK_SPECIAL_EXIT, bb->exit_type)) bb->flags |= FRAG_SHARED; #ifdef WINDOWS /* A fragment can be shared if it contains a syscall that will be * executed via the version of shared syscall that can be targetted by * shared frags. */ else if (TEST(FRAG_HAS_SYSCALL, bb->flags) && DYNAMO_OPTION(shared_fragment_shared_syscalls) && bb->exit_target == shared_syscall_routine(dcontext)) bb->flags |= FRAG_SHARED; else { ASSERT((TEST(FRAG_HAS_SYSCALL, bb->flags) && (DYNAMO_OPTION(ignore_syscalls) || (!DYNAMO_OPTION(shared_fragment_shared_syscalls) && bb->exit_target == shared_syscall_routine(dcontext)))) && "BB not shared for unknown reason"); } #endif } else if (my_dcontext != NULL && my_dcontext->single_step_addr == bb->instr_start) { /* Field exit_type might have been cleared by client_process_bb. */ bb->exit_type |= LINK_SPECIAL_EXIT; } if (TEST(FRAG_COARSE_GRAIN, bb->flags) && (!TEST(FRAG_SHARED, bb->flags) || /* Ignorable syscalls on linux are mangled w/ intra-fragment jmps, which * decode_fragment() cannot handle -- and on win32 this overlaps w/ * FRAG_MUST_END_TRACE and LINK_NI_SYSCALL */ TEST(FRAG_HAS_SYSCALL, bb->flags) || TEST(FRAG_MUST_END_TRACE, bb->flags) || TEST(FRAG_CANNOT_BE_TRACE, bb->flags) || TEST(FRAG_SELFMOD_SANDBOXED, bb->flags) || /* PR 214142: coarse units does not support storing translations */ TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags) || /* FRAG_HAS_DIRECT_CTI: we never elide (assert is below); * not-inlined call/jmp: we turn off FRAG_COARSE_GRAIN up above */ #ifdef WINDOWS TEST(LINK_CALLBACK_RETURN, bb->exit_type) || #endif TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type))) { /* Currently not supported in a coarse unit */ STATS_INC(num_fine_in_coarse); DOSTATS({ if (!TEST(FRAG_SHARED, bb->flags)) STATS_INC(coarse_prevent_private); else if (TEST(FRAG_HAS_SYSCALL, bb->flags)) STATS_INC(coarse_prevent_syscall); else if (TEST(FRAG_MUST_END_TRACE, bb->flags)) STATS_INC(coarse_prevent_end_trace); else if (TEST(FRAG_CANNOT_BE_TRACE, bb->flags)) STATS_INC(coarse_prevent_no_trace); else if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) STATS_INC(coarse_prevent_selfmod); else if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) STATS_INC(coarse_prevent_translation); else if (IF_WINDOWS_ELSE_0(TEST(LINK_CALLBACK_RETURN, bb->exit_type))) STATS_INC(coarse_prevent_cbret); else if (TESTANY(LINK_NI_SYSCALL_ALL, bb->exit_type)) STATS_INC(coarse_prevent_syscall); else ASSERT_NOT_REACHED(); }); bb->flags &= ~FRAG_COARSE_GRAIN; } ASSERT(!TEST(FRAG_COARSE_GRAIN, bb->flags) || !TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); /* now that we know whether shared, ensure we have the right ibl routine */ if (!TEST(FRAG_SHARED, bb->flags) && TEST(LINK_INDIRECT, bb->exit_type)) { ASSERT(bb->exit_target == get_ibl_routine(dcontext, get_ibl_entry_type(bb->exit_type), DEFAULT_IBL_BB(), bb->ibl_branch_type)); bb->exit_target = get_ibl_routine(dcontext, 
get_ibl_entry_type(bb->exit_type), IBL_BB_PRIVATE, bb->ibl_branch_type); } if (bb->mangle_ilist && (bb->instr == NULL || !instr_opcode_valid(bb->instr) || !instr_is_near_ubr(bb->instr) || instr_is_meta(bb->instr))) { instr_t *exit_instr = XINST_CREATE_jump(dcontext, opnd_create_pc(bb->exit_target)); if (bb->record_translation) { app_pc translation = NULL; if (bb->instr == NULL || !instr_opcode_valid(bb->instr)) { /* we removed (or mangle will remove) the last instruction * for special handling (invalid/syscall/int 2b) or there were * no instructions added (i.e. check_stopping_point in which * case instr_start == cur_pc), use last instruction's start * address for the translation */ translation = bb->instr_start; } else if (instr_is_cti(bb->instr)) { /* last instruction is a cti, consider the exit jmp part of * the mangling of the cti (since we might not know the target * if, for ex., its indirect) */ translation = instr_get_translation(bb->instr); } else { /* target is the instr after the last instr in the list */ translation = bb->cur_pc; ASSERT(bb->cur_pc == bb->exit_target); } ASSERT(translation != NULL); instr_set_translation(exit_instr, translation); } /* PR 214962: we need this jmp to be marked as "our mangling" so that * we won't relocate a thread there and re-do a ret pop or call push */ instr_set_our_mangling(exit_instr, true); /* here we need to set exit_type */ LOG(THREAD, LOG_EMIT, 3, "exit_branch_type=0x%x bb->exit_target=" PFX "\n", bb->exit_type, bb->exit_target); instr_exit_branch_set_type(exit_instr, bb->exit_type); instrlist_append(bb->ilist, exit_instr); #ifdef ARM if (bb->svc_pred != DR_PRED_NONE) { /* we have a conditional syscall, add predicate to current exit */ instr_set_predicate(exit_instr, bb->svc_pred); /* add another ubr exit as the fall-through */ exit_instr = XINST_CREATE_jump(dcontext, opnd_create_pc(bb->exit_target)); if (bb->record_translation) instr_set_translation(exit_instr, bb->cur_pc); instr_set_our_mangling(exit_instr, true); instr_exit_branch_set_type(exit_instr, LINK_DIRECT | LINK_JMP); instrlist_append(bb->ilist, exit_instr); /* XXX i#1734: instr svc.cc will be deleted later in mangle_syscall, * so we need reset encode state to avoid holding a dangling pointer. */ encode_reset_it_block(dcontext); } #endif } /* set flags */ #ifdef DGC_DIAGNOSTICS /* no traces in dyngen code, that would mess up our exit tracking */ if (TEST(FRAG_DYNGEN, bb->flags)) bb->flags |= FRAG_CANNOT_BE_TRACE; #endif if (!INTERNAL_OPTION(unsafe_ignore_eflags_prefix) IF_X64(|| !INTERNAL_OPTION(unsafe_ignore_eflags_trace))) { bb->flags |= instr_eflags_to_fragment_eflags(bb->eflags); if (TEST(FRAG_WRITES_EFLAGS_OF, bb->flags)) { LOG(THREAD, LOG_INTERP, 4, "fragment writes OF prior to reading it!\n"); STATS_INC(bbs_eflags_writes_of); } else if (TEST(FRAG_WRITES_EFLAGS_6, bb->flags)) { IF_X86(ASSERT(TEST(FRAG_WRITES_EFLAGS_OF, bb->flags))); LOG(THREAD, LOG_INTERP, 4, "fragment writes all 6 flags prior to reading any\n"); STATS_INC(bbs_eflags_writes_6); } else { DOSTATS({ if (bb->eflags == EFLAGS_READ_ARITH) { /* Reads a flag before writing any. Won't get here if * reads one flag and later writes OF, or writes OF and * later reads one flag before writing that flag. 
*/ STATS_INC(bbs_eflags_reads); } else { STATS_INC(bbs_eflags_writes_none); if (TEST(LINK_INDIRECT, bb->exit_type)) STATS_INC(bbs_eflags_writes_none_ind); } }); } } /* can only have proactive translation info if flag was set from the beginning */ if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags) && (!bb->record_translation || !bb->full_decode)) bb->flags &= ~FRAG_HAS_TRANSLATION_INFO; /* if for_cache, caller must clear once done emitting (emitting can deref * app memory so we wait until all done) */ if (!bb_build_nested && !bb->for_cache && my_dcontext != NULL) { ASSERT(my_dcontext->bb_build_info == (void *)bb); my_dcontext->bb_build_info = NULL; } bb->instr = NULL; /* mangle the instruction list */ if (!bb->mangle_ilist) { /* do not mangle! * caller must use full_decode to find invalid instrs and avoid * a discrepancy w/ for_cache case that aborts b/c of selfmod sandbox * returning false (in code below) */ return; } if (!mangle_bb_ilist(dcontext, bb)) { /* have to rebuild bb w/ new bb flags set by mangle_bb_ilist */ build_bb_ilist(dcontext, bb); return; } } /* Call when about to throw exception or other drastic action in the * middle of bb building, in order to free resources */ void bb_build_abort(dcontext_t *dcontext, bool clean_vmarea, bool unlock) { ASSERT(dcontext->bb_build_info != NULL); /* caller should check */ if (dcontext->bb_build_info != NULL) { build_bb_t *bb = (build_bb_t *)dcontext->bb_build_info; /* free instr memory */ if (bb->instr != NULL && bb->ilist != NULL && instrlist_last(bb->ilist) != bb->instr) instr_destroy(dcontext, bb->instr); /* not added to bb->ilist yet */ DODEBUG({ bb->instr = NULL; }); if (bb->ilist != NULL) { instrlist_clear_and_destroy(dcontext, bb->ilist); DODEBUG({ bb->ilist = NULL; }); } if (clean_vmarea) { /* Free the vmlist and any locks held (we could have been in * the middle of check_thread_vm_area and had a decode fault * during code origins checking!) */ check_thread_vm_area_abort(dcontext, &bb->vmlist, bb->flags); } /* else we were presumably called from vmarea so caller does cleanup */ if (unlock) { /* Assumption: bb building lock is held iff bb->for_cache, * and on a nested app bb build where !bb->for_cache we do keep the * original bb info in dcontext (see build_bb_ilist()). */ if (bb->has_bb_building_lock) { ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); SHARED_BB_UNLOCK(); KSTOP_REWIND(bb_building); } else ASSERT_DO_NOT_OWN_MUTEX(USE_BB_BUILDING_LOCK(), &bb_building_lock); } dcontext->bb_build_info = NULL; } } bool expand_should_set_translation(dcontext_t *dcontext) { if (dcontext->bb_build_info != NULL) { build_bb_t *bb = (build_bb_t *)dcontext->bb_build_info; /* Expanding to a higher level should set the translation to * the raw bytes if we're building a bb where we can assume * the raw byte pointer is the app pc. */ return bb->record_translation; } return false; } /* returns false if need to rebuild bb: in that case this routine will * set the bb flags needed to ensure successful mangling 2nd time around */ static bool mangle_bb_ilist(dcontext_t *dcontext, build_bb_t *bb) { #ifdef X86 if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) { byte *selfmod_start, *selfmod_end; /* sandbox requires that bb have no direct cti followings! 
* check_thread_vm_area should have ensured this for us */ ASSERT(!TEST(FRAG_HAS_DIRECT_CTI, bb->flags)); LOG(THREAD, LOG_INTERP, 2, "fragment overlaps selfmod area, inserting sandboxing\n"); /* only reason can't be trace is don't have mechanism set up * to store app code for each trace bb and update sandbox code * to point there */ bb->flags |= FRAG_CANNOT_BE_TRACE; if (bb->pretend_pc != NULL) { selfmod_start = bb->pretend_pc; selfmod_end = bb->pretend_pc + (bb->cur_pc - bb->start_pc); } else { selfmod_start = bb->start_pc; selfmod_end = bb->cur_pc; } if (!insert_selfmod_sandbox(dcontext, bb->ilist, bb->flags, selfmod_start, selfmod_end, bb->record_translation, bb->for_cache)) { /* have to rebuild bb using full decode -- it has invalid instrs * in middle, which we don't want to deal w/ for sandboxing! */ ASSERT(!bb->full_decode); /* else, how did we get here??? */ LOG(THREAD, LOG_INTERP, 2, "*** must rebuild bb to avoid invalid instr in middle ***\n"); STATS_INC(num_bb_end_early); instrlist_clear_and_destroy(dcontext, bb->ilist); if (bb->vmlist != NULL) { vm_area_destroy_list(dcontext, bb->vmlist); bb->vmlist = NULL; } bb->flags = FRAG_SELFMOD_SANDBOXED; /* lose all other flags */ bb->full_decode = true; /* full decode this time! */ bb->follow_direct = false; bb->exit_type = 0; /* i#577 */ bb->exit_target = NULL; /* i#928 */ /* overlap info will be reset by check_new_page_start */ return false; } STATS_INC(num_sandboxed_fragments); } #endif /* X86 */ DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "bb ilist before mangling:\n"); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); d_r_mangle(dcontext, bb->ilist, &bb->flags, true, bb->record_translation); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "bb ilist after mangling:\n"); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); return true; } /* Interprets the application's instructions until the end of a basic * block is found, following all the rules that build_bb_ilist follows * with regard to terminating the block. Does no mangling or anything of * the app code, though -- this routine is intended only for building the * original code! * Caller is responsible for freeing the list and its instrs! * If outf != INVALID_FILE, does full disassembly with comments to outf. */ instrlist_t * build_app_bb_ilist(dcontext_t *dcontext, byte *start_pc, file_t outf) { build_bb_t bb; init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, false /*no translation*/, outf, 0 /*no pre flags*/, NULL /*no overlap*/); build_bb_ilist(dcontext, &bb); return bb.ilist; } /* Client routine to decode instructions at an arbitrary app address, * following all the rules that DynamoRIO follows internally for * terminating basic blocks. Note that DynamoRIO does not validate * that start_pc is actually the first instruction of a basic block. * \note Caller is reponsible for freeing the list and its instrs! */ instrlist_t * decode_as_bb(void *drcontext, byte *start_pc) { build_bb_t bb; /* Case 10009: When we hook ntdll functions, we hide the jump to * the interception buffer from the client BB callback. If the * client asks to decode that address here, we need to decode the * instructions in the interception buffer instead so that we * again hide our hooking. * We will have the jmp from the buffer back to after the hooked * app code visible to the client (just like it is for the * real bb built there, so at least we're consistent). 
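 * (The redirection below is Windows-only, via is_intercepted_app_pc().)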
*/ #ifdef WINDOWS byte *real_pc; if (is_intercepted_app_pc((app_pc)start_pc, &real_pc)) start_pc = real_pc; #endif init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, true, /* translation; xref case 10070 where this * currently turns on full decode; today we * provide no way to turn that off, as IR * expansion routines are not exported (PR 200409). */ INVALID_FILE, 0 /*no pre flags*/, NULL /*no overlap*/); build_bb_ilist((dcontext_t *)drcontext, &bb); return bb.ilist; } /* Client routine to decode a trace. We return the instructions in * the original app code, i.e., no client modifications. */ instrlist_t * decode_trace(void *drcontext, void *tag) { dcontext_t *dcontext = (dcontext_t *)drcontext; fragment_t *frag = fragment_lookup(dcontext, tag); /* We don't support asking about other threads, for synch purposes * (see recreate_fragment_ilist() synch notes) */ if (get_thread_private_dcontext() != dcontext) return NULL; if (frag != NULL && TEST(FRAG_IS_TRACE, frag->flags)) { instrlist_t *ilist; bool alloc_res; /* Support being called from bb/trace hook (couldbelinking) or * from cache clean call (nolinking). We disallow asking about * another thread's private traces. */ if (!is_couldbelinking(dcontext)) d_r_mutex_lock(&thread_initexit_lock); ilist = recreate_fragment_ilist(dcontext, NULL, &frag, &alloc_res, false /*no mangling*/, false /*do not re-call client*/); ASSERT(!alloc_res); if (!is_couldbelinking(dcontext)) d_r_mutex_unlock(&thread_initexit_lock); return ilist; } return NULL; } app_pc find_app_bb_end(dcontext_t *dcontext, byte *start_pc, uint flags) { build_bb_t bb; init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, false /*no translation*/, INVALID_FILE, flags, NULL /*no overlap*/); build_bb_ilist(dcontext, &bb); instrlist_clear_and_destroy(dcontext, bb.ilist); return bb.end_pc; } bool app_bb_overlaps(dcontext_t *dcontext, byte *start_pc, uint flags, byte *region_start, byte *region_end, overlap_info_t *info_res) { build_bb_t bb; overlap_info_t info; info.region_start = region_start; info.region_end = region_end; init_build_bb(&bb, start_pc, false /*not interp*/, false /*not for cache*/, false /*do not mangle*/, false /*no translation*/, INVALID_FILE, flags, &info); build_bb_ilist(dcontext, &bb); instrlist_clear_and_destroy(dcontext, bb.ilist); info.bb_end = bb.end_pc; if (info_res != NULL) *info_res = info; return info.overlap; } #ifdef DEBUG static void report_native_module(dcontext_t *dcontext, app_pc modpc) { char name[MAX_MODNAME_INTERNAL]; const char *modname = name; if (os_get_module_name_buf(modpc, name, BUFFER_SIZE_ELEMENTS(name)) == 0) { /* for native_exec_callcall we do end up putting DGC on native_exec_list */ ASSERT(DYNAMO_OPTION(native_exec_callcall)); modname = "<DGC>"; } LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "module %s is on native list, executing natively\n", modname); STATS_INC(num_native_module_entrances); SYSLOG_INTERNAL_WARNING_ONCE("module %s set up for native execution", modname); } #endif /* WARNING: breaks all kinds of rules, like ret addr transparency and * assuming app stack and not doing calls out of the cache and not having * control during dll loads, etc... 
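 * The block built here contains no application instructions: it saves
 * scratch state, arranges in clean-call C code for control to return to DR
 * via the clobbered return address, and then jumps to the native target.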
*/ static void build_native_exec_bb(dcontext_t *dcontext, build_bb_t *bb) { instr_t *in; opnd_t jmp_tgt; #if defined(X86) && defined(X64) bool reachable = rel32_reachable_from_vmcode(bb->start_pc); #endif DEBUG_DECLARE(bool ok;) /* if we ever protect from simultaneous thread attacks then this will * be a hole -- for now should work, all protected while native until * another thread goes into DR */ /* Create a bb that changes the return address on the app stack such that we * will take control when coming back, and then goes native. * N.B.: we ASSUME we reached this moduled via a call -- * build_basic_block_fragment needs to make sure, since we can't verify here * w/o trying to decode backward from retaddr, and if we're wrong we'll * clobber the stack and never regain control! * We also assume this bb is never reached later through a non-call. */ ASSERT(bb->initialized); ASSERT(bb->app_interp); ASSERT(!bb->record_translation); ASSERT(bb->start_pc != NULL); /* vmlist must start out empty (or N/A). For clients it may have started early. */ ASSERT(bb->vmlist == NULL || !bb->record_vmlist || bb->checked_start_vmarea); if (TEST(FRAG_HAS_TRANSLATION_INFO, bb->flags)) bb->flags &= ~FRAG_HAS_TRANSLATION_INFO; bb->native_exec = true; BBPRINT(bb, IF_DGCDIAG_ELSE(1, 2), "build_native_exec_bb @" PFX "\n", bb->start_pc); DOLOG(2, LOG_INTERP, { dump_mcontext(get_mcontext(dcontext), THREAD, DUMP_NOT_XML); }); if (!bb->checked_start_vmarea) check_new_page_start(dcontext, bb); /* create instrlist after check_new_page_start to avoid memory leak * on unreadable memory * WARNING: do not add any app instructions to this ilist! * If you do you must enable selfmod below. */ bb->ilist = instrlist_create(dcontext); /* FIXME PR 303413: we won't properly translate a fault in our app * stack references here. We mark as our own mangling so we'll at * least return failure from our translate routine. */ instrlist_set_our_mangling(bb->ilist, true); /* get dcontext to xdi, for prot-dcontext, xsi holds upcontext too */ insert_shared_get_dcontext(dcontext, bb->ilist, NULL, true /*save xdi*/); instrlist_append(bb->ilist, instr_create_save_to_dc_via_reg(dcontext, REG_NULL /*default*/, SCRATCH_REG0, SCRATCH_REG0_OFFS)); /* need some cleanup prior to native: turn off asynch, clobber trace, etc. * Now that we have a stack of native retaddrs, we save the app retaddr in C * code. */ if (bb->native_call) { dr_insert_clean_call_ex(dcontext, bb->ilist, NULL, (void *)call_to_native, DR_CLEANCALL_RETURNS_TO_NATIVE, 1, opnd_create_reg(REG_XSP)); } else { if (DYNAMO_OPTION(native_exec_opt)) { insert_return_to_native(dcontext, bb->ilist, NULL, REG_NULL /* default */, SCRATCH_REG0); } else { dr_insert_clean_call_ex(dcontext, bb->ilist, NULL, (void *)return_to_native, DR_CLEANCALL_RETURNS_TO_NATIVE, 0); } } #if defined(X86) && defined(X64) if (!reachable) { /* best to store the target at the end of the bb, to keep it readonly, * but that requires a post-pass to patch its value: since native_exec * is already hacky we just go through TLS and ignore multi-thread selfmod. 
*/ instrlist_append( bb->ilist, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(SCRATCH_REG0), OPND_CREATE_INTPTR((ptr_int_t)bb->start_pc))); if (X64_CACHE_MODE_DC(dcontext) && !X64_MODE_DC(dcontext) && DYNAMO_OPTION(x86_to_x64_ibl_opt)) { jmp_tgt = opnd_create_reg(REG_R9); } else { jmp_tgt = opnd_create_tls_slot(os_tls_offset(MANGLE_XCX_SPILL_SLOT)); } instrlist_append( bb->ilist, INSTR_CREATE_mov_st(dcontext, jmp_tgt, opnd_create_reg(REG_XAX))); } else #endif { jmp_tgt = opnd_create_pc(bb->start_pc); } instrlist_append(bb->ilist, instr_create_restore_from_dc_via_reg(dcontext, REG_NULL /*default*/, SCRATCH_REG0, SCRATCH_REG0_OFFS)); insert_shared_restore_dcontext_reg(dcontext, bb->ilist, NULL); #ifdef AARCH64 ASSERT_NOT_IMPLEMENTED(false); /* FIXME i#1569 */ #else /* this is the jump to native code */ instrlist_append(bb->ilist, opnd_is_pc(jmp_tgt) ? XINST_CREATE_jump(dcontext, jmp_tgt) : XINST_CREATE_jump_mem(dcontext, jmp_tgt)); #endif /* mark all as do-not-mangle, so selfmod, etc. will leave alone (in absence * of selfmod only really needed for the jmp to native code) */ for (in = instrlist_first(bb->ilist); in != NULL; in = instr_get_next(in)) instr_set_meta(in); /* this is a jump for a dummy exit cti */ instrlist_append(bb->ilist, XINST_CREATE_jump(dcontext, opnd_create_pc(bb->start_pc))); if (DYNAMO_OPTION(shared_bbs) && !TEST(FRAG_TEMP_PRIVATE, bb->flags)) bb->flags |= FRAG_SHARED; /* Can't be coarse-grain since has non-exit cti */ bb->flags &= ~FRAG_COARSE_GRAIN; STATS_INC(coarse_prevent_native_exec); /* We exclude the bb from trace to avoid going native in the process of * building a trace for simplicity. * XXX i#1239: DR needs to be able to unlink native exec gateway bbs for * proper cache consistency and signal handling, in which case we could * use FRAG_MUST_END_TRACE here instead. */ bb->flags |= FRAG_CANNOT_BE_TRACE; /* We support mangling here, though currently we don't need it as we don't * include any app code (although we mark this bb as belonging to the start * pc, so we'll get flushed if this region does), and even if target is * selfmod we're running it natively no matter how it modifies itself. We * only care that transition to target is via a call or call* so we can * clobber the retaddr and regain control, and that no retaddr mangling * happens while native before coming back out. While the former does not * depend on the target at all, unfortunately we cannot verify the latter. */ if (TEST(FRAG_SELFMOD_SANDBOXED, bb->flags)) bb->flags &= ~FRAG_SELFMOD_SANDBOXED; DEBUG_DECLARE(ok =) mangle_bb_ilist(dcontext, bb); ASSERT(ok); #ifdef DEBUG DOLOG(3, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 3, "native_exec_bb @" PFX "\n", bb->start_pc); instrlist_disassemble(dcontext, bb->start_pc, bb->ilist, THREAD); }); #endif } static bool at_native_exec_gateway(dcontext_t *dcontext, app_pc start, bool *is_call _IF_DEBUG(bool xfer_target)) { /* ASSUMPTION: transfer to another module will always be by indirect call * or non-inlined direct call from a fragment that will not be flushed. * For now we will only go native if last_exit was * a call, a true call*, or a PLT-style call,jmp* (and we detect the latter only * if the call is inlined, so if the jmp* table is in a DGC-marked region * or if -no_inline_calls we will miss these: FIXME). * FIXME: what if have PLT-style but no GOT indirection: call,jmp ?!? 
* * We try to identify funky call* constructions (like * call*,...,jmp* in case 4269) by examining TOS to see whether it's a * retaddr -- we do this if last_exit is a jmp* or is unknown (for the * target_delete ibl path). * * FIXME: we will fail to identify a delay-loaded indirect xfer! * Need to know dynamic link patchup code to look for. * * FIXME: we will fail to take over w/ non-call entrances to a dll, like * NtContinue or direct jmp from DGC. * we could try to take the top-of-stack value and see if it's a retaddr by * decoding the prev instr to see if it's a call. decode backwards may have * issues, and if really want everything will have to do this on every bb, * not just if lastexit is ind xfer. * * We count up easy-to-identify cases we've missed in the DOSTATS below. */ bool native_exec_bb = false; /* We can get here if we start interpreting native modules. */ ASSERT(start != (app_pc)back_from_native && start != (app_pc)native_module_callout && "interpreting return from native module?"); ASSERT(is_call != NULL); *is_call = false; if (DYNAMO_OPTION(native_exec) && !vmvector_empty(native_exec_areas)) { /* do we KNOW that we came from an indirect call? */ if (TEST(LINK_CALL /*includes IND_JMP_PLT*/, dcontext->last_exit->flags) && /* only check direct calls if native_exec_dircalls is on */ (DYNAMO_OPTION(native_exec_dircalls) || LINKSTUB_INDIRECT(dcontext->last_exit->flags))) { STATS_INC(num_native_entrance_checks); /* we do the overlap check last since it's more costly */ if (is_native_pc(start)) { native_exec_bb = true; *is_call = true; DOSTATS({ if (EXIT_IS_CALL(dcontext->last_exit->flags)) { if (LINKSTUB_INDIRECT(dcontext->last_exit->flags)) STATS_INC(num_native_module_entrances_indcall); else STATS_INC(num_native_module_entrances_call); } else STATS_INC(num_native_module_entrances_plt); }); } } /* can we GUESS that we came from an indirect call? */ else if (DYNAMO_OPTION(native_exec_guess_calls) && (/* FIXME: require jmp* be in separate module? */ (LINKSTUB_INDIRECT(dcontext->last_exit->flags) && EXIT_IS_JMP(dcontext->last_exit->flags)) || LINKSTUB_FAKE(dcontext->last_exit))) { /* if unknown last exit, or last exit was jmp*, examine TOS and guess * whether it's a retaddr */ app_pc *tos = (app_pc *)get_mcontext(dcontext)->xsp; STATS_INC(num_native_entrance_TOS_checks); /* vector check cheaper than is_readable syscall, etc. so do it before them, * but after last_exit checks above since overlap is more costly */ if (is_native_pc(start) && is_readable_without_exception((app_pc)tos, sizeof(app_pc))) { enum { MAX_CALL_CONSIDER = 6 /* ignore prefixes */ }; app_pc retaddr = *tos; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "at native_exec target: checking TOS " PFX " => " PFX " for retaddr\n", tos, retaddr); #ifdef RETURN_AFTER_CALL if (DYNAMO_OPTION(ret_after_call)) { native_exec_bb = is_observed_call_site(dcontext, retaddr); *is_call = true; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "native_exec: *TOS is %sa call site in ret-after-call table\n", native_exec_bb ? 
"" : "NOT "); } else { #endif /* try to decode backward -- make sure readable for decoding */ if (is_readable_without_exception(retaddr - MAX_CALL_CONSIDER, MAX_CALL_CONSIDER + MAX_INSTR_LENGTH)) { /* ind calls have variable length and form so we decode * each byte rather than searching for ff and guessing length */ app_pc pc, next_pc; instr_t instr; instr_init(dcontext, &instr); for (pc = retaddr - MAX_CALL_CONSIDER; pc < retaddr; pc++) { LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 3, "native_exec: decoding @" PFX " looking for call\n", pc); instr_reset(dcontext, &instr); next_pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, pc, &instr); STATS_INC(num_native_entrance_TOS_decodes); if (next_pc == retaddr && instr_is_call(&instr)) { native_exec_bb = true; *is_call = true; LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "native_exec: found call @ pre-*TOS " PFX "\n", pc); break; } } instr_free(dcontext, &instr); } #ifdef RETURN_AFTER_CALL } #endif DOSTATS({ if (native_exec_bb) { if (LINKSTUB_FAKE(dcontext->last_exit)) STATS_INC(num_native_module_entrances_TOS_unknown); else STATS_INC(num_native_module_entrances_TOS_jmp); } }); } } /* i#2381: Only now can we check things that might preempt the * "guess" code above. */ /* Is this a return from a non-native module into a native module? */ if (!native_exec_bb && DYNAMO_OPTION(native_exec_retakeover) && LINKSTUB_INDIRECT(dcontext->last_exit->flags) && TEST(LINK_RETURN, dcontext->last_exit->flags)) { if (is_native_pc(start)) { /* XXX: check that this is the return address of a known native * callsite where we took over on a module transition. */ STATS_INC(num_native_module_entrances_ret); native_exec_bb = true; *is_call = false; } } #ifdef UNIX /* Is this the entry point of a native ELF executable? The entry point * (usually _start) cannot return as there is no retaddr. */ else if (!native_exec_bb && DYNAMO_OPTION(native_exec_retakeover) && LINKSTUB_INDIRECT(dcontext->last_exit->flags) && start == get_image_entry()) { if (is_native_pc(start)) { native_exec_bb = true; *is_call = false; } } #endif DOSTATS({ /* did we reach a native dll w/o going through an ind call caught above? */ if (!xfer_target /* else we'll re-check at the target itself */ && !native_exec_bb && is_native_pc(start)) { LOG(THREAD, LOG_INTERP | LOG_VMAREAS, 2, "WARNING: pc " PFX " is on native list but reached bypassing " "gateway!\n", start); STATS_INC(num_native_entrance_miss); /* do-once since once get into dll past gateway may xfer * through a bunch of lastexit-null or indjmp to same dll */ ASSERT_CURIOSITY_ONCE(false && "inside native_exec dll"); } }); } return native_exec_bb; } /* Use when calling build_bb_ilist with for_cache = true. * Must hold bb_building_lock. */ static inline void init_interp_build_bb(dcontext_t *dcontext, build_bb_t *bb, app_pc start, uint initial_flags, bool for_trace, instrlist_t **unmangled_ilist) { ASSERT_OWN_MUTEX(USE_BB_BUILDING_LOCK() && !TEST(FRAG_TEMP_PRIVATE, initial_flags), &bb_building_lock); /* We need to set up for abort prior to native exec and other checks * that can crash */ ASSERT(dcontext->bb_build_info == NULL); /* This won't make us be nested b/c for bb.for_cache caller is supposed * to set this up */ dcontext->bb_build_info = (void *)bb; init_build_bb( bb, start, true /*real interp*/, true /*for cache*/, true /*mangle*/, false /* translation: set below for clients */, INVALID_FILE, initial_flags | (INTERNAL_OPTION(store_translations) ? 
FRAG_HAS_TRANSLATION_INFO : 0), NULL /*no overlap*/); if (!TEST(FRAG_TEMP_PRIVATE, initial_flags)) bb->has_bb_building_lock = true; /* We avoid races where there is no hook when we start building a * bb (and hence we don't record translation or do full decode) yet * a hook when we're ready to call one by storing whether there is a * hook at translation/decode decision time: now. */ if (dr_bb_hook_exists()) { /* i#805: Don't instrument code on the null instru list. * Because the module load event is now on 1st exec, we need to trigger * it now so the client can adjust the null instru list: */ check_new_page_start(dcontext, bb); bb->checked_start_vmarea = true; if (!os_module_get_flag(bb->start_pc, MODULE_NULL_INSTRUMENT)) bb->pass_to_client = true; } /* PR 299808: even if no bb hook, for a trace hook we need to * record translation and do full decode. It's racy to check * dr_trace_hook_exists() here so we rely on trace building having * set unmangled_ilist. */ if (bb->pass_to_client || unmangled_ilist != NULL) { /* case 10009/214444: For client interface builds, store the translation. * by default. This ensures clients can get the correct app address * of any instruction. We also rely on this for allowing the client * to return DR_EMIT_STORE_TRANSLATIONS and setting the * FRAG_HAS_TRANSLATION_INFO flag after decoding the app code. * * FIXME: xref case 10070/214505. Currently this means that all * instructions are fully decoded for client interface builds. */ bb->record_translation = true; /* PR 200409: If a bb hook exists, we always do a full decode. * Note that we currently do this anyway to get * translation fields, but once we fix case 10070 it * won't be that way. * We do not let the client turn this off (the runtime * option is not dynamic, and off by default anyway), as we * do not export level-handling instr_t routines like *_expand * for walking instrlists and instr_decode(). */ bb->full_decode = !INTERNAL_OPTION(fast_client_decode); /* PR 299808: we give client chance to re-add instrumentation */ bb->for_trace = for_trace; } /* we need to clone the ilist pre-mangling */ bb->unmangled_ilist = unmangled_ilist; } static inline void exit_interp_build_bb(dcontext_t *dcontext, build_bb_t *bb) { ASSERT(dcontext->bb_build_info == (void *)bb); /* Caller's responsibility to clean up since bb.for_cache */ dcontext->bb_build_info = NULL; /* free the instrlist_t elements */ instrlist_clear_and_destroy(dcontext, bb->ilist); } /* Interprets the application's instructions until the end of a basic * block is found, and then creates a fragment for the basic block. * DOES NOT look in the hashtable to see if such a fragment already exists! */ fragment_t * build_basic_block_fragment(dcontext_t *dcontext, app_pc start, uint initial_flags, bool link, bool visible, bool for_trace, instrlist_t **unmangled_ilist) { fragment_t *f; build_bb_t bb; dr_where_am_i_t wherewasi = dcontext->whereami; bool image_entry; KSTART(bb_building); dcontext->whereami = DR_WHERE_INTERP; /* Neither thin_client nor hotp_only should be building any bbs. 
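 * (RUNNING_WITHOUT_CODE_CACHE() presumably covers both of those modes, hence
 * the single assert below.)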
*/ ASSERT(!RUNNING_WITHOUT_CODE_CACHE()); /* ASSUMPTION: image entry is reached via indirect transfer and * so will be the start of a bb */ image_entry = check_for_image_entry(start); init_interp_build_bb(dcontext, &bb, start, initial_flags, for_trace, unmangled_ilist); if (at_native_exec_gateway(dcontext, start, &bb.native_call _IF_DEBUG(false /*not xfer tgt*/))) { DODEBUG({ report_native_module(dcontext, bb.start_pc); }); /* PR 232617 - build_native_exec_bb doesn't support setting translation * info, but it also doesn't pass the built bb to the client (it * contains no app code) so we don't need it. */ bb.record_translation = false; build_native_exec_bb(dcontext, &bb); } else { build_bb_ilist(dcontext, &bb); if (dcontext->bb_build_info == NULL) { /* going native */ f = NULL; goto build_basic_block_fragment_done; } if (bb.native_exec) { /* change bb to be a native_exec gateway */ bool is_call = bb.native_call; LOG(THREAD, LOG_INTERP, 2, "replacing built bb with native_exec bb\n"); instrlist_clear_and_destroy(dcontext, bb.ilist); vm_area_destroy_list(dcontext, bb.vmlist); dcontext->bb_build_info = NULL; init_interp_build_bb(dcontext, &bb, start, initial_flags, for_trace, unmangled_ilist); /* PR 232617 - build_native_exec_bb doesn't support setting * translation info, but it also doesn't pass the built bb to the * client (it contains no app code) so we don't need it. */ bb.record_translation = false; bb.native_call = is_call; build_native_exec_bb(dcontext, &bb); } } /* case 9652: we do not want to persist the image entry point, so we keep * it fine-grained */ if (image_entry) bb.flags &= ~FRAG_COARSE_GRAIN; if (DYNAMO_OPTION(opt_jit) && visible && is_jit_managed_area(bb.start_pc)) { ASSERT(bb.overlap_info == NULL || bb.overlap_info->contiguous); jitopt_add_dgc_bb(bb.start_pc, bb.end_pc, TEST(FRAG_IS_TRACE_HEAD, bb.flags)); } /* emit fragment into fcache */ KSTART(bb_emit); f = emit_fragment_ex(dcontext, start, bb.ilist, bb.flags, bb.vmlist, link, visible); KSTOP(bb_emit); #ifdef CUSTOM_TRACES_RET_REMOVAL f->num_calls = dcontext->num_calls; f->num_rets = dcontext->num_rets; #endif #ifdef DGC_DIAGNOSTICS if ((f->flags & FRAG_DYNGEN)) { LOG(THREAD, LOG_INTERP, 1, "new bb is DGC:\n"); DOLOG(1, LOG_INTERP, { disassemble_app_bb(dcontext, start, THREAD); }); DOLOG(3, LOG_INTERP, { disassemble_fragment(dcontext, f, false); }); } #endif DOLOG(2, LOG_INTERP, { disassemble_fragment(dcontext, f, d_r_stats->loglevel <= 3); }); DOLOG(4, LOG_INTERP, { if (TEST(FRAG_SELFMOD_SANDBOXED, f->flags)) { LOG(THREAD, LOG_INTERP, 4, "\nXXXX sandboxed fragment! original code:\n"); disassemble_app_bb(dcontext, f->tag, THREAD); LOG(THREAD, LOG_INTERP, 4, "code cache code:\n"); disassemble_fragment(dcontext, f, false); } }); if (INTERNAL_OPTION(bbdump_tags)) { disassemble_fragment_header(dcontext, f, bbdump_file); } #ifdef INTERNAL DODEBUG({ if (INTERNAL_OPTION(stress_recreate_pc)) { /* verify recreation */ stress_test_recreate(dcontext, f, bb.ilist); } }); #endif exit_interp_build_bb(dcontext, &bb); build_basic_block_fragment_done: dcontext->whereami = wherewasi; KSTOP(bb_building); return f; } /* Builds an instrlist_t as though building a bb from pretend_pc, but decodes * from pc. * Use recreate_fragment_ilist() for building an instrlist_t for a fragment. * If check_vm_area is false, Does NOT call check_thread_vm_area()! * Make sure you know it will terminate at the right spot. It does * check selfmod and native_exec for elision, but otherwise will * follow ubrs to the limit. 
Currently used for * record_translation_info() (case 3559). * If vmlist!=NULL and check_vm_area, returns the vmlist, which the * caller must free by calling vm_area_destroy_list. */ instrlist_t * recreate_bb_ilist(dcontext_t *dcontext, byte *pc, byte *pretend_pc, app_pc stop_pc, uint flags, uint *res_flags OUT, uint *res_exit_type OUT, bool check_vm_area, bool mangle, void **vmlist_out OUT, bool call_client, bool for_trace) { build_bb_t bb; /* don't know full range -- just do simple check now */ if (!is_readable_without_exception(pc, 4)) { LOG(THREAD, LOG_INTERP, 3, "recreate_bb_ilist: cannot read memory at " PFX "\n", pc); return NULL; } LOG(THREAD, LOG_INTERP, 3, "\nbuilding bb instrlist now *********************\n"); init_build_bb(&bb, pc, false /*not interp*/, false /*not for cache*/, mangle, true /*translation*/, INVALID_FILE, flags, NULL /*no overlap*/); /* We support a stop pc to ensure selfmod matches how it was originally built, * w/o having to include the next instr which might have triggered the bb * termination but not been included in the bb (i#1441). * It only applies to full_decode. */ bb.stop_pc = stop_pc; bb.check_vm_area = check_vm_area; if (check_vm_area && vmlist_out != NULL) bb.record_vmlist = true; if (check_vm_area && !bb.record_vmlist) bb.record_vmlist = true; /* for xl8 region checks */ /* PR 214962: we call bb hook again, unless the client told us * DR_EMIT_STORE_TRANSLATIONS, in which case we shouldn't come here, * except for traces (see below): */ bb.pass_to_client = (DYNAMO_OPTION(code_api) && call_client && /* i#843: This flag cannot be changed dynamically, so * its current value should match the value used at * ilist building time. Alternatively, we could store * bb->pass_to_client in the fragment. */ !os_module_get_flag(pc, MODULE_NULL_INSTRUMENT)); /* PR 299808: we call bb hook again when translating a trace that * didn't have DR_EMIT_STORE_TRANSLATIONS on itself (or on any * for_trace bb if there was no trace hook). */ bb.for_trace = for_trace; /* instrument_basic_block, called by build_bb_ilist, verifies that all * non-meta instrs have translation fields */ if (pretend_pc != pc) bb.pretend_pc = pretend_pc; build_bb_ilist(dcontext, &bb); LOG(THREAD, LOG_INTERP, 3, "\ndone building bb instrlist *********************\n\n"); if (res_flags != NULL) *res_flags = bb.flags; if (res_exit_type != NULL) *res_exit_type = bb.exit_type; if (check_vm_area && vmlist_out != NULL) *vmlist_out = bb.vmlist; else if (bb.record_vmlist) vm_area_destroy_list(dcontext, bb.vmlist); return bb.ilist; } /* Re-creates an ilist of the fragment that currently contains the * passed-in code cache pc, also returns the fragment. * * Exactly one of pc and (f_res or *f_res) must be NULL: * If pc==NULL, assumes that *f_res is the fragment to use; * else, looks up the fragment, allocating it if necessary. * If f_res!=NULL, the fragment is returned and whether it was allocated * is returned in the alloc_res param. * If f_res==NULL, if the fragment was allocated it is freed here. * * NOTE : does not add prefix instructions to the created ilist, if we change * this to add them be sure to check recreate_app_* for compatibility (for ex. * adding them and setting their translation to pc would break current * implementation, also setting translation to NULL would trigger an assert) * * Returns NULL if unable to recreate the fragment ilist (fragment not found * or fragment is pending deletion and app memory might have changed). 
* In that case f_res is still pointed at the fragment if it was found, and * alloc is valid. * * For proper synchronization : * If caller is the dcontext's owner then needs to be couldbelinking, otherwise * the dcontext's owner should be suspended and the callers should own the * thread_initexit_lock */ instrlist_t * recreate_fragment_ilist(dcontext_t *dcontext, byte *pc, /*IN/OUT*/ fragment_t **f_res, /*OUT*/ bool *alloc_res, bool mangle, bool call_client) { fragment_t *f; uint flags = 0; instrlist_t *ilist; bool alloc = false, ok; monitor_data_t md = { 0, }; dr_isa_mode_t old_mode = DEFAULT_ISA_MODE; /* check synchronization, we need to make sure no one flushes the * fragment we just looked up while we are recreating it, if it's the * caller's dcontext then just need to be couldbelinking, otherwise need * the thread_initexit_lock since then we are looking up in someone else's * table (the dcontext's owning thread would also need to be suspended) */ ASSERT((dcontext != GLOBAL_DCONTEXT && d_r_get_thread_id() == dcontext->owning_thread && is_couldbelinking(dcontext)) || (ASSERT_OWN_MUTEX(true, &thread_initexit_lock), true)); STATS_INC(num_recreated_fragments); if (pc == NULL) { ASSERT(f_res != NULL && *f_res != NULL); f = *f_res; } else { /* Ensure callers don't give us both valid f and valid pc */ ASSERT(f_res == NULL || *f_res == NULL); LOG(THREAD, LOG_INTERP, 3, "recreate_fragment_ilist: looking up pc " PFX "\n", pc); f = fragment_pclookup_with_linkstubs(dcontext, pc, &alloc); LOG(THREAD, LOG_INTERP, 3, "\tfound F%d\n", f == NULL ? -1 : f->id); if (f_res != NULL) *f_res = f; /* ref case 3559, others, we won't be able to reliably recreate if * target is pending flush, original memory might no longer be there or * the memory might have changed. caller should use the stored * translation info instead. */ if (f == NULL || TEST(FRAG_WAS_DELETED, f->flags)) { ASSERT(f != NULL || !alloc); /* alloc shouldn't be set if no f */ ilist = NULL; goto recreate_fragment_done; } } /* Recreate in same mode as original fragment */ ok = dr_set_isa_mode(dcontext, FRAG_ISA_MODE(f->flags), &old_mode); ASSERT(ok); if ((f->flags & FRAG_IS_TRACE) == 0) { /* easy case: just a bb */ ilist = recreate_bb_ilist(dcontext, (byte *)f->tag, (byte *)f->tag, NULL /*default stop*/, 0 /*no pre flags*/, &flags, NULL, true /*check vm area*/, mangle, NULL, call_client, false /*not for_trace*/); ASSERT(ilist != NULL); if (ilist == NULL) /* a race */ goto recreate_fragment_done; if (PAD_FRAGMENT_JMPS(f->flags)) nop_pad_ilist(dcontext, f, ilist, false /* set translation */); goto recreate_fragment_done; } else { /* build trace up one bb at a time */ instrlist_t *bb; byte *apc; trace_only_t *t = TRACE_FIELDS(f); uint i; instr_t *last; bool mangle_at_end = mangle_trace_at_end(); if (mangle_at_end) { /* we need an md for mangle_trace */ md.trace_tag = f->tag; /* be sure we ask for translation fields */ md.trace_flags = f->flags | FRAG_HAS_TRANSLATION_INFO; md.num_blks = t->num_bbs; md.blk_info = (trace_bb_build_t *)HEAP_ARRAY_ALLOC( dcontext, trace_bb_build_t, md.num_blks, ACCT_TRACE, true); md.pass_to_client = true; } ilist = instrlist_create(dcontext); STATS_INC(num_recreated_traces); ASSERT(t->bbs != NULL); for (i = 0; i < t->num_bbs; i++) { void *vmlist = NULL; apc = (byte *)t->bbs[i].tag; bb = recreate_bb_ilist( dcontext, apc, apc, NULL /*default stop*/, 0 /*no pre flags*/, &flags, &md.final_exit_flags, true /*check vm area*/, !mangle_at_end, (mangle_at_end ? 
&vmlist : NULL), call_client, true /*for_trace*/); ASSERT(bb != NULL); if (bb == NULL) { instrlist_clear_and_destroy(dcontext, ilist); vm_area_destroy_list(dcontext, vmlist); ilist = NULL; goto recreate_fragment_done; } if (mangle_at_end) md.blk_info[i].info = t->bbs[i]; last = instrlist_last(bb); ASSERT(last != NULL); if (mangle_at_end) { md.blk_info[i].vmlist = vmlist; md.blk_info[i].final_cti = instr_is_cti(instrlist_last(bb)); } /* PR 299808: we need to duplicate what we did when we built the trace. * While if there's no client trace hook we could mangle and fixup as we * go, for simplicity we mangle at the end either way (in either case our * code here is not exactly what we did when we made it anyway) * PR 333597: we can't use mangle_trace if we have elision on. */ if (mangle && !mangle_at_end) { /* To duplicate the trace-building logic: * - call fixup_last_cti() * - retarget the ibl routine just like extend_trace() does */ app_pc target = (last != NULL) ? opnd_get_pc(instr_get_target(last)) : NULL; /* FIXME: is it always safe */ /* convert a basic block IBL, and retarget it to IBL_TRACE* */ if (target != NULL && is_indirect_branch_lookup_routine(dcontext, target)) { target = get_alternate_ibl_routine(dcontext, target, f->flags); ASSERT(target != NULL); LOG(THREAD, LOG_MONITOR, 3, "recreate_fragment_ilist: replacing ibl_routine to target=" PFX "\n", target); instr_set_target(last, opnd_create_pc(target)); instr_set_our_mangling(last, true); /* undone by target set */ } if (DYNAMO_OPTION(pad_jmps) && !INTERNAL_OPTION(pad_jmps_shift_bb)) { /* FIXME - hack, but pad_jmps_shift_bb will be on by * default. Synchronize changes here with recreate_fragment_ilist. * This hack is protected by asserts in nop_pad_ilist() (that * we never add nops to a bb if -pad_jmps_shift_bb) and in * extend_trace_pad_bytes (that we only add bbs to traces). 
*/ /* FIXME - on linux the signal fence exit can trigger the * protective assert in nop_pad_ilist() */ remove_nops_from_ilist(dcontext, bb _IF_DEBUG(true)); } if (instrlist_last(ilist) != NULL) { fixup_last_cti(dcontext, ilist, (app_pc)apc, flags, f->flags, NULL, NULL, true /* record translation */, NULL, NULL, NULL); } } instrlist_append(ilist, instrlist_first(bb)); instrlist_init(bb); /* to clear fields to make destroy happy */ instrlist_destroy(dcontext, bb); } /* PR 214962: re-apply client changes, this time storing translation * info for modified instrs */ if (call_client) /* else it's decode_trace() who is not really recreating */ instrument_trace(dcontext, f->tag, ilist, true); /* instrument_trace checks that all non-meta instrs have translation fields */ if (mangle) { if (mangle_at_end) { if (!mangle_trace(dcontext, ilist, &md)) { instrlist_clear_and_destroy(dcontext, ilist); ilist = NULL; goto recreate_fragment_done; } } /* else we mangled one bb at a time up above */ #ifdef INTERNAL /* we only optimize traces */ if (dynamo_options.optimize) { /* re-apply all optimizations to ilist * assumption: all optimizations are deterministic and stateless, * so we can exactly replicate their results */ LOG(THREAD_GET, LOG_INTERP, 2, "\tre-applying optimizations to F%d\n", f->id); # ifdef SIDELINE if (dynamo_options.sideline) { if (!TEST(FRAG_DO_NOT_SIDELINE, f->flags)) optimize_trace(dcontext, f->tag, ilist); /* else, never optimized */ } else # endif optimize_trace(dcontext, f->tag, ilist); } #endif /* FIXME: case 4718 append_trace_speculate_last_ibl(true) * should be called as well */ if (PAD_FRAGMENT_JMPS(f->flags)) nop_pad_ilist(dcontext, f, ilist, false /* set translation */); } } recreate_fragment_done: if (md.blk_info != NULL) { uint i; for (i = 0; i < md.num_blks; i++) { vm_area_destroy_list(dcontext, md.blk_info[i].vmlist); md.blk_info[i].vmlist = NULL; } HEAP_ARRAY_FREE(dcontext, md.blk_info, trace_bb_build_t, md.num_blks, ACCT_TRACE, true); } if (alloc_res != NULL) *alloc_res = alloc; if (f_res == NULL && alloc) fragment_free(dcontext, f); ok = dr_set_isa_mode(dcontext, old_mode, NULL); ASSERT(ok); return ilist; } /*** TRACE BUILDING ROUTINES *****************************************************/ static void process_nops_for_trace(dcontext_t *dcontext, instrlist_t *ilist, uint flags _IF_DEBUG(bool recreating)) { if (PAD_FRAGMENT_JMPS(flags) && !INTERNAL_OPTION(pad_jmps_shift_bb)) { /* FIXME - hack, but pad_jmps_shift_bb will be on by * default. Synchronize changes here with recreate_fragment_ilist. * This hack is protected by asserts in nop_pad_ilist() (that * we never add nops to a bb if -pad_jmps_shift_bb) and in * extend_trace_pad_bytes (that we only add bbs to traces). 
*/ /* FIXME - on linux the signal fence exit can trigger the * protective assert in nop_pad_ilist() */ remove_nops_from_ilist(dcontext, ilist _IF_DEBUG(recreating)); } } /* Combines instrlist_preinsert to ilist and the size calculation of the addition */ static inline int tracelist_add(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, instr_t *inst) { /* when we emit the trace we're going to call instr_length() on all instrs * anyway, and we'll re-use any memory allocated here for an encoding */ int size; #if defined(X86) && defined(X64) if (!X64_CACHE_MODE_DC(dcontext)) { instr_set_x86_mode(inst, true /*x86*/); instr_shrink_to_32_bits(inst); } #endif size = instr_length(dcontext, inst); instrlist_preinsert(ilist, where, inst); return size; } /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ #ifdef X86 /* Combines instrlist_postinsert to ilist and the size calculation of the addition */ static inline int tracelist_add_after(dcontext_t *dcontext, instrlist_t *ilist, instr_t *where, instr_t *inst) { /* when we emit the trace we're going to call instr_length() on all instrs * anyway, and we'll re-use any memory allocated here for an encoding */ int size; # ifdef X64 if (!X64_CACHE_MODE_DC(dcontext)) { instr_set_x86_mode(inst, true /*x86*/); instr_shrink_to_32_bits(inst); } # endif size = instr_length(dcontext, inst); instrlist_postinsert(ilist, where, inst); return size; } #endif /* X86 */ #ifdef HASHTABLE_STATISTICS /* increments a given counter - assuming XCX/R2 is dead */ int insert_increment_stat_counter(dcontext_t *dcontext, instrlist_t *trace, instr_t *next, uint *counter_address) { int added_size = 0; /* incrementing a branch-type specific thread private counter */ opnd_t private_branchtype_counter = OPND_CREATE_ABSMEM(counter_address, OPSZ_4); /* using LEA to avoid clobbering eflags in a simple load-increment-store */ /*>>> movl counter, %ecx */ /*>>> lea 1(%ecx), %ecx */ /*>>> movl %ecx, counter */ /* x64: the counter is still 32 bits */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2), private_branchtype_counter)); added_size += tracelist_add( dcontext, trace, next, XINST_CREATE_add(dcontext, opnd_create_reg(SCRATCH_REG2), OPND_CREATE_INT8(1))); added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_store(dcontext, private_branchtype_counter, opnd_create_reg(SCRATCH_REG2))); return added_size; } #endif /* HASHTABLE_STATISTICS */ /* inserts proper instruction(s) to restore XCX spilled on indirect branch mangling * assumes target instrlist is a trace! 
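 * (traces are private; without -private_ib_in_tls the restore comes from the
 * dcontext rather than from TLS)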
* returns size to be added to trace */ static inline int insert_restore_spilled_xcx(dcontext_t *dcontext, instrlist_t *trace, instr_t *next) { int added_size = 0; if (DYNAMO_OPTION(private_ib_in_tls)) { #ifdef X86 if (X64_CACHE_MODE_DC(dcontext) && !X64_MODE_DC(dcontext) && IF_X64_ELSE(DYNAMO_OPTION(x86_to_x64_ibl_opt), false)) { added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_R9))); } else #endif { added_size += tracelist_add( dcontext, trace, next, XINST_CREATE_load( dcontext, opnd_create_reg(SCRATCH_REG2), opnd_create_tls_slot(os_tls_offset(MANGLE_XCX_SPILL_SLOT)))); } } else { /* We need to restore XCX from TLS for shared fragments, but from * mcontext for private fragments, and all traces are private */ added_size += tracelist_add(dcontext, trace, next, instr_create_restore_from_dcontext( dcontext, SCRATCH_REG2, SCRATCH_REG2_OFFS)); } return added_size; } bool instr_is_trace_cmp(dcontext_t *dcontext, instr_t *inst) { if (!instr_is_our_mangling(inst)) return false; #ifdef X86 return # ifdef X64 instr_get_opcode(inst) == OP_mov_imm || /* mov %rax -> xbx-tls-spill-slot */ instr_get_opcode(inst) == OP_mov_st || instr_get_opcode(inst) == OP_lahf || instr_get_opcode(inst) == OP_seto || instr_get_opcode(inst) == OP_cmp || instr_get_opcode(inst) == OP_jnz || instr_get_opcode(inst) == OP_add || instr_get_opcode(inst) == OP_sahf # else instr_get_opcode(inst) == OP_lea || instr_get_opcode(inst) == OP_jecxz || instr_get_opcode(inst) == OP_jmp # endif ; #elif defined(AARCHXX) /* FIXME i#1668, i#2974: NYI on ARM/AArch64 */ ASSERT_NOT_IMPLEMENTED(DYNAMO_OPTION(disable_traces)); return false; #endif } /* 32-bit only: inserts a comparison to speculative_tag with no side effect and * if value is matched continue target is assumed to be immediately * after targeter (which must be < 127 bytes away). * returns size to be added to trace */ static int insert_transparent_comparison(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, /* exit CTI */ app_pc speculative_tag) { int added_size = 0; #ifdef X86 instr_t *jecxz; instr_t *continue_label = INSTR_CREATE_label(dcontext); /* instead of: * cmp ecx,const * we use: * lea -const(ecx) -> ecx * jecxz continue * lea const(ecx) -> ecx * jmp exit # usual targeter for stay on trace comparison * continue: # if match, we target post-targeter * * we have to use the landing pad b/c we don't know whether the * stub will be <128 away */ /* lea requires OPSZ_lea operand */ added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX), opnd_create_base_disp(REG_ECX, REG_NULL, 0, -((int)(ptr_int_t)speculative_tag), OPSZ_lea))); jecxz = INSTR_CREATE_jecxz(dcontext, opnd_create_instr(continue_label)); /* do not treat jecxz as exit cti! 
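     * (it is marked meta below so it is not mangled as a fragment exit)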
*/ instr_set_meta(jecxz); added_size += tracelist_add(dcontext, trace, targeter, jecxz); /* need to recover address in ecx */ IF_X64(ASSERT_NOT_IMPLEMENTED(!X64_MODE_DC(dcontext))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ECX), opnd_create_base_disp( REG_ECX, REG_NULL, 0, ((int)(ptr_int_t)speculative_tag), OPSZ_lea))); added_size += tracelist_add_after(dcontext, trace, targeter, continue_label); #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif return added_size; } #if defined(X86) && defined(X64) static int mangle_x64_ib_in_trace(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, app_pc next_tag) { int added_size = 0; if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_mov_st( dcontext, opnd_create_tls_slot(os_tls_offset(PREFIX_XAX_SPILL_SLOT)), opnd_create_reg(REG_XAX))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_XAX), OPND_CREATE_INTPTR((ptr_int_t)next_tag))); } else { ASSERT(X64_CACHE_MODE_DC(dcontext)); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_R8), opnd_create_reg(REG_XAX))); added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_mov_imm(dcontext, opnd_create_reg(REG_R10), OPND_CREATE_INTPTR((ptr_int_t)next_tag))); } /* saving in the trace and restoring in ibl means that * -unsafe_ignore_eflags_{trace,ibl} must be equivalent */ if (!INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_mov_st( dcontext, opnd_create_tls_slot(os_tls_offset(INDIRECT_STUB_SPILL_SLOT)), opnd_create_reg(REG_XAX))); } /* FIXME: share w/ insert_save_eflags() */ added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lahf(dcontext)); if (!INTERNAL_OPTION(unsafe_ignore_overflow)) { /* OF needs saving */ /* Move OF flags into the OF flag spill slot. */ added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_setcc(dcontext, OP_seto, opnd_create_reg(REG_AL))); } if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp( dcontext, opnd_create_reg(REG_XCX), opnd_create_tls_slot(os_tls_offset(INDIRECT_STUB_SPILL_SLOT)))); } else { added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), opnd_create_reg(REG_R10))); } } else { added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_XCX), (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) ? 
opnd_create_reg(REG_XAX) : opnd_create_reg(REG_R10))); } /* change jmp into jne to trace cmp entry of ibl routine (special entry * that is after the eflags save) */ instr_set_opcode(targeter, OP_jnz); added_size++; /* jcc is 6 bytes, jmp is 5 bytes */ ASSERT(opnd_is_pc(instr_get_target(targeter))); instr_set_target(targeter, opnd_create_pc(get_trace_cmp_entry( dcontext, opnd_get_pc(instr_get_target(targeter))))); /* since the target gets lost we need to OR in this flag */ instr_exit_branch_set_type(targeter, instr_exit_branch_type(targeter) | INSTR_TRACE_CMP_EXIT); return added_size; } #endif /* Mangles an indirect branch in a trace where a basic block with tag "tag" * is being added as the next block beyond the indirect branch. * Returns the size of instructions added to trace. */ static int mangle_indirect_branch_in_trace(dcontext_t *dcontext, instrlist_t *trace, instr_t *targeter, app_pc next_tag, uint next_flags, instr_t **delete_after /*OUT*/, instr_t *end_instr) { int added_size = 0; #ifdef X86 instr_t *next = instr_get_next(targeter); /* all indirect branches should be ubrs */ ASSERT(instr_is_ubr(targeter)); /* expecting basic blocks only */ ASSERT((end_instr != NULL && targeter == end_instr) || targeter == instrlist_last(trace)); ASSERT(delete_after != NULL); *delete_after = (next == NULL || (end_instr != NULL && targeter == end_instr)) ? NULL : instr_get_prev(next); STATS_INC(trace_ib_cmp); /* Change jump to indirect_branch_lookup to a conditional jump * based on indirect target not equaling next block in trace * * the bb has already done: * spill xcx to xcx-tls-spill-slot * mov curtarget, xcx * <any other side effects of ind branch, like ret xsp adjust> * * and we now want to accomplish: * cmp ecx,const * * on 32-bit we use: * lea -const(ecx) -> ecx * jecxz continue * lea const(ecx) -> ecx * jmp exit # usual targeter for stay on trace comparison * continue: # if match, we target post-targeter * restore ecx * we have to use the landing pad b/c we don't know whether the * stub will be <128 away * * on 64-bit we use (PR 245832): * mov xax, xax-tls-spill-slot * mov $staytarget, xax * if !INTERNAL_OPTION(unsafe_ignore_eflags_{trace,ibl}) * mov xax, xbx-tls-spill-slot * lahf * seto al * cmp xcx, xbx-tls-spill-slot * else * cmp xcx, xax * jne exit * if xcx live: * mov xcx-tls-spill-slot, xcx * if flags live && unsafe options not on: * add 7f, al * sahf * if xax live: * mov xax-tls-spill-slot, xax */ # ifdef CUSTOM_TRACES_RET_REMOVAL IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* try to remove ret * FIXME: also handle ret imm => prev instr is add */ inst = instr_get_prev(targeter); if (dcontext->call_depth >= 0 && instr_raw_bits_valid(inst)) { byte *b = inst->bytes + inst->length - 1; /* 0x40538115 89 0d ec 68 06 40 mov %ecx -> 0x400668ec 0x4053811b 59 pop %esp (%esp) -> %ecx %esp 0x4053811c 83 c4 04 add $0x04 %esp -> %esp */ LOG(THREAD, LOG_MONITOR, 4, "ret removal: *b=0x%x, prev=" PFX ", dcontext=" PFX ", 0x%x\n", *b, *((int *)(b - 4)), dcontext, XCX_OFFSET); if ((*b == 0x59 && *((int *)(b - 4)) == ((uint)dcontext) + XCX_OFFSET) || (*(b - 3) == 0x59 && *((int *)(b - 7)) == ((uint)dcontext) + XCX_OFFSET && *(b - 2) == 0x83 && *(b - 1) == 0xc4)) { uint esp_add; /* already added calls & rets to call depth * if not negative, the call for this ret is earlier in this trace! 
*/ LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: removing ret!\n"); /* delete save ecx and pop */ if (*b == 0x59) { instr_set_raw_bits(inst, inst->bytes, inst->length - 7); esp_add = 4; } else { /* delete add too */ instr_set_raw_bits(inst, inst->bytes, inst->length - 10); esp_add = 4 + (uint)(*b); LOG(THREAD, LOG_MONITOR, 4, "*b=0x%x, esp_add=%d\n", *b, esp_add); } # ifdef DEBUG num_rets_removed++; # endif removed_ret = true; added_size += tracelist_add(dcontext, trace, targeter, INSTR_CREATE_lea(dcontext, opnd_create_reg(REG_ESP), opnd_create_base_disp(REG_ESP, REG_NULL, 0, esp_add, OPSZ_lea))); } } if (removed_ret) { *delete_after = instr_get_prev(targeter); return added_size; } # endif /* CUSTOM_TRACES_RET_REMOVAL */ # ifdef X64 if (X64_CACHE_MODE_DC(dcontext)) { added_size += mangle_x64_ib_in_trace(dcontext, trace, targeter, next_tag); } else { # endif if (!INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { /* if equal follow to the next instruction after the exit CTI */ added_size += insert_transparent_comparison(dcontext, trace, targeter, next_tag); /* leave jmp as it is, a jmp to exit stub (thence to ind br * lookup) */ } else { /* assume eflags don't need to be saved across ind branches, * so go ahead and use cmp, jne */ /* FIXME: no way to cmp w/ 64-bit immed */ IF_X64(ASSERT_NOT_IMPLEMENTED(!X64_MODE_DC(dcontext))); added_size += tracelist_add( dcontext, trace, targeter, INSTR_CREATE_cmp(dcontext, opnd_create_reg(REG_ECX), OPND_CREATE_INT32((int)(ptr_int_t)next_tag))); /* Change jmp into jne indirect_branch_lookup */ /* CHECK: is that also going to exit stub */ instr_set_opcode(targeter, OP_jnz); added_size++; /* jcc is 6 bytes, jmp is 5 bytes */ } # ifdef X64 } # endif /* X64 */ /* PR 214962: our spill restoration needs this whole sequence marked mangle */ instr_set_our_mangling(targeter, true); LOG(THREAD, LOG_MONITOR, 3, "fixup_last_cti: added cmp vs. 
" PFX " for ind br\n", next_tag); # ifdef HASHTABLE_STATISTICS /* If we do stay on the trace, increment a counter using dead XCX */ if (INTERNAL_OPTION(stay_on_trace_stats)) { ibl_type_t ibl_type; /* FIXME: see if can test the instr flags instead */ DEBUG_DECLARE(bool ok =) get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(targeter)), &ibl_type); ASSERT(ok); added_size += insert_increment_stat_counter( dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_stay_on_trace_stat); } # endif /* HASHTABLE_STATISTICS */ /* If we do stay on the trace, must restore xcx * TODO optimization: check if xcx is live or not in next bb */ added_size += insert_restore_spilled_xcx(dcontext, trace, next); # ifdef X64 if (X64_CACHE_MODE_DC(dcontext)) { LOG(THREAD, LOG_INTERP, 4, "next_flags for post-ibl-cmp: 0x%x\n", next_flags); if (!TEST(FRAG_WRITES_EFLAGS_6, next_flags) && !INTERNAL_OPTION(unsafe_ignore_eflags_trace)) { if (!TEST(FRAG_WRITES_EFLAGS_OF, next_flags) && /* OF was saved */ !INTERNAL_OPTION(unsafe_ignore_overflow)) { /* restore OF using add that overflows if OF was on when we did seto */ added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_add(dcontext, opnd_create_reg(REG_AL), OPND_CREATE_INT8(0x7f))); } added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_sahf(dcontext)); } else STATS_INC(trace_ib_no_flag_restore); /* TODO optimization: check if xax is live or not in next bb */ if (X64_MODE_DC(dcontext) || !DYNAMO_OPTION(x86_to_x64_ibl_opt)) { added_size += tracelist_add( dcontext, trace, next, INSTR_CREATE_mov_ld( dcontext, opnd_create_reg(REG_XAX), opnd_create_tls_slot(os_tls_offset(PREFIX_XAX_SPILL_SLOT)))); } else { added_size += tracelist_add(dcontext, trace, next, INSTR_CREATE_mov_ld(dcontext, opnd_create_reg(REG_XAX), opnd_create_reg(REG_R8))); } } # endif #elif defined(ARM) /* FIXME i#1551: NYI on ARM */ ASSERT_NOT_IMPLEMENTED(false); #endif /* X86/ARM */ return added_size; } /* This routine handles the mangling of the cti at the end of the * previous block when adding a new block (f) to the trace fragment. * If prev_l is not NULL, matches the ordinal of prev_l to the nth * exit cti in the trace instrlist_t. * * If prev_l is NULL: WARNING: this routine assumes that the previous * block can only have a single indirect branch -- otherwise there is * no way to determine which indirect exit targeted the new block! No * assumptions are made about direct exits -- we can walk through them * all to find the one that targeted the new block. * * Returns an upper bound on the size added to the trace with inserted * instructions. * If we change this to add a substantial # of instrs, should update * TRACE_CTI_MANGLE_SIZE_UPPER_BOUND (assert at bottom should notify us) * * If you want to re-add the ability to add the front end of a trace, * revive the now-removed CUSTOM_TRACES_ADD_TRACE define from the attic. */ static int fixup_last_cti(dcontext_t *dcontext, instrlist_t *trace, app_pc next_tag, uint next_flags, uint trace_flags, fragment_t *prev_f, linkstub_t *prev_l, bool record_translation, uint *num_exits_deleted /*OUT*/, /* If non-NULL, only looks inside trace between these two */ instr_t *start_instr, instr_t *end_instr) { app_pc target_tag; instr_t *inst, *targeter = NULL; /* at end of routine we will delete all instrs after this one: */ instr_t *delete_after = NULL; bool is_indirect = false; /* Added size for transformations done here. * Use tracelist_add to automate adding inserted instr sizes. 
*/ int added_size = 0; uint exits_deleted = 0; /* count exit stubs to get the ordinal of the exit that targeted us * start at prev_l, and count up extraneous exits and blks until end */ uint nth_exit = 0, cur_exit; #ifdef CUSTOM_TRACES_RET_REMOVAL bool removed_ret = false; #endif bool have_ordinal = false; if (prev_l != NULL && prev_l == get_deleted_linkstub(dcontext)) { int last_ordinal = get_last_linkstub_ordinal(dcontext); if (last_ordinal != -1) { nth_exit = last_ordinal; have_ordinal = true; } } if (!have_ordinal && prev_l != NULL && !LINKSTUB_FAKE(prev_l)) { linkstub_t *stub = FRAGMENT_EXIT_STUBS(prev_f); while (stub != prev_l) stub = LINKSTUB_NEXT_EXIT(stub); /* if prev_l is cbr followed by ubr, we'll get 1 for ubr, * but we want 0, so we count prev_l itself, then decrement */ stub = LINKSTUB_NEXT_EXIT(stub); while (stub != NULL) { nth_exit++; stub = LINKSTUB_NEXT_EXIT(stub); } } /* else, we assume it's the final exit */ LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: looking for %d-th exit cti from bottom\n", nth_exit); if (start_instr != NULL) { ASSERT(end_instr != NULL); } else { start_instr = instrlist_first(trace); end_instr = instrlist_last(trace); } start_instr = instr_get_prev(start_instr); /* get open-ended bound */ cur_exit = nth_exit; /* now match the ordinal to the instrs. * we don't have any way to find boundary with previous-previous block * to make sure we didn't go backwards too far -- does it matter? */ for (inst = end_instr; inst != NULL && inst != start_instr; inst = instr_get_prev(inst)) { if (instr_is_exit_cti(inst)) { if (cur_exit == 0) { ibl_type_t ibl_type; /* exit cti is guaranteed to have pc target */ target_tag = opnd_get_pc(instr_get_target(inst)); is_indirect = get_ibl_routine_type(dcontext, target_tag, &ibl_type); if (is_indirect) { /* this should be a trace exit stub therefore it cannot be IBL_BB* */ ASSERT(IS_IBL_TRACE(ibl_type.source_fragment_type)); targeter = inst; break; } else { if (prev_l != NULL) { /* direct jmp, better point to us */ ASSERT(target_tag == next_tag); targeter = inst; break; } else { /* need to search for targeting jmp */ DOLOG(4, LOG_MONITOR, { d_r_loginst(dcontext, 4, inst, "exit==targeter?"); }); LOG(THREAD, LOG_MONITOR, 4, "target_tag = " PFX ", next_tag = " PFX "\n", target_tag, next_tag); if (target_tag == next_tag) { targeter = inst; break; } } } } else if (prev_l != NULL) { LOG(THREAD, LOG_MONITOR, 4, "counting backwards: %d == target_tag = " PFX "\n", cur_exit, opnd_get_pc(instr_get_target(inst))); cur_exit--; } } /* is exit cti */ } ASSERT(targeter != NULL); if (record_translation) instrlist_set_translation_target(trace, instr_get_translation(targeter)); instrlist_set_our_mangling(trace, true); /* PR 267260 */ DOLOG(4, LOG_MONITOR, { d_r_loginst(dcontext, 4, targeter, "\ttargeter"); }); if (is_indirect) { added_size += mangle_indirect_branch_in_trace( dcontext, trace, targeter, next_tag, next_flags, &delete_after, end_instr); } else { /* direct jump or conditional branch */ instr_t *next = targeter->next; if (instr_is_cbr(targeter)) { LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: inverted logic of cbr\n"); if (next != NULL && instr_is_ubr(next)) { /* cbr followed by ubr: if cbr got us here, reverse cbr and * remove ubr */ instr_invert_cbr(targeter); instr_set_target(targeter, instr_get_target(next)); ASSERT(next == end_instr); delete_after = targeter; LOG(THREAD, LOG_MONITOR, 4, "\tremoved ubr following cbr\n"); } else { ASSERT_NOT_REACHED(); } } else if (instr_is_ubr(targeter)) { /* remove unnecessary ubr at end of block 
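             * (the block being appended follows immediately in the trace, so this
             * jump is dead)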
*/ delete_after = instr_get_prev(targeter); if (delete_after != NULL) { LOG(THREAD, LOG_MONITOR, 4, "fixup_last_cti: removed ubr\n"); } } else ASSERT_NOT_REACHED(); } /* remove all instrs after this cti -- but what if internal * control flow jumps ahead and then comes back? * too expensive to check for such all the time. * XXX: what to do? * * XXX: rather than adding entire trace on and then chopping off where * we exited, why not add after we know where to stop? */ if (delete_after != NULL) { ASSERT(delete_after != end_instr); delete_after = instr_get_next(delete_after); while (delete_after != NULL) { inst = delete_after; if (delete_after == end_instr) delete_after = NULL; else delete_after = instr_get_next(delete_after); if (instr_is_exit_cti(inst)) { /* assumption: passing in cache target to exit_stub_size works * just as well as linkstub_t target, since only cares whether * targeting ibl */ app_pc target = opnd_get_pc(instr_get_target(inst)); /* we already added all the stub size differences to the trace, * so we subtract the trace size of the stub here */ added_size -= local_exit_stub_size(dcontext, target, trace_flags); exits_deleted++; } else if (instr_opcode_valid(inst) && instr_is_cti(inst)) { LOG(THREAD, LOG_MONITOR, 3, "WARNING: deleting non-exit cti in unused tail of frag added to " "trace\n"); } d_r_loginst(dcontext, 4, inst, "\tdeleting"); instrlist_remove(trace, inst); added_size -= instr_length(dcontext, inst); instr_destroy(dcontext, inst); } } if (num_exits_deleted != NULL) *num_exits_deleted = exits_deleted; if (record_translation) instrlist_set_translation_target(trace, NULL); instrlist_set_our_mangling(trace, false); /* PR 267260 */ #if defined(X86) && defined(X64) DOCHECK(1, { if (FRAG_IS_32(trace_flags)) { instr_t *in; /* in case we missed any in tracelist_add() */ for (in = instrlist_first(trace); in != NULL; in = instr_get_next(in)) { if (instr_is_our_mangling(in)) ASSERT(instr_get_x86_mode(in)); } } }); #endif ASSERT(added_size < TRACE_CTI_MANGLE_SIZE_UPPER_BOUND); return added_size; } /* Add a speculative counter on last IBL exit * Returns additional size to add to trace estimate. 
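 * (a comparison vs. speculate_next_tag is inserted before the final IBL exit,
 * plus a direct exit that is taken on a match)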
*/ int append_trace_speculate_last_ibl(dcontext_t *dcontext, instrlist_t *trace, app_pc speculate_next_tag, bool record_translation) { /* unlike fixup_last_cti() here we are about to go directly to the IBL routine */ /* spill XCX in a scratch slot - note always using TLS */ int added_size = 0; ibl_type_t ibl_type; instr_t *inst = instrlist_last(trace); /* currently only relevant to last CTI */ instr_t *where = inst; /* preinsert before last CTI */ instr_t *next = instr_get_next(inst); DEBUG_DECLARE(bool ok;) ASSERT(speculate_next_tag != NULL); ASSERT(inst != NULL); ASSERT(instr_is_exit_cti(inst)); /* FIXME: see if can test the instr flags instead */ DEBUG_DECLARE(ok =) get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(inst)), &ibl_type); ASSERT(ok); if (record_translation) instrlist_set_translation_target(trace, instr_get_translation(inst)); instrlist_set_our_mangling(trace, true); /* PR 267260 */ STATS_INC(num_traces_end_at_ibl_speculative_link); #ifdef HASHTABLE_STATISTICS DOSTATS({ if (INTERNAL_OPTION(speculate_last_exit_stats)) { int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); added_size += tracelist_add( dcontext, trace, where, XINST_CREATE_store(dcontext, opnd_create_tls_slot(tls_stat_scratch_slot), opnd_create_reg(SCRATCH_REG2))); added_size += insert_increment_stat_counter( dcontext, trace, where, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_exit); added_size += tracelist_add( dcontext, trace, where, XINST_CREATE_load(dcontext, opnd_create_reg(SCRATCH_REG2), opnd_create_tls_slot(tls_stat_scratch_slot))); } }); #endif /* preinsert comparison before exit CTI, but increment of success * statistics after it */ /* we need to compare to speculate_next_tag now */ /* XCX holds value to match */ /* should use similar eflags-clobbering scheme to inline cmp */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* * 8d 89 76 9b bf ff lea -tag(%ecx) -> %ecx * e3 0b jecxz continue * 8d 89 8a 64 40 00 lea tag(%ecx) -> %ecx * e9 17 00 00 00 jmp <exit stub 1: IBL> * * continue: * <increment stats> * # see FIXME whether to go to prefix or do here * <restore app ecx> * e9 cc aa dd 00 jmp speculate_next_tag * */ /* leave jmp as it is, a jmp to exit stub (thence to ind br lookup) */ added_size += insert_transparent_comparison(dcontext, trace, where, speculate_next_tag); #ifdef HASHTABLE_STATISTICS DOSTATS({ reg_id_t reg = IF_X86_ELSE(REG_XCX, DR_REG_R2); if (INTERNAL_OPTION(speculate_last_exit_stats)) { int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); /* XCX already saved */ added_size += insert_increment_stat_counter( dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_speculate_success); /* restore XCX to app IB target*/ added_size += tracelist_add( dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); } }); #endif /* adding a new CTI for speculative target that is a pseudo * direct exit. Although we could have used the indirect stub * to be the unlinked path, with a new CTI way we can unlink a * speculated fragment without affecting any other targets * reached by the IBL. Also in general we could decide to add * multiple speculative comparisons and to chain them we'd * need new CTIs for them. */ /* Ensure all register state is properly preserved on both linked * and unlinked paths - currently only XCX is in use. 
* * * Preferably we should be targeting prefix of target to * save some space for recovering XCX from hot path. We'd * restore XCX in the exit stub when unlinked. * So it would act like a direct CTI when linked and like indirect * when unlinked. It could just be an unlinked indirect stub, if * we haven't modified any other registers or flags. * * For simplicity, we currently restore XCX here and use a plain * direct exit stub that goes to target start_pc instead of * prefixes. * * FIXME: (case 5085) the problem with the current scheme is that * when we exit unlinked the source will be marked as a DIRECT * exit - therefore no security policies will be enforced. * * FIXME: (case 4718) should add speculated target to current list * in case of RCT policy that needs to be invalidated if target is * flushed */ /* must restore xcx to app value, FIXME: see above for doing this in prefix+stub */ added_size += insert_restore_spilled_xcx(dcontext, trace, next); /* add a new direct exit stub */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_jump(dcontext, opnd_create_pc(speculate_next_tag))); LOG(THREAD, LOG_INTERP, 3, "append_trace_speculate_last_ibl: added cmp vs. " PFX " for ind br\n", speculate_next_tag); if (record_translation) instrlist_set_translation_target(trace, NULL); instrlist_set_our_mangling(trace, false); /* PR 267260 */ return added_size; } #ifdef HASHTABLE_STATISTICS /* Add a counter on last IBL exit * if speculate_next_tag is not NULL then check case 4817's possible success */ /* FIXME: remove this routine once append_trace_speculate_last_ibl() * currently useful only to see statistics without side effects of * adding exit stub */ int append_ib_trace_last_ibl_exit_stat(dcontext_t *dcontext, instrlist_t *trace, app_pc speculate_next_tag) { /* unlike fixup_last_cti() here we are about to go directly to the IBL routine */ /* spill XCX in a scratch slot - note always using TLS */ int tls_stat_scratch_slot = os_tls_offset(HTABLE_STATS_SPILL_SLOT); int added_size = 0; ibl_type_t ibl_type; instr_t *inst = instrlist_last(trace); /* currently only relevant to last CTI */ instr_t *where = inst; /* preinsert before exit CTI */ reg_id_t reg = IF_X86_ELSE(REG_XCX, DR_REG_R2); DEBUG_DECLARE(bool ok;) /* should use similar eflags-clobbering scheme to inline cmp */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); ASSERT(inst != NULL); ASSERT(instr_is_exit_cti(inst)); /* FIXME: see if can test the instr flags instead */ ok = get_ibl_routine_type(dcontext, opnd_get_pc(instr_get_target(inst)), &ibl_type); ASSERT(ok); added_size += tracelist_add( dcontext, trace, where, XINST_CREATE_store(dcontext, opnd_create_tls_slot(tls_stat_scratch_slot), opnd_create_reg(reg))); added_size += insert_increment_stat_counter( dcontext, trace, where, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_exit); added_size += tracelist_add(dcontext, trace, where, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); if (speculate_next_tag != NULL) { instr_t *next = instr_get_next(inst); /* preinsert comparison before exit CTI, but increment goes after it */ /* we need to compare to speculate_next_tag now - just like * fixup_last_cti() would do later. 
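     * (this stats-only variant leaves the exit cti itself unmodified)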
*/ /* ECX holds value to match here */ /* leave jmp as it is, a jmp to exit stub (thence to ind br lookup) */ /* continue: * increment success counter * jmp targeter * * FIXME: now the last instruction is no longer the exit_cti - see if that * breaks any assumptions, using a short jump to see if anyone erroneously * uses this */ added_size += insert_transparent_comparison(dcontext, trace, where, speculate_next_tag); /* we'll kill again although ECX restored unnecessarily by comparison routine */ added_size += insert_increment_stat_counter( dcontext, trace, next, &get_ibl_per_type_statistics(dcontext, ibl_type.branch_type) ->ib_trace_last_ibl_speculate_success); /* restore ECX */ added_size += tracelist_add(dcontext, trace, next, XINST_CREATE_load(dcontext, opnd_create_reg(reg), opnd_create_tls_slot(tls_stat_scratch_slot))); /* jmp where */ added_size += tracelist_add(dcontext, trace, next, IF_X86_ELSE(INSTR_CREATE_jmp_short, XINST_CREATE_jump)( dcontext, opnd_create_instr(where))); } return added_size; } #endif /* HASHTABLE_STATISTICS */ /* Add the fragment f to the end of the trace instrlist_t kept in dcontext * * Note that recreate_fragment_ilist() is making assumptions about its operation * synchronize changes * * Returns the size change in the trace from mangling the previous block * (assumes the caller has already calculated the size from adding the new block) */ uint extend_trace(dcontext_t *dcontext, fragment_t *f, linkstub_t *prev_l) { monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field; fragment_t *prev_f = NULL; instrlist_t *trace = &(md->trace); instrlist_t *ilist; uint size; uint prev_mangle_size = 0; uint num_exits_deleted = 0; uint new_exits_dir = 0, new_exits_indir = 0; #ifdef X64 ASSERT((!!FRAG_IS_32(md->trace_flags) == !X64_MODE_DC(dcontext)) || (!FRAG_IS_32(md->trace_flags) && !X64_MODE_DC(dcontext) && DYNAMO_OPTION(x86_to_x64))); #endif STATS_INC(num_traces_extended); /* if you want to re-add the ability to add traces, revive * CUSTOM_TRACES_ADD_TRACE from the attic */ ASSERT(!TEST(FRAG_IS_TRACE, f->flags)); /* expecting block fragments */ if (prev_l != NULL) { ASSERT(!LINKSTUB_FAKE(prev_l) || /* we track the ordinal of the del linkstub so it's ok */ prev_l == get_deleted_linkstub(dcontext)); prev_f = linkstub_fragment(dcontext, prev_l); LOG(THREAD, LOG_MONITOR, 4, "prev_l = owned by F%d, branch pc " PFX "\n", prev_f->id, EXIT_CTI_PC(prev_f, prev_l)); } else { LOG(THREAD, LOG_MONITOR, 4, "prev_l is NULL\n"); } /* insert code to optimize last branch based on new fragment */ if (instrlist_last(trace) != NULL) { prev_mangle_size = fixup_last_cti(dcontext, trace, f->tag, f->flags, md->trace_flags, prev_f, prev_l, false, &num_exits_deleted, NULL, NULL); } #ifdef CUSTOM_TRACES_RET_REMOVAL /* add now, want fixup to operate on depth before adding new blk */ dcontext->call_depth += f->num_calls; dcontext->call_depth -= f->num_rets; #endif LOG(THREAD, LOG_MONITOR, 4, "\tadding block %d == " PFX "\n", md->num_blks, f->tag); size = md->trace_buf_size - md->trace_buf_top; LOG(THREAD, LOG_MONITOR, 4, "decoding F%d into trace buf @" PFX " + 0x%x = " PFX "\n", f->id, md->trace_buf, md->trace_buf_top, md->trace_buf + md->trace_buf_top); /* FIXME: PR 307388: if md->pass_to_client, much of this is a waste of time as * we're going to re-mangle and re-fixup after passing our unmangled list to the * client. We do want to keep the size estimate, which requires having the last * cti at least, so for now we keep all the work. 
Of course the size estimate is * less valuable when the client may add a ton of instrumentation. */ /* decode_fragment will convert f's ibl routines into those appropriate for * our trace, whether f and the trace are shared or private */ ilist = decode_fragment(dcontext, f, md->trace_buf + md->trace_buf_top, &size, md->trace_flags, &new_exits_dir, &new_exits_indir); md->blk_info[md->num_blks].info.tag = f->tag; #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) if (md->num_blks > 0) md->blk_info[md->num_blks - 1].info.num_exits -= num_exits_deleted; md->blk_info[md->num_blks].info.num_exits = new_exits_dir + new_exits_indir; #endif md->num_blks++; /* We need to remove any nops we added for -pad_jmps (we don't expect there * to be any in a bb if -pad_jmps_shift_bb) to avoid screwing up * fixup_last_cti etc. */ process_nops_for_trace(dcontext, ilist, f->flags _IF_DEBUG(false /*!recreating*/)); DOLOG(5, LOG_MONITOR, { LOG(THREAD, LOG_MONITOR, 5, "post-trace-ibl-fixup, ilist is:\n"); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); ASSERT(!instrlist_get_our_mangling(ilist)); instrlist_append(trace, instrlist_first(ilist)); instrlist_init(ilist); /* clear fields so destroy won't kill instrs on trace list */ instrlist_destroy(dcontext, ilist); md->trace_buf_top += size; ASSERT(md->trace_buf_top < md->trace_buf_size); LOG(THREAD, LOG_MONITOR, 4, "post-extend_trace, trace buf + 0x%x => " PFX "\n", md->trace_buf_top, md->trace_buf); DOLOG(4, LOG_MONITOR, { LOG(THREAD, LOG_MONITOR, 4, "\nafter extending trace:\n"); instrlist_disassemble(dcontext, md->trace_tag, trace, THREAD); }); return prev_mangle_size; } /* If branch_type is 0, sets it to the type of a ubr */ static instr_t * create_exit_jmp(dcontext_t *dcontext, app_pc target, app_pc translation, uint branch_type) { instr_t *jmp = XINST_CREATE_jump(dcontext, opnd_create_pc(target)); instr_set_translation(jmp, translation); if (branch_type == 0) instr_exit_branch_set_type(jmp, instr_branch_type(jmp)); else instr_exit_branch_set_type(jmp, branch_type); instr_set_our_mangling(jmp, true); return jmp; } /* Given an ilist with no mangling or stitching together, this routine does those * things. This is used both for clients and for recreating traces * for state translation. * It assumes the ilist abides by client rules: single-mbr bbs, no * changes in source app code. Else, it returns false. * Elision is supported. * * Our docs disallow removal of an entire block, changing inter-block ctis, and * changing the ordering of the blocks, which is what allows us to correctly * mangle the inter-block ctis here. * * Reads the following fields from md: * - trace_tag * - trace_flags * - num_blks * - blk_info * - final_exit_flags */ bool mangle_trace(dcontext_t *dcontext, instrlist_t *ilist, monitor_data_t *md) { instr_t *inst, *next_inst, *start_instr, *jmp; uint blk, num_exits_deleted; app_pc fallthrough = NULL; bool found_syscall = false, found_int = false; /* We don't assert that mangle_trace_at_end() is true b/c the client * can unregister its bb and trace hooks if it really wants to, * though we discourage it. */ ASSERT(md->pass_to_client); LOG(THREAD, LOG_MONITOR, 2, "mangle_trace " PFX "\n", md->trace_tag); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "ilist passed to mangle_trace:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* We make 3 passes. 
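     * (the 2nd walk mangles and the 3rd stitches the delineated bbs back together;
     * see below)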
* 1st walk: find bb boundaries */ blk = 0; for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) { app_pc xl8 = instr_get_translation(inst); next_inst = instr_get_next(inst); if (instr_is_meta(inst)) continue; DOLOG(5, LOG_INTERP, { LOG(THREAD, LOG_MONITOR, 4, "transl " PFX " ", xl8); d_r_loginst(dcontext, 4, inst, "considering non-meta"); }); /* Skip blocks that don't end in ctis (except final) */ while (blk < md->num_blks - 1 && !md->blk_info[blk].final_cti) { LOG(THREAD, LOG_MONITOR, 4, "skipping fall-through bb #%d\n", blk); md->blk_info[blk].end_instr = NULL; blk++; } /* Ensure non-ignorable syscall/int2b terminates trace */ if (md->pass_to_client && !client_check_syscall(ilist, inst, &found_syscall, &found_int)) return false; /* Clients should not add new source code regions, which would mess us up * here, as well as mess up our cache consistency (both page prot and * selfmod). */ if (md->pass_to_client && (!vm_list_overlaps(dcontext, md->blk_info[blk].vmlist, xl8, xl8 + 1) && !(instr_is_ubr(inst) && opnd_is_pc(instr_get_target(inst)) && xl8 == opnd_get_pc(instr_get_target(inst)))) IF_WINDOWS(&&!vmvector_overlap(landing_pad_areas, md->blk_info[blk].info.tag, md->blk_info[blk].info.tag + 1))) { LOG(THREAD, LOG_MONITOR, 2, "trace error: out-of-bounds transl " PFX " vs block w/ start " PFX "\n", xl8, md->blk_info[blk].info.tag); CLIENT_ASSERT(false, "trace's app sources (instr_set_translation() targets) " "must remain within original bounds"); return false; } /* in case no exit ctis in last block, find last non-meta fall-through */ if (blk == md->num_blks - 1) { /* Do not call instr_length() on this inst: use length * of translation! (i#509) */ fallthrough = decode_next_pc(dcontext, xl8); } /* PR 299808: identify bb boundaries. We can't go by translations alone, as * ubrs can point at their targets and theoretically the entire trace could * be ubrs: so we have to go by exits, and limit what the client can do. We * can assume that each bb should not violate the bb callback rules (PR * 215217): if has cbr or mbr, that must end bb. If it has a call, that * could be elided; if not, its target should match the start of the next * block. We also want to * impose the can't-be-trace rules (PR 215219), which are not documented for * bbs: if more than one exit cti or if code beyond last exit cti then can't * be in a trace. We can soften a little and allow extra ubrs if they do not * target the subsequent block. FIXME: we could have stricter translation * reqts for ubrs: make them point at corresponding app ubr (but what if * really correspond to app cbr?): then can handle code past exit ubr. */ if (instr_will_be_exit_cti(inst) && ((!instr_is_ubr(inst) && !instr_is_near_call_direct(inst)) || (inst == instrlist_last(ilist) || (blk + 1 < md->num_blks && /* client is disallowed from changing bb exits and sequencing in trace * hook; if they change in bb for_trace, will be reflected here. */ opnd_get_pc(instr_get_target(inst)) == md->blk_info[blk + 1].info.tag)))) { DOLOG(4, LOG_INTERP, { d_r_loginst(dcontext, 4, inst, "end of bb"); }); /* Add jump that fixup_last_cti expects */ if (!instr_is_ubr(inst) IF_X86(|| instr_get_opcode(inst) == OP_jmp_far)) { app_pc target; if (instr_is_mbr(inst) IF_X86(|| instr_get_opcode(inst) == OP_jmp_far)) { target = get_ibl_routine( dcontext, get_ibl_entry_type(instr_branch_type(inst)), DEFAULT_IBL_TRACE(), get_ibl_branch_type(inst)); } else if (instr_is_cbr(inst)) { /* Do not call instr_length() on this inst: use length * of translation! 
(i#509) */ target = decode_next_pc(dcontext, xl8); } else { target = opnd_get_pc(instr_get_target(inst)); } ASSERT(target != NULL); jmp = create_exit_jmp(dcontext, target, xl8, instr_branch_type(inst)); instrlist_postinsert(ilist, inst, jmp); /* we're now done w/ vmlist: switch to end instr. * d_r_mangle() shouldn't remove the exit cti. */ vm_area_destroy_list(dcontext, md->blk_info[blk].vmlist); md->blk_info[blk].vmlist = NULL; md->blk_info[blk].end_instr = jmp; } else md->blk_info[blk].end_instr = inst; blk++; DOLOG(4, LOG_INTERP, { if (blk < md->num_blks) { LOG(THREAD, LOG_MONITOR, 4, "starting next bb " PFX "\n", md->blk_info[blk].info.tag); } }); if (blk >= md->num_blks && next_inst != NULL) { CLIENT_ASSERT(false, "unsupported trace modification: too many exits"); return false; } } #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) /* PR 306761: we need to re-calculate md->blk_info[blk].info.num_exits, * and then adjust after fixup_last_cti. */ if (instr_will_be_exit_cti(inst)) md->blk_info[blk].info.num_exits++; #endif } if (blk < md->num_blks) { ASSERT(!instr_is_ubr(instrlist_last(ilist))); if (blk + 1 < md->num_blks) { CLIENT_ASSERT(false, "unsupported trace modification: too few exits"); return false; } /* must have been no final exit cti: add final fall-through jmp */ jmp = create_exit_jmp(dcontext, fallthrough, fallthrough, 0); /* FIXME PR 307284: support client modifying, replacing, or adding * syscalls and ints: need to re-analyze. Then we wouldn't * need the md->final_exit_flags field anymore. * For now we disallow. */ if (found_syscall || found_int) { instr_exit_branch_set_type(jmp, md->final_exit_flags); #ifdef WINDOWS /* For INSTR_SHARED_SYSCALL, we set it pre-mangling, and it * survives to here if the instr is not clobbered, * and does not come from md->final_exit_flags */ if (TEST(INSTR_SHARED_SYSCALL, instrlist_last(ilist)->flags)) { instr_set_target(jmp, opnd_create_pc(shared_syscall_routine(dcontext))); instr_set_our_mangling(jmp, true); /* undone by target set */ } /* FIXME: test for linux too, but allowing ignorable syscalls */ if (!TESTANY(LINK_NI_SYSCALL_ALL IF_WINDOWS(| LINK_CALLBACK_RETURN), md->final_exit_flags) && !TEST(INSTR_SHARED_SYSCALL, instrlist_last(ilist)->flags)) { CLIENT_ASSERT(false, "client modified or added a syscall or int: unsupported"); return false; } #endif } instrlist_append(ilist, jmp); md->blk_info[blk].end_instr = jmp; } else { CLIENT_ASSERT((!found_syscall && !found_int) /* On linux we allow ignorable syscalls in middle. * FIXME PR 307284: see notes above. 
*/ IF_UNIX(|| !TEST(LINK_NI_SYSCALL, md->final_exit_flags)), "client changed exit target where unsupported\n" "check if trace ends in a syscall or int"); } ASSERT(instr_is_ubr(instrlist_last(ilist))); if (found_syscall) md->trace_flags |= FRAG_HAS_SYSCALL; else md->trace_flags &= ~FRAG_HAS_SYSCALL; /* 2nd walk: mangle */ DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "trace ilist before mangling:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* We do not need to remove nops since we never emitted */ d_r_mangle(dcontext, ilist, &md->trace_flags, true /*mangle calls*/, /* we're post-client so we don't need translations unless storing */ TEST(FRAG_HAS_TRANSLATION_INFO, md->trace_flags)); DOLOG(4, LOG_INTERP, { LOG(THREAD, LOG_INTERP, 4, "trace ilist after mangling:\n"); instrlist_disassemble(dcontext, md->trace_tag, ilist, THREAD); }); /* 3rd walk: stitch together delineated bbs */ for (blk = 0; blk < md->num_blks && md->blk_info[blk].end_instr == NULL; blk++) ; /* nothing */ start_instr = instrlist_first(ilist); for (inst = instrlist_first(ilist); inst != NULL; inst = next_inst) { next_inst = instr_get_next(inst); if (inst == md->blk_info[blk].end_instr) { /* Chain exit to point to next bb */ if (blk + 1 < md->num_blks) { /* We must do proper analysis so that state translation matches * created traces in whether eflags are restored post-cmp */ uint next_flags = forward_eflags_analysis(dcontext, ilist, instr_get_next(inst)); next_flags = instr_eflags_to_fragment_eflags(next_flags); LOG(THREAD, LOG_INTERP, 4, "next_flags for fixup_last_cti: 0x%x\n", next_flags); fixup_last_cti(dcontext, ilist, md->blk_info[blk + 1].info.tag, next_flags, md->trace_flags, NULL, NULL, TEST(FRAG_HAS_TRANSLATION_INFO, md->trace_flags), &num_exits_deleted, /* Only walk ilist between these instrs */ start_instr, inst); #if defined(RETURN_AFTER_CALL) || defined(RCT_IND_BRANCH) md->blk_info[blk].info.num_exits -= num_exits_deleted; #endif } blk++; /* skip fall-throughs */ while (blk < md->num_blks && md->blk_info[blk].end_instr == NULL) blk++; if (blk >= md->num_blks && next_inst != NULL) { CLIENT_ASSERT(false, "unsupported trace modification: exits modified"); return false; } start_instr = next_inst; } } if (blk < md->num_blks) { CLIENT_ASSERT(false, "unsupported trace modification: cannot find all exits"); return false; } return true; } /**************************************************************************** * UTILITIES */ /* Converts instr_t EFLAGS_ flags to corresponding fragment_t FRAG_ flags, * assuming that the instr_t flags correspond to the start of the fragment_t. * Assumes instr_eflags has already accounted for predication. */ uint instr_eflags_to_fragment_eflags(uint instr_eflags) { uint frag_eflags = 0; #ifdef X86 if (instr_eflags == EFLAGS_WRITE_OF) { /* this fragment writes OF before reading it * May still read other flags before writing them. 
*/ frag_eflags |= FRAG_WRITES_EFLAGS_OF; return frag_eflags; } #endif if (instr_eflags == EFLAGS_WRITE_ARITH) { /* fragment writes all 6 prior to reading */ frag_eflags |= FRAG_WRITES_EFLAGS_ARITH; #ifdef X86 frag_eflags |= FRAG_WRITES_EFLAGS_OF; #endif } return frag_eflags; } /* Returns one of these flags, defined in instr.h: * EFLAGS_WRITE_ARITH = writes all arith flags before reading any * EFLAGS_WRITE_OF = writes OF before reading it (x86-only) * EFLAGS_READ_ARITH = reads some of arith flags before writing * EFLAGS_READ_OF = reads OF before writing OF (x86-only) * 0 = no information before 1st cti */ uint forward_eflags_analysis(dcontext_t *dcontext, instrlist_t *ilist, instr_t *instr) { instr_t *in; uint eflags_6 = 0; /* holds flags written so far (in read slots) */ int eflags_result = 0; for (in = instr; in != NULL; in = instr_get_next_expanded(dcontext, ilist, in)) { if (!instr_valid(in) || instr_is_cti(in)) { /* give up */ break; } if (eflags_result != EFLAGS_WRITE_ARITH IF_X86(&&eflags_result != EFLAGS_READ_OF)) eflags_result = eflags_analysis(in, eflags_result, &eflags_6); DOLOG(4, LOG_INTERP, { d_r_loginst(dcontext, 4, in, "forward_eflags_analysis"); LOG(THREAD, LOG_INTERP, 4, "\tinstr %x => %x\n", instr_get_eflags(in, DR_QUERY_DEFAULT), eflags_result); }); } return eflags_result; } /* This translates f's code into an instrlist_t and returns it. * If buf is NULL: * The Instrs returned point into f's raw bits, so encode them * before you delete f! * Else, f's raw bits are copied into buf, and *bufsz is modified to * contain the total bytes copied * FIXME: should have release build checks and not just asserts where * we rely on caller to have big-enough buffer? * If target_flags differ from f->flags in sharing and/or in trace-ness, * converts ibl and tls usage in f to match the desired target_flags. * FIXME: converting from private to shared tls is not yet * implemented: we rely on -private_ib_in_tls for adding normal * private bbs to shared traces, and disallow any extensive mangling * (native_exec, selfmod) from becoming shared traces. * The caller is responsible for destroying the instrlist and its instrs. * If the fragment ends in an elided jmp, a new jmp instr is created, though * its bits field is NULL, allowing the caller to set it to do-not-emit if * trying to exactly duplicate or calculate the size, though most callers * will want to emit that jmp. See decode_fragment_exact(). */ static void instr_set_raw_bits_trace_buf(instr_t *instr, byte *buf_writable_addr, uint length) { /* The trace buffer is a writable address, so we need to translate to an * executable address for pointing at bits. */ instr_set_raw_bits(instr, vmcode_get_executable_addr(buf_writable_addr), length); } /* We want to avoid low-loglevel disassembly when we're in the middle of disassembly */ #define DF_LOGLEVEL(dc) (((dc) != GLOBAL_DCONTEXT && (dc)->in_opnd_disassemble) ? 
6U : 4U) instrlist_t * decode_fragment(dcontext_t *dcontext, fragment_t *f, byte *buf, /*IN/OUT*/ uint *bufsz, uint target_flags, /*OUT*/ uint *dir_exits, /*OUT*/ uint *indir_exits) { linkstub_t *l; cache_pc start_pc, stop_pc, pc, prev_pc = NULL, raw_start_pc; instr_t *instr, *cti = NULL, *raw_instr; instrlist_t *ilist = instrlist_create(dcontext); byte *top_buf = NULL, *cur_buf = NULL; app_pc target_tag; uint num_bytes, offset; uint num_dir = 0, num_indir = 0; bool tls_to_dc; bool shared_to_private = TEST(FRAG_SHARED, f->flags) && !TEST(FRAG_SHARED, target_flags); #ifdef WINDOWS /* The fragment could contain an ignorable sysenter instruction if * the following conditions are satisfied. */ bool possible_ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) && (get_syscall_method() == SYSCALL_METHOD_SYSENTER) && TEST(FRAG_HAS_SYSCALL, f->flags); #endif instrlist_t intra_ctis; coarse_info_t *info = NULL; bool coarse_elided_ubrs = false; dr_isa_mode_t old_mode; /* for decoding and get_ibl routines we need the dcontext mode set */ bool ok = dr_set_isa_mode(dcontext, FRAG_ISA_MODE(f->flags), &old_mode); /* i#1494: Decoding a code fragment from code cache, decode_fragment * may mess up the 32-bit/64-bit mode in -x86_to_x64 because 32-bit * application code is encoded as 64-bit code fragments into the code cache. * Thus we currently do not support using decode_fragment with -x86_to_x64, * including trace and coarse_units (coarse-grain code cache management) */ IF_X86_64(ASSERT(!DYNAMO_OPTION(x86_to_x64))); instrlist_init(&intra_ctis); /* Now we need to go through f and make cti's for each of its exit cti's and * non-exit cti's with off-fragment targets that need to be re-pc-relativized. * The rest of the instructions can be lumped into raw instructions. */ start_pc = FCACHE_ENTRY_PC(f); pc = start_pc; raw_start_pc = start_pc; if (buf != NULL) { cur_buf = buf; top_buf = cur_buf; ASSERT(bufsz != NULL); } /* Handle code after last exit but before stubs by allowing l to be NULL. * Handle coarse-grain fake fragment_t by discovering exits as we go, with * l being NULL the whole time. */ if (TEST(FRAG_FAKE, f->flags)) { ASSERT(TEST(FRAG_COARSE_GRAIN, f->flags)); info = get_fragment_coarse_info(f); ASSERT(info != NULL); coarse_elided_ubrs = (info->persisted && TEST(PERSCACHE_ELIDED_UBR, info->flags)) || (!info->persisted && DYNAMO_OPTION(coarse_freeze_elide_ubr)); /* Assumption: coarse-grain fragments have no ctis w/ off-fragment targets * that are not exit ctis */ l = NULL; } else l = FRAGMENT_EXIT_STUBS(f); while (true) { uint l_flags; cti = NULL; if (l != NULL) { stop_pc = EXIT_CTI_PC(f, l); } else if (TEST(FRAG_FAKE, f->flags)) { /* we don't know the size of f */ stop_pc = (cache_pc)UNIVERSAL_REGION_END; } else { /* fake fragment_t, or code between last exit but before stubs or padding */ stop_pc = fragment_body_end_pc(dcontext, f); if (PAD_FRAGMENT_JMPS(f->flags) && stop_pc != raw_start_pc) { /* We need to adjust stop_pc to account for any padding, only * way any code could get here is via client interface, * and there really is no nice way to distinguish it * from any padding we added. * PR 213005: we do not support decode_fragment() for bbs * that have code added beyond the last exit cti (we turn * off FRAG_COARSE_GRAIN and set FRAG_CANNOT_BE_TRACE). 
* Sanity check, make sure it at least looks like there is no * code here */ ASSERT(IS_SET_TO_DEBUG(raw_start_pc, stop_pc - raw_start_pc)); stop_pc = raw_start_pc; } } IF_X64(ASSERT(TEST(FRAG_FAKE, f->flags) /* no copy made */ || CHECK_TRUNCATE_TYPE_uint((stop_pc - raw_start_pc)))); num_bytes = (uint)(stop_pc - raw_start_pc); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decoding fragment from " PFX " to " PFX "\n", raw_start_pc, stop_pc); if (num_bytes > 0) { if (buf != NULL) { if (TEST(FRAG_FAKE, f->flags)) { /* we don't know the size of f, so we copy later, though * we do point instrs into buf before we copy! */ } else { /* first copy entire sequence up to exit cti into buf * so we don't have to copy it in pieces if we find cti's, if we don't * find any we want one giant piece anyway */ ASSERT(cur_buf + num_bytes < buf + *bufsz); memcpy(cur_buf, raw_start_pc, num_bytes); top_buf = cur_buf + num_bytes; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: copied " PFX "-" PFX " to " PFX "-" PFX "\n", raw_start_pc, raw_start_pc + num_bytes, cur_buf, cur_buf + num_bytes); /* cur_buf is incremented later -- it always points to start * of raw bytes for next-to-add-to-ilist instr, while * top_buf points to top of copied-to-buf data */ } } else { /* point at bits in code cache */ cur_buf = raw_start_pc; } /* now, we can't make a single raw instr for all that, there may * be calls with off-fragment targets in there that need to be * re-pc-relativized (instrumentation, etc. insert calls), or * we may not even know where the exit ctis are (coarse-grain fragments), * so walk through (original bytes!) and decode, looking for cti's */ instr = instr_create(dcontext); pc = raw_start_pc; /* do we have to translate the store of xcx from tls to dcontext? * be careful -- there can be private bbs w/ indirect branches, so * must see if this is a shared fragment we're adding */ tls_to_dc = (shared_to_private && !DYNAMO_OPTION(private_ib_in_tls) && /* if l==NULL (coarse src) we'll check for xcx every time */ (l == NULL || LINKSTUB_INDIRECT(l->flags))); do { #ifdef WINDOWS cache_pc prev_decode_pc = prev_pc; /* store the address of the * previous decode, the instr * before the one 'pc' * currently points to *before* * the call to decode() just * below */ #endif /* For frozen coarse fragments, ubr eliding forces us to check * every instr for a potential next fragment start. This is * expensive so users are advised to decode from app code if * possible (case 9325 -- need exact re-mangle + re-instrument), * though -coarse_pclookup_table helps. */ if (info != NULL && info->frozen && coarse_elided_ubrs && pc != start_pc) { /* case 6532: check for ib stubs as we elide the jmp there too */ bool stop = false; if (coarse_is_indirect_stub(pc)) { stop = true; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\thit ib stub @" PFX "\n", pc); } else { app_pc tag = fragment_coarse_entry_pclookup(dcontext, info, pc); if (tag != NULL) { stop = true; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\thit frozen tgt: " PFX "." 
PFX "\n", tag, pc); } } if (stop) { /* Add the ubr ourselves */ ASSERT(cti == NULL); cti = XINST_CREATE_jump(dcontext, opnd_create_pc(pc)); /* It's up to the caller to decide whether to mark this * as do-not-emit or not */ /* Process as an exit cti */ stop_pc = pc; pc = stop_pc; break; } } instr_reset(dcontext, instr); prev_pc = pc; pc = IF_AARCH64_ELSE(decode_cti_with_ldstex, decode_cti)(dcontext, pc, instr); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { disassemble_with_info(dcontext, prev_pc, THREAD, true, true); }); #ifdef WINDOWS /* Perform fixups for ignorable syscalls on XP & 2003. */ if (possible_ignorable_sysenter && instr_opcode_valid(instr) && instr_is_syscall(instr)) { /* We want to find the instr preceding the sysenter and have * it point to the post-sysenter instr in the trace, rather than * remain pointing to the post-sysenter instr in the BB. */ instr_t *sysenter_prev; instr_t *sysenter_post; ASSERT(prev_decode_pc != NULL); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: sysenter found @" PFX "\n", instr_get_raw_bits(instr)); /* create single raw instr for instructions up to the * sysenter EXCEPT for the immediately preceding instruction */ offset = (int)(prev_decode_pc - raw_start_pc); ASSERT(offset > 0); raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; /* Get the "mov" instr just before the sysenter. We know that * it's there because mangle put it there, so we can safely * decode at prev_decode_pc. */ sysenter_prev = instr_create(dcontext); decode(dcontext, prev_decode_pc, sysenter_prev); ASSERT(instr_valid(instr) && instr_is_mov_imm_to_tos(sysenter_prev)); instrlist_append(ilist, sysenter_prev); cur_buf += instr_length(dcontext, sysenter_prev); /* Append the sysenter. */ instr_set_raw_bits_trace_buf(instr, cur_buf, (int)(pc - prev_pc)); instrlist_append(ilist, instr); instr_set_meta(instr); /* skip current instr -- the sysenter */ cur_buf += (int)(pc - prev_pc); /* Decode the next instr -- the one after the sysenter. */ sysenter_post = instr_create(dcontext); prev_decode_pc = pc; prev_pc = pc; pc = decode(dcontext, pc, sysenter_post); if (DYNAMO_OPTION(ignore_syscalls_follow_sysenter)) ASSERT(!instr_is_cti(sysenter_post)); raw_start_pc = pc; /* skip the post-sysenter instr */ cur_buf += (int)(pc - prev_pc); instrlist_append(ilist, sysenter_post); /* Point the pre-sysenter mov to the post-sysenter instr. */ instr_set_src(sysenter_prev, 0, opnd_create_instr(sysenter_post)); instr_set_meta(sysenter_prev); instr_set_meta(sysenter_post); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { LOG(THREAD, LOG_INTERP, DF_LOGLEVEL(dcontext), "Post-sysenter -- F%d (" PFX ") into:\n", f->id, f->tag); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); /* Set all local state so that we can fall-thru and correctly * process the post-sysenter instruction. Point instr to the * already decoded instruction, sysenter_post. At this point, * pc and raw_start_pc point to just after sysenter_post, * prev_pc points to sysenter_post, prev_decode_pc points to * the sysenter itself, and cur_buf points to post_sysenter. 
*/ instr = sysenter_post; } #endif /* look for a cti with an off-fragment target */ if (instr_opcode_valid(instr) && instr_is_cti(instr)) { bool separate_cti = false; bool re_relativize = false; bool intra_target = true; DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "decode_fragment: found non-exit cti"); }); if (TEST(FRAG_FAKE, f->flags)) { /* Case 8711: we don't know the size so we can't even * distinguish off-fragment from intra-fragment targets. * Thus we have to assume that any cti is an exit cti, and * make all fragments for which that is not true into * fine-grained. * Except that we want to support intra-fragment ctis for * clients (i#665), so we use some heuristics. */ if (instr_is_cti_short_rewrite(instr, prev_pc)) { /* Pull in the two short jmps for a "short-rewrite" instr. * We must do this before asking whether it's an * intra-fragment so we don't just look at the * first part of the sequence. */ pc = remangle_short_rewrite(dcontext, instr, prev_pc, 0 /*same target*/); } if (!coarse_cti_is_intra_fragment(dcontext, info, instr, start_pc)) { /* Process this cti as an exit cti. FIXME: we will then * re-copy the raw bytes from this cti to the end of the * fragment at the top of the next loop iter, but for * coarse-grain bbs that should be just one instr for cbr bbs * or none for others, so not worth doing anything about. */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, DF_LOGLEVEL(dcontext), instr, "\tcoarse exit cti"); }); intra_target = false; stop_pc = prev_pc; pc = stop_pc; break; } else { /* we'll make it to intra_target if() below */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, DF_LOGLEVEL(dcontext), instr, "\tcoarse intra-fragment cti"); }); } } else if (instr_is_return(instr) || !opnd_is_near_pc(instr_get_target(instr))) { /* just leave it undecoded */ intra_target = false; } else if (instr_is_cti_short_rewrite(instr, prev_pc)) { /* Cti-short should only occur as exit ctis, which are * separated out unless we're decoding a fake fragment. We * include this case for future use, as otherwise we'll * decode just the short cti and think it is an * intra-fragment cti. */ ASSERT_NOT_REACHED(); separate_cti = true; re_relativize = true; intra_target = false; } else if (opnd_get_pc(instr_get_target(instr)) < start_pc || opnd_get_pc(instr_get_target(instr)) > start_pc + f->size) { separate_cti = true; re_relativize = true; intra_target = false; DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "\tcti has off-fragment target"); }); } if (intra_target) { /* intra-fragment target: we'll change its target operand * from pc to instr_t in second pass, so remember it here */ instr_t *clone = instr_clone(dcontext, instr); /* HACK: use note field! */ instr_set_note(clone, (void *)instr); /* we leave the clone pointing at valid original raw bits */ instrlist_append(&intra_ctis, clone); /* intra-fragment target */ DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, instr, "\tcti has intra-fragment target"); }); /* since the resulting instrlist could be manipulated, * we need to change the target operand from pc to instr_t. * that requires having this instr separated out now so * our clone-in-note-field hack above works. 
*/ separate_cti = true; re_relativize = false; } if (separate_cti) { /* create single raw instr for instructions up to the cti */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* now append cti, indicating that relative target must be * re-encoded, and that it is not an exit cti */ instr_set_meta(instr); if (re_relativize) instr_set_raw_bits_valid(instr, false); else if (!instr_is_cti_short_rewrite(instr, NULL)) { instr_set_raw_bits_trace_buf(instr, cur_buf, (int)(pc - prev_pc)); } instrlist_append(ilist, instr); /* include buf for off-fragment cti, to simplify assert below */ cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; /* create new instr for future fast decodes */ instr = instr_create(dcontext); } } /* is cti */ /* instr_is_tls_xcx_spill won't upgrade from level 1 */ else if (tls_to_dc && instr_is_tls_xcx_spill(instr)) { /* shouldn't get here for x64, where everything uses tls */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "mangling xcx save from tls to dcontext\n"); /* create single raw instr for instructions up to the xcx save */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* now append our new xcx save */ instrlist_append(ilist, instr_create_save_to_dcontext( dcontext, IF_X86_ELSE(REG_XCX, DR_REG_R2), IF_X86_ELSE(XCX_OFFSET, R2_OFFSET))); /* make sure skip current instr */ cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; } #if defined(X86) && defined(X64) else if (instr_has_rel_addr_reference(instr)) { /* We need to re-relativize, which is done automatically only for * level 1 instrs (PR 251479), and only when raw bits point to * their original location. We assume that all the if statements * above end up creating a high-level instr, so a cti w/ a * rip-rel operand is already covered. */ /* create single raw instr for instructions up to this one */ offset = (int)(prev_pc - raw_start_pc); if (offset > 0) { raw_instr = instr_create(dcontext); /* point to buffer bits */ instr_set_raw_bits_trace_buf(raw_instr, cur_buf, offset); instrlist_append(ilist, raw_instr); cur_buf += offset; raw_start_pc = prev_pc; } /* should be valid right now since pointing at original bits */ ASSERT(instr_rip_rel_valid(instr)); if (buf != NULL) { /* re-relativize into the new buffer */ DEBUG_DECLARE(byte *nxt =) instr_encode_to_copy(dcontext, instr, cur_buf, vmcode_get_executable_addr(cur_buf)); instr_set_raw_bits_trace_buf(instr, vmcode_get_executable_addr(cur_buf), (int)(pc - prev_pc)); instr_set_rip_rel_valid(instr, true); ASSERT(nxt != NULL); } instrlist_append(ilist, instr); cur_buf += (int)(pc - prev_pc); raw_start_pc = pc; /* create new instr for future fast decodes */ instr = instr_create(dcontext); } #endif } while (pc < stop_pc); DODEBUG({ if (pc != stop_pc) { LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "PC " PFX ", stop_pc " PFX "\n", pc, stop_pc); } }); ASSERT(pc == stop_pc); cache_pc next_pc = pc; if (l != NULL && TEST(LINK_PADDED, l->flags) && instr_is_nop(instr)) { /* Throw away our padding nop. 
*/ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "%s: removing padding nop @" PFX "\n", __FUNCTION__, prev_pc); pc = prev_pc; if (buf != NULL) top_buf -= instr_length(dcontext, instr); } /* create single raw instr for rest of instructions up to exit cti */ if (pc > raw_start_pc) { instr_reset(dcontext, instr); /* point to buffer bits */ offset = (int)(pc - raw_start_pc); if (offset > 0) { instr_set_raw_bits_trace_buf(instr, cur_buf, offset); instrlist_append(ilist, instr); cur_buf += offset; } if (buf != NULL && TEST(FRAG_FAKE, f->flags)) { /* Now that we know the size we can copy into buf. * We have been incrementing cur_buf all along, though * we didn't have contents there. */ ASSERT(top_buf < cur_buf); IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((cur_buf - top_buf)))); num_bytes = (uint)(cur_buf - top_buf); ASSERT(cur_buf + num_bytes < buf + *bufsz); memcpy(cur_buf, raw_start_pc, num_bytes); top_buf = cur_buf + num_bytes; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext), "decode_fragment: copied " PFX "-" PFX " to " PFX "-" PFX "\n", raw_start_pc, raw_start_pc + num_bytes, cur_buf, cur_buf + num_bytes); } ASSERT(buf == NULL || cur_buf == top_buf); } else { /* will reach here if had a processed instr (off-fragment target, etc.) * immediately prior to exit cti, so now don't need instr -- an * example (in absence of clients) is trampoline to interception code */ instr_destroy(dcontext, instr); } pc = next_pc; } if (l == NULL && !TEST(FRAG_FAKE, f->flags)) break; /* decode the exit branch */ if (cti != NULL) { /* already created */ instr = cti; ASSERT(info != NULL && info->frozen && instr_is_ubr(instr)); raw_start_pc = pc; } else { instr = instr_create(dcontext); raw_start_pc = decode(dcontext, stop_pc, instr); ASSERT(raw_start_pc != NULL); /* our own code! */ /* pc now points into fragment! */ } ASSERT(instr_is_ubr(instr) || instr_is_cbr(instr)); /* replace fcache target with target_tag and add to fragment */ if (l == NULL) { app_pc instr_tgt; /* Ensure we get proper target for short cti sequence */ if (instr_is_cti_short_rewrite(instr, stop_pc)) remangle_short_rewrite(dcontext, instr, stop_pc, 0 /*same target*/); instr_tgt = opnd_get_pc(instr_get_target(instr)); ASSERT(TEST(FRAG_COARSE_GRAIN, f->flags)); if (cti == NULL && coarse_is_entrance_stub(instr_tgt)) { target_tag = entrance_stub_target_tag(instr_tgt, info); l_flags = LINK_DIRECT; /* FIXME; try to get LINK_JMP vs LINK_CALL vs fall-through? */ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tstub tgt: " PFX " => " PFX "\n", instr_tgt, target_tag); } else if (instr_tgt == raw_start_pc /*target next instr*/ /* could optimize by not checking for stub if * coarse_elided_ubrs but we need to know whether ALL * ubrs were elided, which we don't know as normally * entire-bb-ubrs are not elided (case 9677). * plus now that we elide jmp-to-ib-stub we must check. 
*/ && coarse_is_indirect_stub(instr_tgt)) { ibl_type_t ibl_type; DEBUG_DECLARE(bool is_ibl;) target_tag = coarse_indirect_stub_jmp_target(instr_tgt); l_flags = LINK_INDIRECT; DEBUG_DECLARE(is_ibl =) get_ibl_routine_type_ex(dcontext, target_tag, &ibl_type _IF_X86_64(NULL)); ASSERT(is_ibl); l_flags |= ibltype_to_linktype(ibl_type.branch_type); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tind stub tgt: " PFX " => " PFX "\n", instr_tgt, target_tag); } else { target_tag = fragment_coarse_entry_pclookup(dcontext, info, instr_tgt); /* Only frozen units don't jump through stubs */ ASSERT(info != NULL && info->frozen); ASSERT(target_tag != NULL); l_flags = LINK_DIRECT; LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "\tfrozen tgt: " PFX "." PFX "\n", target_tag, instr_tgt); } } else { target_tag = EXIT_TARGET_TAG(dcontext, f, l); l_flags = l->flags; } if (LINKSTUB_DIRECT(l_flags)) num_dir++; else num_indir++; ASSERT(target_tag != NULL); if (instr_is_cti_short_rewrite(instr, stop_pc)) { raw_start_pc = remangle_short_rewrite(dcontext, instr, stop_pc, target_tag); } else { app_pc new_target = target_tag; /* don't point to fcache bits */ instr_set_raw_bits_valid(instr, false); LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "decode_fragment exit_cti: pc=" PFX " l->target_tag=" PFX " l->flags=0x%x\n", stop_pc, target_tag, l_flags); /* need to propagate exit branch type flags, * instr_t flag copied from old fragment linkstub * TODO: when ibl targets are different this won't be necessary */ instr_exit_branch_set_type(instr, linkstub_propagatable_flags(l_flags)); /* convert to proper ibl */ if (is_indirect_branch_lookup_routine(dcontext, target_tag)) { DEBUG_DECLARE(app_pc old_target = new_target;) new_target = get_alternate_ibl_routine(dcontext, target_tag, target_flags); ASSERT(new_target != NULL); /* for stats on traces, we assume if target_flags contains * FRAG_IS_TRACE then we are extending a trace */ DODEBUG({ LOG(THREAD, LOG_MONITOR, DF_LOGLEVEL(dcontext) - 1, "%s: %s ibl_routine " PFX " with %s_target=" PFX "\n", TEST(FRAG_IS_TRACE, target_flags) ? "extend_trace" : "decode_fragment", new_target == old_target ? "maintaining" : "replacing", old_target, new_target == old_target ? "old" : "new", new_target); STATS_INC(num_traces_ibl_extended); }); #ifdef WINDOWS DOSTATS({ if (TEST(FRAG_IS_TRACE, target_flags) && old_target == shared_syscall_routine(dcontext)) STATS_INC(num_traces_shared_syscall_extended); }); #endif } instr_set_target(instr, opnd_create_pc(new_target)); if (instr_is_cti_short(instr)) { /* make sure non-mangled short ctis, which are generated by * us and never left there from apps, are not marked as exit ctis */ instr_set_meta(instr); } } instrlist_append(ilist, instr); if (TEST(FRAG_FAKE, f->flags)) { /* Assumption: coarse-grain bbs have 1 ind exit or 2 direct, * and no code beyond the last exit! Of course frozen bbs * can have their final jmp elided, which we handle above. */ if (instr_is_ubr(instr)) { break; } } if (l != NULL) /* if NULL keep going: discovering exits as we go */ l = LINKSTUB_NEXT_EXIT(l); } /* end while(true) loop through exit stubs */ /* now fix up intra-trace cti targets */ if (instrlist_first(&intra_ctis) != NULL) { /* We have to undo all of our level 0 blocks by expanding. * Any instrs that need re-relativization should already be * separate, so this should not affect rip-rel instrs. 
*/ int offs = 0; for (instr = instrlist_first_expanded(dcontext, ilist); instr != NULL; instr = instr_get_next_expanded(dcontext, ilist, instr)) { for (cti = instrlist_first(&intra_ctis); cti != NULL; cti = instr_get_next(cti)) { /* The clone we put in intra_ctis has raw bits equal to the * original bits, so its target will be in original fragment body. * We can't rely on the raw bits of the new instrs (since the * non-level-0 ones may have allocated raw bits) so we * calculate a running offset as we go. */ if (opnd_get_pc(instr_get_target(cti)) - start_pc == offs) { /* cti targets this instr */ instr_t *real_cti = (instr_t *)instr_get_note(cti); /* PR 333691: do not preserve raw bits of real_cti, since * instrlist may change (e.g., inserted nops). Must re-encode * once instrlist is finalized. */ instr_set_target(real_cti, opnd_create_instr(instr)); DOLOG(DF_LOGLEVEL(dcontext), LOG_MONITOR, { d_r_loginst(dcontext, 4, real_cti, "\tre-set intra-fragment target"); }); break; } } offs += instr_length(dcontext, instr); } } instrlist_clear(dcontext, &intra_ctis); DOLOG(DF_LOGLEVEL(dcontext), LOG_INTERP, { LOG(THREAD, LOG_INTERP, DF_LOGLEVEL(dcontext), "Decoded F%d (" PFX "." PFX ") into:\n", f->id, f->tag, FCACHE_ENTRY_PC(f)); instrlist_disassemble(dcontext, f->tag, ilist, THREAD); }); ok = dr_set_isa_mode(dcontext, old_mode, NULL); ASSERT(ok); if (dir_exits != NULL) *dir_exits = num_dir; if (indir_exits != NULL) *indir_exits = num_indir; if (buf != NULL) { IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((top_buf - buf)))); *bufsz = (uint)(top_buf - buf); } return ilist; } #undef DF_LOGLEVEL /* Just like decode_fragment() but marks any instrs missing in the cache * as do-not-emit */ instrlist_t * decode_fragment_exact(dcontext_t *dcontext, fragment_t *f, byte *buf, /*IN/OUT*/ uint *bufsz, uint target_flags, /*OUT*/ uint *dir_exits, /*OUT*/ uint *indir_exits) { instrlist_t *ilist = decode_fragment(dcontext, f, buf, bufsz, target_flags, dir_exits, indir_exits); /* If the final jmp was elided we do NOT want to count it in the size! */ if (instr_get_raw_bits(instrlist_last(ilist)) == NULL) { instr_set_ok_to_emit(instrlist_last(ilist), false); } return ilist; } /* Makes a new copy of fragment f * If replace is true, * removes f from the fcache and adds the new copy in its place * Else * creates f as an invisible fragment (caller is responsible for linking * the new fragment!) */ fragment_t * copy_fragment(dcontext_t *dcontext, fragment_t *f, bool replace) { instrlist_t *trace = instrlist_create(dcontext); instr_t *instr; uint *trace_buf; int trace_buf_top; /* index of next free location in trace_buf */ linkstub_t *l; byte *p; cache_pc start_pc; int num_bytes; fragment_t *new_f; void *vmlist = NULL; app_pc target_tag; DEBUG_DECLARE(bool ok;) trace_buf = heap_alloc(dcontext, f->size * 2 HEAPACCT(ACCT_FRAGMENT)); start_pc = FCACHE_ENTRY_PC(f); trace_buf_top = 0; p = ((byte *)trace_buf) + trace_buf_top; IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* must re-relativize when copying! */ for (l = FRAGMENT_EXIT_STUBS(f); l; l = LINKSTUB_NEXT_EXIT(l)) { /* Copy the instruction bytes up to (but not including) the first * control-transfer instruction. ***WARNING*** This code assumes * that the first link stub corresponds to the first exit branch * in the body. 
*/ IF_X64(ASSERT(CHECK_TRUNCATE_TYPE_uint((EXIT_CTI_PC(f, l) - start_pc)))); num_bytes = (uint)(EXIT_CTI_PC(f, l) - start_pc); if (num_bytes > 0) { memcpy(p, (byte *)start_pc, num_bytes); trace_buf_top += num_bytes; start_pc += num_bytes; /* build a mongo instruction corresponding to the copied instructions */ instr = instr_create(dcontext); instr_set_raw_bits(instr, p, num_bytes); instrlist_append(trace, instr); } /* decode the exit branch */ instr = instr_create(dcontext); p = decode(dcontext, (byte *)EXIT_CTI_PC(f, l), instr); ASSERT(p != NULL); /* our own code! */ /* p now points into fragment! */ ASSERT(instr_is_ubr(instr) || instr_is_cbr(instr)); /* Replace cache_pc target with target_tag and add to trace. For * an indirect branch, the target_tag is zero. */ target_tag = EXIT_TARGET_TAG(dcontext, f, l); ASSERT(target_tag); if (instr_is_cti_short_rewrite(instr, EXIT_CTI_PC(f, l))) { p = remangle_short_rewrite(dcontext, instr, EXIT_CTI_PC(f, l), target_tag); } else { /* no short ctis that aren't mangled should be exit ctis */ ASSERT(!instr_is_cti_short(instr)); instr_set_target(instr, opnd_create_pc(target_tag)); } instrlist_append(trace, instr); start_pc += (p - (byte *)EXIT_CTI_PC(f, l)); } /* emit as invisible fragment */ /* We don't support shared fragments, where vm_area_add_to_list can fail */ ASSERT_NOT_IMPLEMENTED(!TEST(FRAG_SHARED, f->flags)); DEBUG_DECLARE(ok =) vm_area_add_to_list(dcontext, f->tag, &vmlist, f->flags, f, false /*no locks*/); ASSERT(ok); /* should never fail for private fragments */ new_f = emit_invisible_fragment(dcontext, f->tag, trace, f->flags, vmlist); if (replace) { /* link and replace old fragment */ shift_links_to_new_fragment(dcontext, f, new_f); fragment_replace(dcontext, f, new_f); } else { /* caller is responsible for linking new fragment */ } ASSERT(new_f->flags == f->flags); fragment_copy_data_fields(dcontext, f, new_f); #ifdef DEBUG if (d_r_stats->loglevel > 1) { LOG(THREAD, LOG_ALL, 2, "Copying F%d to F%d\n", f->id, new_f->id); disassemble_fragment(dcontext, f, d_r_stats->loglevel < 3); disassemble_fragment(dcontext, new_f, d_r_stats->loglevel < 3); } #endif /* DEBUG */ heap_free(dcontext, trace_buf, f->size * 2 HEAPACCT(ACCT_FRAGMENT)); /* free the instrlist_t elements */ instrlist_clear_and_destroy(dcontext, trace); if (replace) { fragment_delete(dcontext, f, FRAGDEL_NO_OUTPUT | FRAGDEL_NO_UNLINK | FRAGDEL_NO_HTABLE); STATS_INC(num_fragments_deleted_copy_and_replace); } return new_f; } /* Used when the code cache is enlarged by copying to a larger space, * and all of the relative ctis that target outside the cache need * to be shifted. Additionally, sysenter-related patching for ignore-syscalls * on XP/2003 is performed here, as the absolute code cache address pushed * onto the stack must be updated. * Assumption: old code cache has been copied to TOP of new cache, so to * detect for ctis targeting outside of old cache can look at new cache * start plus old cache size. */ void shift_ctis_in_fragment(dcontext_t *dcontext, fragment_t *f, ssize_t shift, cache_pc fcache_start, cache_pc fcache_end, size_t old_size) { cache_pc pc, prev_pc = NULL; cache_pc start_pc = FCACHE_ENTRY_PC(f); cache_pc stop_pc = fragment_stubs_end_pc(f); /* get what would have been end of cache if just shifted not resized */ cache_pc fcache_old_end = fcache_start + old_size; #ifdef WINDOWS /* The fragment could contain an ignorable sysenter instruction if * the following conditions are satisfied. 
*/ bool possible_ignorable_sysenter = DYNAMO_OPTION(ignore_syscalls) && (get_syscall_method() == SYSCALL_METHOD_SYSENTER) && /* FIXME Traces don't have FRAG_HAS_SYSCALL set so we can't filter on * that flag for all fragments. */ (TEST(FRAG_HAS_SYSCALL, f->flags) || TEST(FRAG_IS_TRACE, f->flags)); #endif instr_t instr; instr_init(dcontext, &instr); pc = start_pc; while (pc < stop_pc) { #ifdef WINDOWS cache_pc prev_decode_pc = prev_pc; /* store the address of the * previous decode, the instr * before the one 'pc' * currently points to *before* * the call to decode_cti() just * below */ #endif prev_pc = pc; instr_reset(dcontext, &instr); pc = (cache_pc)decode_cti(dcontext, (byte *)pc, &instr); #ifdef WINDOWS /* Perform fixups for sysenter instrs when ignorable syscalls is used on * XP & 2003. These are not cache-external fixups, but it's convenient & * efficient to perform them here since decode_cti() is called on every * instruction, allowing identification of sysenters without additional * decoding. */ if (possible_ignorable_sysenter && instr_opcode_valid(&instr) && instr_is_syscall(&instr)) { cache_pc next_pc; app_pc target; DEBUG_DECLARE(app_pc old_target;) DEBUG_DECLARE(cache_pc encode_nxt;) /* Peek up to find the "mov $post-sysenter -> (%xsp)" */ instr_reset(dcontext, &instr); next_pc = decode(dcontext, prev_decode_pc, &instr); ASSERT(next_pc == prev_pc); LOG(THREAD, LOG_MONITOR, 4, "shift_ctis_in_fragment: pre-sysenter mov found @" PFX "\n", instr_get_raw_bits(&instr)); ASSERT(instr_is_mov_imm_to_tos(&instr)); target = instr_get_raw_bits(&instr) + instr_length(dcontext, &instr) + (pc - prev_pc); DODEBUG(old_target = (app_pc)opnd_get_immed_int(instr_get_src(&instr, 0));); /* PR 253943: we don't support sysenter in x64 */ IF_X64(ASSERT_NOT_IMPLEMENTED(false)); /* can't have 8-byte imm-to-mem */ instr_set_src(&instr, 0, opnd_create_immed_int((ptr_int_t)target, OPSZ_4)); ASSERT(old_target + shift == target); LOG(THREAD, LOG_MONITOR, 4, "shift_ctis_in_fragment: pre-sysenter mov now pts to @" PFX "\n", target); DEBUG_DECLARE(encode_nxt =) instr_encode_to_copy(dcontext, &instr, vmcode_get_writable_addr(prev_decode_pc), prev_decode_pc); /* must not change size! */ ASSERT(encode_nxt != NULL && vmcode_get_executable_addr(encode_nxt) == next_pc); } /* The following 'if' won't get executed since a sysenter isn't * a CTI instr, so we don't need an else. We do need to take care * that any 'else' clauses are added after the 'if' won't trigger * on a sysenter either. */ #endif /* look for a pc-relative cti (including exit ctis) w/ out-of-cache * target (anything in-cache is fine, the whole cache was moved) */ if (instr_is_cti(&instr) && /* only ret, ret_far, and iret don't have targets, and * we really shouldn't see them, except possibly if they * are inserted through instrumentation, so go ahead and * check num srcs */ instr_num_srcs(&instr) > 0 && opnd_is_near_pc(instr_get_target(&instr))) { app_pc target = opnd_get_pc(instr_get_target(&instr)); if (target < fcache_start || target > fcache_old_end) { DEBUG_DECLARE(byte * nxt_pc;) /* re-encode instr w/ new pc-relative target */ instr_set_raw_bits_valid(&instr, false); instr_set_target(&instr, opnd_create_pc(target - shift)); DEBUG_DECLARE(nxt_pc =) instr_encode_to_copy(dcontext, &instr, vmcode_get_writable_addr(prev_pc), prev_pc); /* must not change size! 
*/ ASSERT(nxt_pc != NULL && vmcode_get_executable_addr(nxt_pc) == pc); #ifdef DEBUG if ((d_r_stats->logmask & LOG_CACHE) != 0) { d_r_loginst( dcontext, 5, &instr, "shift_ctis_in_fragment: found cti w/ out-of-cache target"); } #endif } } } instr_free(dcontext, &instr); } #ifdef PROFILE_RDTSC /* Add profile call to front of the trace in dc * Must call finalize_profile_call and pass it the fragment_t* * once the trace is turned into a fragment to fix up a few profile * call instructions. */ void add_profile_call(dcontext_t *dcontext) { monitor_data_t *md = (monitor_data_t *)dcontext->monitor_field; instrlist_t *trace = &(md->trace); byte *p = ((byte *)md->trace_buf) + md->trace_buf_top; instr_t *instr; uint num_bytes = profile_call_size(); ASSERT(num_bytes + md->trace_buf_top < md->trace_buf_size); insert_profile_call((cache_pc)p); /* use one giant BINARY instruction to hold everything, * to keep dynamo from interpreting the cti instructions as real ones */ instr = instr_create(dcontext); instr_set_raw_bits(instr, p, num_bytes); instrlist_prepend(trace, instr); md->trace_buf_top += num_bytes; } #endif /* emulates the effects of the instruction at pc with the state in mcontext * limited right now to only mov instructions * returns NULL if failed or not yet implemented, else returns the pc of the next instr. */ app_pc d_r_emulate(dcontext_t *dcontext, app_pc pc, priv_mcontext_t *mc) { instr_t instr; app_pc next_pc = NULL; uint opc; instr_init(dcontext, &instr); next_pc = decode(dcontext, pc, &instr); if (!instr_valid(&instr)) { next_pc = NULL; goto emulate_failure; } DOLOG(2, LOG_INTERP, { d_r_loginst(dcontext, 2, &instr, "emulating"); }); opc = instr_get_opcode(&instr); if (opc == OP_store) { opnd_t src = instr_get_src(&instr, 0); opnd_t dst = instr_get_dst(&instr, 0); reg_t *target; reg_t val; uint sz = opnd_size_in_bytes(opnd_get_size(dst)); ASSERT(opnd_is_memory_reference(dst)); if (sz != 4 IF_X64(&&sz != 8)) { next_pc = NULL; goto emulate_failure; } target = (reg_t *)opnd_compute_address_priv(dst, mc); if (opnd_is_reg(src)) { val = reg_get_value_priv(opnd_get_reg(src), mc); } else if (opnd_is_immed_int(src)) { val = (reg_t)opnd_get_immed_int(src); } else { next_pc = NULL; goto emulate_failure; } DOCHECK(1, { uint prot = 0; ASSERT(get_memory_info((app_pc)target, NULL, NULL, &prot)); ASSERT(TEST(MEMPROT_WRITE, prot)); }); LOG(THREAD, LOG_INTERP, 2, "\temulating store by writing " PFX " to " PFX "\n", val, target); if (sz == 4) *((int *)target) = (int)val; #ifdef X64 else if (sz == 8) *target = val; #endif } else if (opc == IF_X86_ELSE(OP_inc, OP_add) || opc == IF_X86_ELSE(OP_dec, OP_sub)) { opnd_t src = instr_get_src(&instr, 0); reg_t *target; uint sz = opnd_size_in_bytes(opnd_get_size(src)); if (sz != 4 IF_X64(&&sz != 8)) { next_pc = NULL; goto emulate_failure; } /* FIXME: handle changing register value */ ASSERT(opnd_is_memory_reference(src)); /* FIXME: change these to take in priv_mcontext_t* ? */ target = (reg_t *)opnd_compute_address_priv(src, mc); DOCHECK(1, { uint prot = 0; ASSERT(get_memory_info((app_pc)target, NULL, NULL, &prot)); ASSERT(TEST(MEMPROT_WRITE, prot)); }); LOG(THREAD, LOG_INTERP, 2, "\temulating %s to " PFX "\n", opc == IF_X86_ELSE(OP_inc, OP_add) ? "inc" : "dec", target); if (sz == 4) { if (opc == IF_X86_ELSE(OP_inc, OP_add)) (*((int *)target))++; else (*((int *)target))--; } #ifdef X64 else if (sz == 8) { if (opc == IF_X86_ELSE(OP_inc, OP_add)) (*target)++; else (*target)--; } #endif } emulate_failure: instr_free(dcontext, &instr); return next_pc; }
1
24,506
So this is a swing of 2, but I think we only need to reduce by 1? The artificial jump added on truncation is not passed to clients: it's like other mangling added later. So if `-max_bb_instrs 4` means 4 app instrs passed to clients, we'd want ==, not ==-1, right? Plus, if we did want -1 and to stop at 3 instead of 4, this is not sufficient: the inner loop above currently has `total_instrs <= cur_max_bb_instrs` (see the sketch after this row).
DynamoRIO-dynamorio
c
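To make the off-by-one in the review comment above concrete, here is a minimal standalone sketch; it is not DynamoRIO's actual bb-building loop (whose structure and surrounding checks differ), it only isolates what a continue-condition of `total_instrs <= cur_max_bb_instrs` does.

// Hypothetical decode loop, not build_bb_ilist itself: with <=, the body
// runs once more than cur_max_bb_instrs before the check fails.
#include <iostream>

int main() {
    const int cur_max_bb_instrs = 4;  // e.g. -max_bb_instrs 4
    int total_instrs = 0;
    while (true) {
        ++total_instrs;               // stand-in for decoding one app instr
        if (!(total_instrs <= cur_max_bb_instrs))
            break;                    // truncation point (artificial jmp added here)
    }
    std::cout << "app instrs decoded: " << total_instrs << "\n";  // prints 5
    return 0;
}

Under the <= condition a cap of 4 still admits a fifth instruction before truncation fires, which is the reviewer's point that adjusting the other comparison by 1 alone would not stop the block one instruction earlier.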
@@ -362,7 +362,7 @@ LmExprResult CreateLmOutputExpr(const NAType &formalType, // specified type t to be converted to/from C strings. The only // SQL types that do not need to be converted to C strings are: // -// INT, SMALLINT, LARGEINT, FLOAT, REAL, DOUBLE PRECISION +// INT, SMALLINT, LARGEINT, FLOAT, REAL, DOUBLE PRECISION, BOOLEAN // // because these types map to Java primitive types: //
1
/********************************************************************** // @@@ START COPYRIGHT @@@ // // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The ASF licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. // // @@@ END COPYRIGHT @@@ **********************************************************************/ /* -*-C++-*- ****************************************************************************** * * File: LmExpr.cpp * Description: Code to generate ItemExpr trees that convert UDR parameters * to/from formats acceptable as input to the Language Manager * Created: 10/28/2000 * Language: C++ * ****************************************************************************** */ /* About this code: The Language Manager does not accept all SQL datatypes. When a SQL datatype is not acceptable as input/output to LM, the value must be converted to a "normalized form" before/after LM sees it. The functions in this file create ItemExpr trees that convert values to/from LM-normal form. The two main functions are CreateLmInputExpr() and CreateLmOutputExpr(). Both can be called during codegen for a UDR node to create ItemExpr trees that convert to/from LM-normal form. See comments at the beginning of those functions for more detail. This file begins with many static helper functions. */ #include "LmExpr.h" #include "ItemExpr.h" #include "ItemFunc.h" #include "NAType.h" #include "ItemNAType.h" #include "CharType.h" #include "NumericType.h" #include "parser.h" #include "CmpContext.h" #include "DatetimeType.h" // Fix for bug 3137. // The following global is defined in SqlParserGlobals.h file and is set // when parsing result set proxy statement. It is used in allowing a // result set column type of VARCHAR(0) in proxy statement(resulting from // an empty string in SELECT statement). // It gets reset after initial parsing of the proxy statement but we need to // set it again during codegen time to avoid error 3003. extern THREAD_P NABoolean inRSProxyStmt; // Helper function to create an ItemExpr tree from a scalar // expression string. static ItemExpr *ParseExpr(NAString &s, CmpContext &c, ItemExpr &ie) { ItemExpr *result = NULL; Parser parser(&c); result = parser.getItemExprTree(s.data(), s.length(), CharInfo::UTF8 , 1, &ie); return result; } // // Helper function to get the maximum number of characters required // to represent a value of a given NAType. // static Lng32 GetDisplayLength(const NAType &t) { Lng32 result = t.getDisplayLength( t.getFSDatatype(), t.getNominalSize(), t.getPrecision(), t.getScale(), 0); return result; } // // Helper function to create an ItemExpr tree that converts a SQL // value to a SQL string without null terminator. 
// static ItemExpr *CreateLmString(ItemExpr &source, const NAType &sourceType, ComRoutineLanguage language, ComRoutineParamStyle style, CmpContext *cmpContext) { // // We want an ItemExpr that converts any value X to a // string. We will use this SQL syntax: // // cast(X as {CHAR|VARCHAR}(N) CHARACTER SET ISO88591) // // where N is the max display length of X. The datatype of the // result will be CHAR(N) or VARCHAR(N) depending on the language. // ItemExpr *result = NULL; NAMemory *h = cmpContext->statementHeap(); Lng32 maxLength = GetDisplayLength(sourceType); char buf[100]; sprintf(buf, "%d", maxLength); NAString *s = new (h) NAString("cast (@A1 as ", h); if (style == COM_STYLE_JAVA_CALL) (*s) += "VARCHAR("; else (*s) += "CHAR("; (*s) += buf; (*s) += ") CHARACTER SET ISO88591);"; result = ParseExpr(*s, *cmpContext, source); return result; } // // Helper function to create an ItemExpr tree that converts // a SQL value, represented by the source ItemExpr, to the // target type. // static ItemExpr *CreateCastExpr(ItemExpr &source, const NAType &target, CmpContext *cmpContext) { ItemExpr *result = NULL; NAMemory *h = cmpContext->statementHeap(); NAString *s; s = new (h) NAString("cast(@A1 as ", h); (*s) += target.getTypeSQLname(TRUE); if (!target.supportsSQLnull()) (*s) += " NOT NULL"; (*s) += ");"; result = ParseExpr(*s, *cmpContext, source); return result; } // Helper function to create an ItemExpr tree that converts a string // to an INTERVAL value. This expression tree is necessary because // SQL/MX will only convert arbitrary strings to INTERVALs when moving // values into the CLI via an input expression. static ItemExpr *CreateIntervalExpr(ItemExpr &source, const NAType &target, CmpContext *cmpContext) { // Our goal is to create the following expression tree. Assume "@A1" // is the input string // // case substring(@A1 from 1 for 1) // when '-' then // cast(-cast(substring(@A1 from 2) as interval year) as interval year) // else // cast(@A1 as interval year) // end ItemExpr *result = NULL; NAMemory *h = cmpContext->statementHeap(); NAString T = target.getTypeSQLname(TRUE); if (!target.supportsSQLnull()) T += " NOT NULL"; NAString *s = new (h) NAString("case substring(@A1 from 1 for 1) ", h); (*s) += "when '-' then cast(-cast(substring(@A1 from 2) as "; (*s) += T; (*s) += ") as "; (*s) += T; (*s) += ") else cast(@A1 as "; (*s) += T; (*s) += ") end;"; result = ParseExpr(*s, *cmpContext, source); return result; } //--------------------------------------------------------------------------- // CreateLmInputExpr // // Creates an ItemExpr tree to convert a SQL value to LM-normal form. // Returns the tree in newExpr. 
Right now LM-normal form is: // - SQL native format for // * binary precision integers // * floating point and // * Character types(char and varchar) // - non-null terminated C string(CHAR(N) or VARCHAR(N)) for everything else //--------------------------------------------------------------------------- LmExprResult CreateLmInputExpr(const NAType &formalType, ItemExpr &actualValue, ComRoutineLanguage language, ComRoutineParamStyle style, CmpContext *cmpContext, ItemExpr *&newExpr) { LmExprResult result = LmExprOK; NABoolean isResultSet = FALSE; if (LmTypeIsString(formalType, language, style, isResultSet)) { if (formalType.getTypeQualifier() == NA_CHARACTER_TYPE) { newExpr = &actualValue; } else { newExpr = CreateLmString(actualValue, formalType, language, style, cmpContext); } } else { newExpr = CreateCastExpr(actualValue, formalType, cmpContext); } if (newExpr == NULL) { result = LmExprError; } return result; } //--------------------------------------------------------------------------- // CreateLmOutputExpr // // Creates an ItemExpr tree to convert a value in LM-normal form to // SQL native format. Returns the tree in outputValue. The expressions // will be used by a UDR TCB to process a UDR output value in the UDR // server's reply buffer. // // One other side-effect of this function: // This function determines the SQL type that corresponds to LM-normal // form and creates an NATypeToItem instance of that type. This NATypeToItem // represents a UDR output value that has just come back from the UDR server // in a reply buffer. A UDR TCB must convert the value from LM-normal format // to the SQL formal parameter type. To allow codegen for the UDR TCB to set // up the map table correctly, we return the NATypeToItem instance in // normalizedValue. //--------------------------------------------------------------------------- LmExprResult CreateLmOutputExpr(const NAType &formalType, ComRoutineLanguage language, ComRoutineParamStyle style, CmpContext *cmpContext, ItemExpr *&normalizedValue, ItemExpr *&outputValue, NABoolean isResultSet) { LmExprResult result = LmExprError; normalizedValue = NULL; outputValue = NULL; NAMemory *h = cmpContext->statementHeap(); NAType *replyType = NULL; NABoolean isString = LmTypeIsString(formalType, language, style, isResultSet); if (isString && (formalType.getTypeQualifier() != NA_CHARACTER_TYPE)) { if (isResultSet || style != COM_STYLE_JAVA_CALL) { Lng32 maxLength = GetDisplayLength(formalType); replyType = new (h) SQLChar(maxLength); } else { Lng32 maxLength = GetDisplayLength(formalType); replyType = new (h) SQLVarChar(maxLength); } } else { replyType = formalType.newCopy(h); } if (replyType) { if (style == COM_STYLE_JAVA_CALL) { // $$$$ TBD: let's assume all CALL statement parameters are // nullable for now, until we are sure UDR server's null // processing is correct for both nullable and non-nullable // types. replyType->setNullable(TRUE); } else // Copy the nullability attribute from the formal type replyType->setNullable(formalType); normalizedValue = new (h) NATypeToItem(replyType->newCopy(h)); if (normalizedValue) { // Note that we didn't apply cast expr for CHAR and VARCHAR for // IN params. But we need to have cast expr for OUT params, even // though there is no actual casting of data, because we need to // move data from buffer to up Queue in the root node. if (formalType.getTypeQualifier() == NA_INTERVAL_TYPE && isString) outputValue = CreateIntervalExpr(*normalizedValue, formalType, cmpContext); else { // Fix for bug 3137. 
// Set the parser flag to allow VARCHAR(0) as proxy column type // before entering the parser. See comment above for more detail. if (isResultSet && formalType.getTypeQualifier() == NA_CHARACTER_TYPE) inRSProxyStmt = TRUE; outputValue = CreateCastExpr(*normalizedValue, formalType, cmpContext); inRSProxyStmt = FALSE; } if (outputValue) { result = LmExprOK; } else { normalizedValue = NULL; } } } return result; } //--------------------------------------------------------------------------- // LM type info functions // // LM-required types have certain attributes that are of interest during // codegen for a UDR operator. The following functions all return TRUE // or FALSE depending on whether a given NAType has one of these attributes. //--------------------------------------------------------------------------- // // This function returns TRUE if LM wants values of the // specified type t to be converted to/from C strings. The only // SQL types that do not need to be converted to C strings are: // // INT, SMALLINT, LARGEINT, FLOAT, REAL, DOUBLE PRECISION // // because these types map to Java primitive types: // // INT -> int // SMALLINT -> short // LARGEINT -> long // FLOAT -> float or double // REAL -> float // DOUBLE PREC -> double // // For the object-oriented Java and C++ parameter styles, we represent // intervals as a signed numeric of 2, 4, or 8 bytes, in the other // parameter styles it is represented as a string. // // INTERVAL -> short or int or long or string // // Note: When changing this, a change in file ../sqludr/sqludr.cpp // may be required as well (and other places, of course) NABoolean LmTypeIsString(const NAType &t, ComRoutineLanguage language, ComRoutineParamStyle style, NABoolean isResultSet) { NABoolean result = TRUE; if (t.getTypeQualifier() == NA_NUMERIC_TYPE) { const NumericType &nt = *((const NumericType *) &t); if (nt.isExact()) { if (nt.binaryPrecision()) { // INT // SMALLINT // LARGEINT result = FALSE; } else if (nt.isDecimal()) { // DECIMAL if (isResultSet) result = FALSE; else result = TRUE; } else { // NUMERIC // Cases to consider // * SPJ result sets: LM format is internal format // * Java call style: LM format is a string // * Bignum: LM format is a string // * C/C++ routines or Java object style: LM format is internal format if (isResultSet) result = FALSE; else if (language != COM_LANGUAGE_C && language != COM_LANGUAGE_CPP && style != COM_STYLE_JAVA_OBJ) result = TRUE; else if (nt.isBigNum()) result = TRUE; else result = FALSE; } } else { // FLOAT (8-byte value) // REAL (4-byte value) // DOUBLE PRECISION = FLOAT(52) result = FALSE; } } else if (t.getTypeQualifier() == NA_INTERVAL_TYPE && (style == COM_STYLE_JAVA_OBJ || style == COM_STYLE_CPP_OBJ)) { // in the object-oriented styles, use the native // interval representation as a number result = FALSE; } return result; } // // This function returns TRUE if LM requires a precision setting // for the given type. // NABoolean LmTypeSupportsPrecision(const NAType &t) { NABoolean result = FALSE; NABuiltInTypeEnum q = t.getTypeQualifier(); switch (q) { case NA_NUMERIC_TYPE: { const NumericType &nt = *((const NumericType *) &t); if (nt.isExact() && !nt.binaryPrecision()) { // NUMERIC, DECIMAL result = TRUE; } } break; case NA_DATETIME_TYPE: { // For DATE, TIME, and TIMESTAMP we store the datetime code as // precision. The datetime code indicates whether the type is // DATE, TIME, or TIMESTAMP. 
result = TRUE; } break; } return result; } // This function returns TRUE if LM requires a scale setting // for the given type. NABoolean LmTypeSupportsScale(const NAType &t) { NABoolean result = FALSE; NABuiltInTypeEnum q = t.getTypeQualifier(); switch (q) { case NA_NUMERIC_TYPE: { const NumericType &nt = *((const NumericType *) &t); if (nt.isExact() && !nt.binaryPrecision()) { // NUMERIC, DECIMAL result = TRUE; } } break; case NA_DATETIME_TYPE: { const DatetimeType &dt = *((const DatetimeType *) &t); DatetimeType::Subtype subtype = dt.getSubtype(); if (subtype == DatetimeType::SUBTYPE_SQLTime || subtype == DatetimeType::SUBTYPE_SQLTimestamp) { // For TIME and TIMESTAMP we store fractional precision in the // scale field result = TRUE; } } break; } return result; } // This function returns TRUE if LM considers the given type to // be an "LM object type". // // *** NOT IMPLEMENTED YET *** // Semantics for LM object types are not completely defined so // we cannot implement this function yet. NABoolean LmTypeIsObject(const NAType &t) { NABoolean result = FALSE; return result; }
1
15,408
Does Tinyint belong to this set of types? I suppose the answer is no, but I thought it safer to check.
apache-trafodion
cpp
@@ -1110,6 +1110,17 @@ Blockly.WorkspaceSvg.prototype.isDeleteArea = function(e) { return Blockly.DELETE_AREA_NONE; }; +/** + * Is the mouse event outside the blocks UI, to the right of the workspace? + * @param {!Event} e Mouse move event. + * @return {boolean} True if event is outside the blocks UI. + */ +Blockly.WorkspaceSvg.prototype.isOutside = function(e) { + var mousePoint = Blockly.utils.mouseToSvg(e, this.getParentSvg(), + this.getInverseScreenCTM()); + return this.getParentSvg().width.baseVal.value < mousePoint.x; +}; + /** * Handle a mouse-down on SVG drawing surface. * @param {!Event} e Mouse down event.
1
/** * @license * Visual Blocks Editor * * Copyright 2014 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Object representing a workspace rendered as SVG. * @author [email protected] (Neil Fraser) */ 'use strict'; goog.provide('Blockly.WorkspaceSvg'); // TODO(scr): Fix circular dependencies //goog.require('Blockly.BlockSvg'); goog.require('Blockly.Colours'); goog.require('Blockly.ConnectionDB'); goog.require('Blockly.constants'); goog.require('Blockly.DataCategory'); goog.require('Blockly.DropDownDiv'); goog.require('Blockly.Events'); goog.require('Blockly.Gesture'); goog.require('Blockly.Grid'); goog.require('Blockly.Options'); goog.require('Blockly.ScrollbarPair'); goog.require('Blockly.Touch'); goog.require('Blockly.Trashcan'); //goog.require('Blockly.VerticalFlyout'); goog.require('Blockly.Workspace'); goog.require('Blockly.WorkspaceAudio'); goog.require('Blockly.WorkspaceDragSurfaceSvg'); goog.require('Blockly.Xml'); goog.require('Blockly.ZoomControls'); goog.require('goog.array'); goog.require('goog.dom'); goog.require('goog.math.Coordinate'); goog.require('goog.userAgent'); /** * Class for a workspace. This is an onscreen area with optional trashcan, * scrollbars, bubbles, and dragging. * @param {!Blockly.Options} options Dictionary of options. * @param {Blockly.BlockDragSurfaceSvg=} opt_blockDragSurface Drag surface for * blocks. * @param {Blockly.WorkspaceDragSurfaceSvg=} opt_wsDragSurface Drag surface for * the workspace. * @extends {Blockly.Workspace} * @constructor */ Blockly.WorkspaceSvg = function(options, opt_blockDragSurface, opt_wsDragSurface) { Blockly.WorkspaceSvg.superClass_.constructor.call(this, options); this.getMetrics = options.getMetrics || Blockly.WorkspaceSvg.getTopLevelWorkspaceMetrics_; this.setMetrics = options.setMetrics || Blockly.WorkspaceSvg.setTopLevelWorkspaceMetrics_; Blockly.ConnectionDB.init(this); if (opt_blockDragSurface) { this.blockDragSurface_ = opt_blockDragSurface; } if (opt_wsDragSurface) { this.workspaceDragSurface_ = opt_wsDragSurface; } this.useWorkspaceDragSurface_ = this.workspaceDragSurface_ && Blockly.utils.is3dSupported(); /** * List of currently highlighted blocks. Block highlighting is often used to * visually mark blocks currently being executed. * @type !Array.<!Blockly.BlockSvg> * @private */ this.highlightedBlocks_ = []; /** * Object in charge of loading, storing, and playing audio for a workspace. * @type {Blockly.WorkspaceAudio} * @private */ this.audioManager_ = new Blockly.WorkspaceAudio(options.parentWorkspace); /** * This workspace's grid object or null. * @type {Blockly.Grid} * @private */ this.grid_ = this.options.gridPattern ? 
new Blockly.Grid(options.gridPattern, options.gridOptions) : null; this.registerToolboxCategoryCallback(Blockly.VARIABLE_CATEGORY_NAME, Blockly.DataCategory); this.registerToolboxCategoryCallback(Blockly.PROCEDURE_CATEGORY_NAME, Blockly.Procedures.flyoutCategory); }; goog.inherits(Blockly.WorkspaceSvg, Blockly.Workspace); /** * A wrapper function called when a resize event occurs. * You can pass the result to `unbindEvent_`. * @type {Array.<!Array>} */ Blockly.WorkspaceSvg.prototype.resizeHandlerWrapper_ = null; /** * The render status of an SVG workspace. * Returns `true` for visible workspaces and `false` for non-visible, * or headless, workspaces. * @type {boolean} */ Blockly.WorkspaceSvg.prototype.rendered = true; /** * Is this workspace the surface for a flyout? * @type {boolean} */ Blockly.WorkspaceSvg.prototype.isFlyout = false; /** * Is this workspace the surface for a mutator? * @type {boolean} * @package */ Blockly.WorkspaceSvg.prototype.isMutator = false; /** * Whether this workspace has resizes enabled. * Disable during batch operations for a performance improvement. * @type {boolean} * @private */ Blockly.WorkspaceSvg.prototype.resizesEnabled_ = true; /** * Whether this workspace has toolbox/flyout refreshes enabled. * Disable during batch operations for a performance improvement. * @type {boolean} * @private */ Blockly.WorkspaceSvg.prototype.toolboxRefreshEnabled_ = true; /** * Current horizontal scrolling offset in pixel units. * @type {number} */ Blockly.WorkspaceSvg.prototype.scrollX = 0; /** * Current vertical scrolling offset in pixel units. * @type {number} */ Blockly.WorkspaceSvg.prototype.scrollY = 0; /** * Horizontal scroll value when scrolling started in pixel units. * @type {number} */ Blockly.WorkspaceSvg.prototype.startScrollX = 0; /** * Vertical scroll value when scrolling started in pixel units. * @type {number} */ Blockly.WorkspaceSvg.prototype.startScrollY = 0; /** * Distance from mouse to object being dragged. * @type {goog.math.Coordinate} * @private */ Blockly.WorkspaceSvg.prototype.dragDeltaXY_ = null; /** * Current scale. * @type {number} */ Blockly.WorkspaceSvg.prototype.scale = 1; /** * The workspace's trashcan (if any). * @type {Blockly.Trashcan} */ Blockly.WorkspaceSvg.prototype.trashcan = null; /** * This workspace's scrollbars, if they exist. * @type {Blockly.ScrollbarPair} */ Blockly.WorkspaceSvg.prototype.scrollbar = null; /** * The current gesture in progress on this workspace, if any. * @type {Blockly.Gesture} * @private */ Blockly.WorkspaceSvg.prototype.currentGesture_ = null; /** * This workspace's surface for dragging blocks, if it exists. * @type {Blockly.BlockDragSurfaceSvg} * @private */ Blockly.WorkspaceSvg.prototype.blockDragSurface_ = null; /** * This workspace's drag surface, if it exists. * @type {Blockly.WorkspaceDragSurfaceSvg} * @private */ Blockly.WorkspaceSvg.prototype.workspaceDragSurface_ = null; /** * Whether to move workspace to the drag surface when it is dragged. * True if it should move, false if it should be translated directly. * @type {boolean} * @private */ Blockly.WorkspaceSvg.prototype.useWorkspaceDragSurface_ = false; /** * Whether the drag surface is actively in use. When true, calls to * translate will translate the drag surface instead of the translating the * workspace directly. * This is set to true in setupDragSurface and to false in resetDragSurface. * @type {boolean} * @private */ Blockly.WorkspaceSvg.prototype.isDragSurfaceActive_ = false; /** * Last known position of the page scroll. 
* This is used to determine whether we have recalculated screen coordinate * stuff since the page scrolled. * @type {!goog.math.Coordinate} * @private */ Blockly.WorkspaceSvg.prototype.lastRecordedPageScroll_ = null; /** * The first parent div with 'injectionDiv' in the name, or null if not set. * Access this with getInjectionDiv. * @type {!Element} * @private */ Blockly.WorkspaceSvg.prototype.injectionDiv_ = null; /** * Map from function names to callbacks, for deciding what to do when a button * is clicked. * @type {!Object<string, function(!Blockly.FlyoutButton)>} * @private */ Blockly.WorkspaceSvg.prototype.flyoutButtonCallbacks_ = {}; /** * Map from function names to callbacks, for deciding what to do when a custom * toolbox category is opened. * @type {!Object<string, function(!Blockly.Workspace):!Array<!Element>>} * @private */ Blockly.WorkspaceSvg.prototype.toolboxCategoryCallbacks_ = {}; /** * Inverted screen CTM, for use in mouseToSvg. * @type {SVGMatrix} * @private */ Blockly.WorkspaceSvg.prototype.inverseScreenCTM_ = null; /** * Getter for the inverted screen CTM. * @return {SVGMatrix} The matrix to use in mouseToSvg */ Blockly.WorkspaceSvg.prototype.getInverseScreenCTM = function() { return this.inverseScreenCTM_; }; /** * Update the inverted screen CTM. */ Blockly.WorkspaceSvg.prototype.updateInverseScreenCTM = function() { var ctm = this.getParentSvg().getScreenCTM(); if (ctm) { this.inverseScreenCTM_ = ctm.inverse(); } }; /** * Return the absolute coordinates of the top-left corner of this element, * scales that after canvas SVG element, if it's a descendant. * The origin (0,0) is the top-left corner of the Blockly SVG. * @param {!Element} element Element to find the coordinates of. * @return {!goog.math.Coordinate} Object with .x and .y properties. * @private */ Blockly.WorkspaceSvg.prototype.getSvgXY = function(element) { var x = 0; var y = 0; var scale = 1; if (goog.dom.contains(this.getCanvas(), element) || goog.dom.contains(this.getBubbleCanvas(), element)) { // Before the SVG canvas, scale the coordinates. scale = this.scale; } do { // Loop through this block and every parent. var xy = Blockly.utils.getRelativeXY(element); if (element == this.getCanvas() || element == this.getBubbleCanvas()) { // After the SVG canvas, don't scale the coordinates. scale = 1; } x += xy.x * scale; y += xy.y * scale; element = element.parentNode; } while (element && element != this.getParentSvg()); return new goog.math.Coordinate(x, y); }; /** * Return the position of the workspace origin relative to the injection div * origin in pixels. * The workspace origin is where a block would render at position (0, 0). * It is not the upper left corner of the workspace SVG. * @return {!goog.math.Coordinate} Offset in pixels. * @package */ Blockly.WorkspaceSvg.prototype.getOriginOffsetInPixels = function() { return Blockly.utils.getInjectionDivXY_(this.svgBlockCanvas_); }; /** * Return the injection div that is a parent of this workspace. * Walks the DOM the first time it's called, then returns a cached value. * @return {!Element} The first parent div with 'injectionDiv' in the name. * @package */ Blockly.WorkspaceSvg.prototype.getInjectionDiv = function() { // NB: it would be better to pass this in at createDom, but is more likely to // break existing uses of Blockly. 
if (!this.injectionDiv_) { var element = this.svgGroup_; while (element) { var classes = element.getAttribute('class') || ''; if ((' ' + classes + ' ').indexOf(' injectionDiv ') != -1) { this.injectionDiv_ = element; break; } element = element.parentNode; } } return this.injectionDiv_; }; /** * Save resize handler data so we can delete it later in dispose. * @param {!Array.<!Array>} handler Data that can be passed to unbindEvent_. */ Blockly.WorkspaceSvg.prototype.setResizeHandlerWrapper = function(handler) { this.resizeHandlerWrapper_ = handler; }; /** * Create the workspace DOM elements. * @param {string=} opt_backgroundClass Either 'blocklyMainBackground' or * 'blocklyMutatorBackground'. * @return {!Element} The workspace's SVG group. */ Blockly.WorkspaceSvg.prototype.createDom = function(opt_backgroundClass) { /** * <g class="blocklyWorkspace"> * <rect class="blocklyMainBackground" height="100%" width="100%"></rect> * [Trashcan and/or flyout may go here] * <g class="blocklyBlockCanvas"></g> * <g class="blocklyBubbleCanvas"></g> * </g> * @type {SVGElement} */ this.svgGroup_ = Blockly.utils.createSvgElement('g', {'class': 'blocklyWorkspace'}, null); // Note that a <g> alone does not receive mouse events--it must have a // valid target inside it. If no background class is specified, as in the // flyout, the workspace will not receive mouse events. if (opt_backgroundClass) { /** @type {SVGElement} */ this.svgBackground_ = Blockly.utils.createSvgElement('rect', {'height': '100%', 'width': '100%', 'class': opt_backgroundClass}, this.svgGroup_); if (opt_backgroundClass == 'blocklyMainBackground' && this.grid_) { this.svgBackground_.style.fill = 'url(#' + this.grid_.getPatternId() + ')'; } } /** @type {SVGElement} */ this.svgBlockCanvas_ = Blockly.utils.createSvgElement('g', {'class': 'blocklyBlockCanvas'}, this.svgGroup_, this); /** @type {SVGElement} */ this.svgBubbleCanvas_ = Blockly.utils.createSvgElement('g', {'class': 'blocklyBubbleCanvas'}, this.svgGroup_, this); var bottom = Blockly.Scrollbar.scrollbarThickness; if (this.options.hasTrashcan) { bottom = this.addTrashcan_(bottom); } if (this.options.zoomOptions && this.options.zoomOptions.controls) { bottom = this.addZoomControls_(bottom); } if (!this.isFlyout) { Blockly.bindEventWithChecks_(this.svgGroup_, 'mousedown', this, this.onMouseDown_); if (this.options.zoomOptions && this.options.zoomOptions.wheel) { // Mouse-wheel. Blockly.bindEventWithChecks_(this.svgGroup_, 'wheel', this, this.onMouseWheel_); } } // Determine if there needs to be a category tree, or a simple list of // blocks. This cannot be changed later, since the UI is very different. if (this.options.hasCategories) { /** * @type {Blockly.Toolbox} * @private */ this.toolbox_ = new Blockly.Toolbox(this); } if (this.grid_) { this.grid_.update(this.scale); } this.recordDeleteAreas(); return this.svgGroup_; }; /** * Dispose of this workspace. * Unlink from all DOM elements to prevent memory leaks. */ Blockly.WorkspaceSvg.prototype.dispose = function() { // Stop rerendering. 
this.rendered = false; if (this.currentGesture_) { this.currentGesture_.cancel(); } Blockly.WorkspaceSvg.superClass_.dispose.call(this); if (this.svgGroup_) { goog.dom.removeNode(this.svgGroup_); this.svgGroup_ = null; } this.svgBlockCanvas_ = null; this.svgBubbleCanvas_ = null; if (this.toolbox_) { this.toolbox_.dispose(); this.toolbox_ = null; } if (this.flyout_) { this.flyout_.dispose(); this.flyout_ = null; } if (this.trashcan) { this.trashcan.dispose(); this.trashcan = null; } if (this.scrollbar) { this.scrollbar.dispose(); this.scrollbar = null; } if (this.zoomControls_) { this.zoomControls_.dispose(); this.zoomControls_ = null; } if (this.audioManager_) { this.audioManager_.dispose(); this.audioManager_ = null; } if (this.grid_) { this.grid_.dispose(); this.grid_ = null; } if (this.toolboxCategoryCallbacks_) { this.toolboxCategoryCallbacks_ = null; } if (this.flyoutButtonCallbacks_) { this.flyoutButtonCallbacks_ = null; } if (!this.options.parentWorkspace) { // Top-most workspace. Dispose of the div that the // svg is injected into (i.e. injectionDiv). goog.dom.removeNode(this.getParentSvg().parentNode); } if (this.resizeHandlerWrapper_) { Blockly.unbindEvent_(this.resizeHandlerWrapper_); this.resizeHandlerWrapper_ = null; } }; /** * Obtain a newly created block. * @param {?string} prototypeName Name of the language object containing * type-specific functions for this block. * @param {string=} opt_id Optional ID. Use this ID if provided, otherwise * create a new ID. * @return {!Blockly.BlockSvg} The created block. */ Blockly.WorkspaceSvg.prototype.newBlock = function(prototypeName, opt_id) { return new Blockly.BlockSvg(this, prototypeName, opt_id); }; /** * Add a trashcan. * @param {number} bottom Distance from workspace bottom to bottom of trashcan. * @return {number} Distance from workspace bottom to the top of trashcan. * @private */ Blockly.WorkspaceSvg.prototype.addTrashcan_ = function(bottom) { /** @type {Blockly.Trashcan} */ this.trashcan = new Blockly.Trashcan(this); var svgTrashcan = this.trashcan.createDom(); this.svgGroup_.insertBefore(svgTrashcan, this.svgBlockCanvas_); return this.trashcan.init(bottom); }; /** * Add zoom controls. * @param {number} bottom Distance from workspace bottom to bottom of controls. * @return {number} Distance from workspace bottom to the top of controls. * @private */ Blockly.WorkspaceSvg.prototype.addZoomControls_ = function(bottom) { /** @type {Blockly.ZoomControls} */ this.zoomControls_ = new Blockly.ZoomControls(this); var svgZoomControls = this.zoomControls_.createDom(); this.svgGroup_.appendChild(svgZoomControls); return this.zoomControls_.init(bottom); }; /** * Add a flyout element in an element with the given tag name. * @param {string} tagName What type of tag the flyout belongs in. * @return {!Element} The element containing the flyout dom. * @private */ Blockly.WorkspaceSvg.prototype.addFlyout_ = function(tagName) { var workspaceOptions = { disabledPatternId: this.options.disabledPatternId, parentWorkspace: this, RTL: this.RTL, oneBasedIndex: this.options.oneBasedIndex, horizontalLayout: this.horizontalLayout, toolboxPosition: this.options.toolboxPosition }; if (this.horizontalLayout) { this.flyout_ = new Blockly.HorizontalFlyout(workspaceOptions); } else { this.flyout_ = new Blockly.VerticalFlyout(workspaceOptions); } this.flyout_.autoClose = false; // Return the element so that callers can place it in their desired // spot in the dom. For exmaple, mutator flyouts do not go in the same place // as main workspace flyouts. 
return this.flyout_.createDom(tagName); }; /** * Getter for the flyout associated with this workspace. This flyout may be * owned by either the toolbox or the workspace, depending on toolbox * configuration. It will be null if there is no flyout. * @return {Blockly.Flyout} The flyout on this workspace. * @package */ Blockly.WorkspaceSvg.prototype.getFlyout = function() { if (this.flyout_) { return this.flyout_; } if (this.toolbox_) { return this.toolbox_.flyout_; } return null; }; /** * Update items that use screen coordinate calculations * because something has changed (e.g. scroll position, window size). * @private */ Blockly.WorkspaceSvg.prototype.updateScreenCalculations_ = function() { this.updateInverseScreenCTM(); this.recordDeleteAreas(); }; /** * If enabled, resize the parts of the workspace that change when the workspace * contents (e.g. block positions) change. This will also scroll the * workspace contents if needed. * @package */ Blockly.WorkspaceSvg.prototype.resizeContents = function() { if (!this.resizesEnabled_ || !this.rendered) { return; } if (this.scrollbar) { // TODO(picklesrus): Once rachel-fenichel's scrollbar refactoring // is complete, call the method that only resizes scrollbar // based on contents. this.scrollbar.resize(); } this.updateInverseScreenCTM(); }; /** * Resize and reposition all of the workspace chrome (toolbox, * trash, scrollbars etc.) * This should be called when something changes that * requires recalculating dimensions and positions of the * trash, zoom, toolbox, etc. (e.g. window resize). */ Blockly.WorkspaceSvg.prototype.resize = function() { if (this.toolbox_) { this.toolbox_.position(); } if (this.flyout_) { this.flyout_.position(); } if (this.trashcan) { this.trashcan.position(); } if (this.zoomControls_) { this.zoomControls_.position(); } if (this.scrollbar) { this.scrollbar.resize(); } this.updateScreenCalculations_(); }; /** * Resizes and repositions workspace chrome if the page has a new * scroll position. * @package */ Blockly.WorkspaceSvg.prototype.updateScreenCalculationsIfScrolled = function() { /* eslint-disable indent */ var currScroll = goog.dom.getDocumentScroll(); if (!goog.math.Coordinate.equals(this.lastRecordedPageScroll_, currScroll)) { this.lastRecordedPageScroll_ = currScroll; this.updateScreenCalculations_(); } }; /* eslint-enable indent */ /** * Get the SVG element that forms the drawing surface. * @return {!Element} SVG element. */ Blockly.WorkspaceSvg.prototype.getCanvas = function() { return this.svgBlockCanvas_; }; /** * Get the SVG element that forms the bubble surface. * @return {!SVGGElement} SVG element. */ Blockly.WorkspaceSvg.prototype.getBubbleCanvas = function() { return this.svgBubbleCanvas_; }; /** * Get the SVG element that contains this workspace. * @return {!Element} SVG element. */ Blockly.WorkspaceSvg.prototype.getParentSvg = function() { if (this.cachedParentSvg_) { return this.cachedParentSvg_; } var element = this.svgGroup_; while (element) { if (element.tagName == 'svg') { this.cachedParentSvg_ = element; return element; } element = element.parentNode; } return null; }; /** * Translate this workspace to new coordinates. * @param {number} x Horizontal translation. * @param {number} y Vertical translation. 
*/ Blockly.WorkspaceSvg.prototype.translate = function(x, y) { if (this.useWorkspaceDragSurface_ && this.isDragSurfaceActive_) { this.workspaceDragSurface_.translateSurface(x,y); } else { var translation = 'translate(' + x + ',' + y + ') ' + 'scale(' + this.scale + ')'; this.svgBlockCanvas_.setAttribute('transform', translation); this.svgBubbleCanvas_.setAttribute('transform', translation); } // Now update the block drag surface if we're using one. if (this.blockDragSurface_) { this.blockDragSurface_.translateAndScaleGroup(x, y, this.scale); } }; /** * Called at the end of a workspace drag to take the contents * out of the drag surface and put them back into the workspace svg. * Does nothing if the workspace drag surface is not enabled. * @package */ Blockly.WorkspaceSvg.prototype.resetDragSurface = function() { // Don't do anything if we aren't using a drag surface. if (!this.useWorkspaceDragSurface_) { return; } this.isDragSurfaceActive_ = false; var trans = this.workspaceDragSurface_.getSurfaceTranslation(); this.workspaceDragSurface_.clearAndHide(this.svgGroup_); var translation = 'translate(' + trans.x + ',' + trans.y + ') ' + 'scale(' + this.scale + ')'; this.svgBlockCanvas_.setAttribute('transform', translation); this.svgBubbleCanvas_.setAttribute('transform', translation); }; /** * Called at the beginning of a workspace drag to move contents of * the workspace to the drag surface. * Does nothing if the drag surface is not enabled. * @package */ Blockly.WorkspaceSvg.prototype.setupDragSurface = function() { // Don't do anything if we aren't using a drag surface. if (!this.useWorkspaceDragSurface_) { return; } // This can happen if the user starts a drag, mouses up outside of the // document where the mouseup listener is registered (e.g. outside of an // iframe) and then moves the mouse back in the workspace. On mobile and ff, // we get the mouseup outside the frame. On chrome and safari desktop we do // not. if (this.isDragSurfaceActive_) { return; } this.isDragSurfaceActive_ = true; // Figure out where we want to put the canvas back. The order // in the is important because things are layered. var previousElement = this.svgBlockCanvas_.previousSibling; var width = this.getParentSvg().getAttribute("width"); var height = this.getParentSvg().getAttribute("height"); var coord = Blockly.utils.getRelativeXY(this.svgBlockCanvas_); this.workspaceDragSurface_.setContentsAndShow(this.svgBlockCanvas_, this.svgBubbleCanvas_, previousElement, width, height, this.scale); this.workspaceDragSurface_.translateSurface(coord.x, coord.y); }; /** * Returns the horizontal offset of the workspace. * Intended for LTR/RTL compatibility in XML. * @return {number} Width. */ Blockly.WorkspaceSvg.prototype.getWidth = function() { var metrics = this.getMetrics(); return metrics ? metrics.viewWidth / this.scale : 0; }; /** * Toggles the visibility of the workspace. * Currently only intended for main workspace. * @param {boolean} isVisible True if workspace should be visible. */ Blockly.WorkspaceSvg.prototype.setVisible = function(isVisible) { // Tell the scrollbar whether its container is visible so it can // tell when to hide itself. if (this.scrollbar) { this.scrollbar.setContainerVisible(isVisible); } // Tell the flyout whether its container is visible so it can // tell when to hide itself. if (this.getFlyout()) { this.getFlyout().setContainerVisible(isVisible); } this.getParentSvg().style.display = isVisible ? 'block' : 'none'; if (this.toolbox_) { // Currently does not support toolboxes in mutators. 
this.toolbox_.HtmlDiv.style.display = isVisible ? 'block' : 'none'; } if (isVisible) { this.render(); // The window may have changed size while the workspace was hidden. // Resize recalculates scrollbar position, delete areas, etc. this.resize(); } else { Blockly.hideChaff(true); Blockly.DropDownDiv.hideWithoutAnimation(); } }; /** * Render all blocks in workspace. */ Blockly.WorkspaceSvg.prototype.render = function() { // Generate list of all blocks. var blocks = this.getAllBlocks(); // Render each block. for (var i = blocks.length - 1; i >= 0; i--) { blocks[i].render(false); } }; /** * Was used back when block highlighting (for execution) and block selection * (for editing) were the same thing. * Any calls of this function can be deleted. * @deprecated October 2016 */ Blockly.WorkspaceSvg.prototype.traceOn = function() { console.warn('Deprecated call to traceOn, delete this.'); }; /** * Highlight or unhighlight a block in the workspace. Block highlighting is * often used to visually mark blocks currently being executed. * @param {?string} id ID of block to highlight/unhighlight, * or null for no block (used to unhighlight all blocks). * @param {boolean=} opt_state If undefined, highlight specified block and * automatically unhighlight all others. If true or false, manually * highlight/unhighlight the specified block. */ Blockly.WorkspaceSvg.prototype.highlightBlock = function(id, opt_state) { if (opt_state === undefined) { // Unhighlight all blocks. for (var i = 0, block; block = this.highlightedBlocks_[i]; i++) { block.setHighlighted(false); } this.highlightedBlocks_.length = 0; } // Highlight/unhighlight the specified block. var block = id ? this.getBlockById(id) : null; if (block) { var state = (opt_state === undefined) || opt_state; // Using Set here would be great, but at the cost of IE10 support. if (!state) { goog.array.remove(this.highlightedBlocks_, block); } else if (this.highlightedBlocks_.indexOf(block) == -1) { this.highlightedBlocks_.push(block); } block.setHighlighted(state); } }; /** * Glow/unglow a block in the workspace. * @param {?string} id ID of block to find. * @param {boolean} isGlowingBlock Whether to glow the block. */ Blockly.WorkspaceSvg.prototype.glowBlock = function(id, isGlowingBlock) { var block = null; if (id) { block = this.getBlockById(id); if (!block) { throw 'Tried to glow block that does not exist.'; } } block.setGlowBlock(isGlowingBlock); }; /** * Glow/unglow a stack in the workspace. * @param {?string} id ID of block which starts the stack. * @param {boolean} isGlowingStack Whether to glow the stack. */ Blockly.WorkspaceSvg.prototype.glowStack = function(id, isGlowingStack) { var block = null; if (id) { block = this.getBlockById(id); if (!block) { throw 'Tried to glow stack on block that does not exist.'; } } block.setGlowStack(isGlowingStack); }; /** * Visually report a value associated with a block. * In Scratch, appears as a pop-up next to the block when a reporter block is clicked. * @param {?string} id ID of block to report associated value. * @param {?string} value String value to visually report. 
*/ Blockly.WorkspaceSvg.prototype.reportValue = function(id, value) { var block = this.getBlockById(id); if (!block) { throw 'Tried to report value on block that does not exist.'; } Blockly.DropDownDiv.hideWithoutAnimation(); Blockly.DropDownDiv.clearContent(); var contentDiv = Blockly.DropDownDiv.getContentDiv(); var valueReportBox = goog.dom.createElement('div'); valueReportBox.setAttribute('class', 'valueReportBox'); valueReportBox.innerHTML = Blockly.utils.encodeEntities(value); contentDiv.appendChild(valueReportBox); Blockly.DropDownDiv.setColour( Blockly.Colours.valueReportBackground, Blockly.Colours.valueReportBorder ); Blockly.DropDownDiv.showPositionedByBlock(this, block); }; /** * Paste the provided block onto the workspace. * @param {!Element} xmlBlock XML block element. */ Blockly.WorkspaceSvg.prototype.paste = function(xmlBlock) { if (!this.rendered) { return; } if (this.currentGesture_) { this.currentGesture_.cancel(); // Dragging while pasting? No. } Blockly.Events.disable(); try { var block = Blockly.Xml.domToBlock(xmlBlock, this); // Scratch-specific: Give shadow dom new IDs to prevent duplicating on paste Blockly.utils.changeObscuredShadowIds(block); // Move the duplicate to original position. var blockX = parseInt(xmlBlock.getAttribute('x'), 10); var blockY = parseInt(xmlBlock.getAttribute('y'), 10); if (!isNaN(blockX) && !isNaN(blockY)) { if (this.RTL) { blockX = -blockX; } // Offset block until not clobbering another block and not in connection // distance with neighbouring blocks. do { var collide = false; var allBlocks = this.getAllBlocks(); for (var i = 0, otherBlock; otherBlock = allBlocks[i]; i++) { var otherXY = otherBlock.getRelativeToSurfaceXY(); if (Math.abs(blockX - otherXY.x) <= 1 && Math.abs(blockY - otherXY.y) <= 1) { collide = true; break; } } if (!collide) { // Check for blocks in snap range to any of its connections. var connections = block.getConnections_(false); for (var i = 0, connection; connection = connections[i]; i++) { var neighbour = connection.closest(Blockly.SNAP_RADIUS, new goog.math.Coordinate(blockX, blockY)); if (neighbour.connection) { collide = true; break; } } } if (collide) { if (this.RTL) { blockX -= Blockly.SNAP_RADIUS; } else { blockX += Blockly.SNAP_RADIUS; } blockY += Blockly.SNAP_RADIUS * 2; } } while (collide); block.moveBy(blockX, blockY); } } finally { Blockly.Events.enable(); } if (Blockly.Events.isEnabled() && !block.isShadow()) { Blockly.Events.fire(new Blockly.Events.BlockCreate(block)); } block.select(); }; /** * Refresh the toolbox unless there's a drag in progress. * @private */ Blockly.WorkspaceSvg.prototype.refreshToolboxSelection_ = function() { // Updating the toolbox can be expensive. Don't do it when when it is // disabled. if (this.toolbox_) { if (this.toolbox_.flyout_ && !this.currentGesture_ && this.toolboxRefreshEnabled_) { this.toolbox_.refreshSelection(); } } else { var thisTarget = this.targetWorkspace; if (thisTarget && thisTarget.toolbox_ && thisTarget.toolbox_.flyout_ && !thisTarget.currentGesture_ && thisTarget.toolboxRefreshEnabled_) { thisTarget.toolbox_.refreshSelection(); } } }; /** * Rename a variable by updating its name in the variable map. Update the * flyout to show the renamed variable immediately. * @param {string} id Id of the variable to rename. * @param {string} newName New variable name. 
* @package */ Blockly.WorkspaceSvg.prototype.renameVariableById = function(id, newName) { Blockly.WorkspaceSvg.superClass_.renameVariableById.call(this, id, newName); this.refreshToolboxSelection_(); }; /** * Delete a variable by the passed in ID. Update the flyout to show * immediately that the variable is deleted. * @param {string} id Id of variable to delete. * @package */ Blockly.WorkspaceSvg.prototype.deleteVariableById = function(id) { Blockly.WorkspaceSvg.superClass_.deleteVariableById.call(this, id); this.refreshToolboxSelection_(); }; /** * Create a new variable with the given name. Update the flyout to show the new * variable immediately. * @param {string} name The new variable's name. * @param {string=} opt_type The type of the variable like 'int' or 'string'. * Does not need to be unique. Field_variable can filter variables based on * their type. This will default to '' which is a specific type. * @param {string=} opt_id The unique id of the variable. This will default to * a UUID. * @return {?Blockly.VariableModel} The newly created variable. * @package */ Blockly.WorkspaceSvg.prototype.createVariable = function(name, opt_type, opt_id) { var variableInMap = (this.getVariable(name, opt_type) != null); var newVar = Blockly.WorkspaceSvg.superClass_.createVariable.call(this, name, opt_type, opt_id); // For performance reasons, only refresh the the toolbox for new variables. // Variables that already exist should already be there. if (!variableInMap && (opt_type != Blockly.BROADCAST_MESSAGE_VARIABLE_TYPE)) { this.refreshToolboxSelection_(); } return newVar; }; /** * Make a list of all the delete areas for this workspace. */ Blockly.WorkspaceSvg.prototype.recordDeleteAreas = function() { if (this.trashcan) { this.deleteAreaTrash_ = this.trashcan.getClientRect(); } else { this.deleteAreaTrash_ = null; } if (this.flyout_) { this.deleteAreaToolbox_ = this.flyout_.getClientRect(); } else if (this.toolbox_) { this.deleteAreaToolbox_ = this.toolbox_.getClientRect(); } else { this.deleteAreaToolbox_ = null; } }; /** * Is the mouse event over a delete area (toolbox or non-closing flyout)? * @param {!Event} e Mouse move event. * @return {?number} Null if not over a delete area, or an enum representing * which delete area the event is over. */ Blockly.WorkspaceSvg.prototype.isDeleteArea = function(e) { var xy = new goog.math.Coordinate(e.clientX, e.clientY); if (this.deleteAreaTrash_ && this.deleteAreaTrash_.contains(xy)) { return Blockly.DELETE_AREA_TRASH; } if (this.deleteAreaToolbox_ && this.deleteAreaToolbox_.contains(xy)) { return Blockly.DELETE_AREA_TOOLBOX; } return Blockly.DELETE_AREA_NONE; }; /** * Handle a mouse-down on SVG drawing surface. * @param {!Event} e Mouse down event. * @private */ Blockly.WorkspaceSvg.prototype.onMouseDown_ = function(e) { var gesture = this.getGesture(e); if (gesture) { gesture.handleWsStart(e, this); } }; /** * Start tracking a drag of an object on this workspace. * @param {!Event} e Mouse down event. * @param {!goog.math.Coordinate} xy Starting location of object. */ Blockly.WorkspaceSvg.prototype.startDrag = function(e, xy) { // Record the starting offset between the bubble's location and the mouse. var point = Blockly.utils.mouseToSvg(e, this.getParentSvg(), this.getInverseScreenCTM()); // Fix scale of mouse event. point.x /= this.scale; point.y /= this.scale; this.dragDeltaXY_ = goog.math.Coordinate.difference(xy, point); }; /** * Track a drag of an object on this workspace. * @param {!Event} e Mouse move event. 
* @return {!goog.math.Coordinate} New location of object. */ Blockly.WorkspaceSvg.prototype.moveDrag = function(e) { var point = Blockly.utils.mouseToSvg(e, this.getParentSvg(), this.getInverseScreenCTM()); // Fix scale of mouse event. point.x /= this.scale; point.y /= this.scale; return goog.math.Coordinate.sum(this.dragDeltaXY_, point); }; /** * Is the user currently dragging a block or scrolling the flyout/workspace? * @return {boolean} True if currently dragging or scrolling. */ Blockly.WorkspaceSvg.prototype.isDragging = function() { return this.currentGesture_ && this.currentGesture_.isDragging(); }; /** * Is this workspace draggable and scrollable? * @return {boolean} True if this workspace may be dragged. */ Blockly.WorkspaceSvg.prototype.isDraggable = function() { return !!this.scrollbar; }; /** * Handle a mouse-wheel on SVG drawing surface. * @param {!Event} e Mouse wheel event. * @private */ Blockly.WorkspaceSvg.prototype.onMouseWheel_ = function(e) { // TODO: Remove gesture cancellation and compensate for coordinate skew during // zoom. if (this.currentGesture_) { this.currentGesture_.cancel(); } if (e.ctrlKey) { // The vertical scroll distance that corresponds to a click of a zoom button. var PIXELS_PER_ZOOM_STEP = 50; var delta = -e.deltaY / PIXELS_PER_ZOOM_STEP; var position = Blockly.utils.mouseToSvg(e, this.getParentSvg(), this.getInverseScreenCTM()); this.zoom(position.x, position.y, delta); } else { // This is a regular mouse wheel event - scroll the workspace // First hide the WidgetDiv without animation // (mouse scroll makes field out of place with div) Blockly.WidgetDiv.hide(true); Blockly.DropDownDiv.hideWithoutAnimation(); var x = this.scrollX - e.deltaX; var y = this.scrollY - e.deltaY; this.startDragMetrics = this.getMetrics(); this.scroll(x, y); } e.preventDefault(); }; /** * Calculate the bounding box for the blocks on the workspace. * Coordinate system: workspace coordinates. * * @return {Object} Contains the position and size of the bounding box * containing the blocks on the workspace. */ Blockly.WorkspaceSvg.prototype.getBlocksBoundingBox = function() { var topBlocks = this.getTopBlocks(false); // There are no blocks, return empty rectangle. if (!topBlocks.length) { return {x: 0, y: 0, width: 0, height: 0}; } // Initialize boundary using the first block. var boundary = topBlocks[0].getBoundingRectangle(); // Start at 1 since the 0th block was used for initialization for (var i = 1; i < topBlocks.length; i++) { var blockBoundary = topBlocks[i].getBoundingRectangle(); if (blockBoundary.topLeft.x < boundary.topLeft.x) { boundary.topLeft.x = blockBoundary.topLeft.x; } if (blockBoundary.bottomRight.x > boundary.bottomRight.x) { boundary.bottomRight.x = blockBoundary.bottomRight.x; } if (blockBoundary.topLeft.y < boundary.topLeft.y) { boundary.topLeft.y = blockBoundary.topLeft.y; } if (blockBoundary.bottomRight.y > boundary.bottomRight.y) { boundary.bottomRight.y = blockBoundary.bottomRight.y; } } return { x: boundary.topLeft.x, y: boundary.topLeft.y, width: boundary.bottomRight.x - boundary.topLeft.x, height: boundary.bottomRight.y - boundary.topLeft.y }; }; /** * Clean up the workspace by ordering all the blocks in a column. 
*/ Blockly.WorkspaceSvg.prototype.cleanUp = function() { this.setResizesEnabled(false); Blockly.Events.setGroup(true); var topBlocks = this.getTopBlocks(true); var cursorY = 0; for (var i = 0, block; block = topBlocks[i]; i++) { var xy = block.getRelativeToSurfaceXY(); block.moveBy(-xy.x, cursorY - xy.y); block.snapToGrid(); cursorY = block.getRelativeToSurfaceXY().y + block.getHeightWidth().height + Blockly.BlockSvg.MIN_BLOCK_Y; } Blockly.Events.setGroup(false); this.setResizesEnabled(true); }; /** * Show the context menu for the workspace. * @param {!Event} e Mouse event. * @private */ Blockly.WorkspaceSvg.prototype.showContextMenu_ = function(e) { if (this.options.readOnly || this.isFlyout) { return; } var menuOptions = []; var topBlocks = this.getTopBlocks(true); var eventGroup = Blockly.utils.genUid(); var ws = this; // Options to undo/redo previous action. menuOptions.push(Blockly.ContextMenu.wsUndoOption(this)); menuOptions.push(Blockly.ContextMenu.wsRedoOption(this)); // Option to clean up blocks. if (this.scrollbar) { menuOptions.push( Blockly.ContextMenu.wsCleanupOption(this,topBlocks.length)); } if (this.options.collapse) { var hasCollapsedBlocks = false; var hasExpandedBlocks = false; for (var i = 0; i < topBlocks.length; i++) { var block = topBlocks[i]; while (block) { if (block.isCollapsed()) { hasCollapsedBlocks = true; } else { hasExpandedBlocks = true; } block = block.getNextBlock(); } } menuOptions.push(Blockly.ContextMenu.wsCollapseOption(hasExpandedBlocks, topBlocks)); menuOptions.push(Blockly.ContextMenu.wsExpandOption(hasCollapsedBlocks, topBlocks)); } // Option to delete all blocks. // Count the number of blocks that are deletable. var deleteList = Blockly.WorkspaceSvg.buildDeleteList_(topBlocks); // Scratch-specific: don't count shadow blocks in delete count var deleteCount = 0; for (var i = 0; i < deleteList.length; i++) { if (!deleteList[i].isShadow()) { deleteCount++; } } var DELAY = 10; function deleteNext() { Blockly.Events.setGroup(eventGroup); var block = deleteList.shift(); if (block) { if (block.workspace) { block.dispose(false, true); setTimeout(deleteNext, DELAY); } else { deleteNext(); } } Blockly.Events.setGroup(false); } var deleteOption = { text: deleteCount == 1 ? Blockly.Msg.DELETE_BLOCK : Blockly.Msg.DELETE_X_BLOCKS.replace('%1', String(deleteCount)), enabled: deleteCount > 0, callback: function() { if (ws.currentGesture_) { ws.currentGesture_.cancel(); } if (deleteList.length < 2 ) { deleteNext(); } else { Blockly.confirm(Blockly.Msg.DELETE_ALL_BLOCKS. replace('%1', String(deleteCount)), function(ok) { if (ok) { deleteNext(); } }); } } }; menuOptions.push(deleteOption); Blockly.ContextMenu.show(e, menuOptions, this.RTL); }; /** * Build a list of all deletable blocks that are reachable from the given * list of top blocks. * @param {!Array.<!Blockly.BlockSvg>} topBlocks The list of top blocks on the * workspace. * @return {!Array.<!Blockly.BlockSvg>} A list of deletable blocks on the * workspace. * @private */ Blockly.WorkspaceSvg.buildDeleteList_ = function(topBlocks) { var deleteList = []; function addDeletableBlocks(block) { if (block.isDeletable()) { deleteList = deleteList.concat(block.getDescendants()); } else { var children = block.getChildren(); for (var i = 0; i < children.length; i++) { addDeletableBlocks(children[i]); } } } for (var i = 0; i < topBlocks.length; i++) { addDeletableBlocks(topBlocks[i]); } return deleteList; }; /** * Modify the block tree on the existing toolbox. 
* @param {Node|string} tree DOM tree of blocks, or text representation of same. */ Blockly.WorkspaceSvg.prototype.updateToolbox = function(tree) { tree = Blockly.Options.parseToolboxTree(tree); if (!tree) { if (this.options.languageTree) { throw 'Can\'t nullify an existing toolbox.'; } return; // No change (null to null). } if (!this.options.languageTree) { throw 'Existing toolbox is null. Can\'t create new toolbox.'; } if (tree.getElementsByTagName('category').length) { if (!this.toolbox_) { throw 'Existing toolbox has no categories. Can\'t change mode.'; } this.options.languageTree = tree; this.toolbox_.populate_(tree); this.toolbox_.position(); } else { if (!this.flyout_) { throw 'Existing toolbox has categories. Can\'t change mode.'; } this.options.languageTree = tree; this.flyout_.show(tree.childNodes); } }; /** * Mark this workspace as the currently focused main workspace. */ Blockly.WorkspaceSvg.prototype.markFocused = function() { if (this.options.parentWorkspace) { this.options.parentWorkspace.markFocused(); } else { Blockly.mainWorkspace = this; // We call e.preventDefault in many event handlers which means we // need to explicitly grab focus (e.g from a textarea) because // the browser will not do it for us. How to do this is browser dependant. this.setBrowserFocus(); } }; /** * Set the workspace to have focus in the browser. * @private */ Blockly.WorkspaceSvg.prototype.setBrowserFocus = function() { // Blur whatever was focused since explcitly grabbing focus below does not // work in Edge. if (document.activeElement) { document.activeElement.blur(); } try { // Focus the workspace SVG - this is for Chrome and Firefox. this.getParentSvg().focus(); } catch (e) { // IE and Edge do not support focus on SVG elements. When that fails // above, get the injectionDiv (the workspace's parent) and focus that // instead. This doesn't work in Chrome. try { // In IE11, use setActive (which is IE only) so the page doesn't scroll // to the workspace gaining focus. this.getParentSvg().parentNode.setActive(); } catch (e) { // setActive support was discontinued in Edge so when that fails, call // focus instead. this.getParentSvg().parentNode.focus(); } } }; /** * Zooming the blocks centered in (x, y) coordinate with zooming in or out. * @param {number} x X coordinate of center. * @param {number} y Y coordinate of center. * @param {number} amount Amount of zooming * (negative zooms out and positive zooms in). */ Blockly.WorkspaceSvg.prototype.zoom = function(x, y, amount) { var speed = this.options.zoomOptions.scaleSpeed; var metrics = this.getMetrics(); var center = this.getParentSvg().createSVGPoint(); center.x = x; center.y = y; center = center.matrixTransform(this.getCanvas().getCTM().inverse()); x = center.x; y = center.y; var canvas = this.getCanvas(); // Scale factor. var scaleChange = Math.pow(speed, amount); // Clamp scale within valid range. var newScale = this.scale * scaleChange; if (newScale > this.options.zoomOptions.maxScale) { scaleChange = this.options.zoomOptions.maxScale / this.scale; } else if (newScale < this.options.zoomOptions.minScale) { scaleChange = this.options.zoomOptions.minScale / this.scale; } if (this.scale == newScale) { return; // No change in zoom. } if (this.scrollbar) { var matrix = canvas.getCTM() .translate(x * (1 - scaleChange), y * (1 - scaleChange)) .scale(scaleChange); // newScale and matrix.a should be identical (within a rounding error). // ScrollX and scrollY are in pixels. 
this.scrollX = matrix.e - metrics.absoluteLeft; this.scrollY = matrix.f - metrics.absoluteTop; } this.setScale(newScale); // Hide the WidgetDiv without animation (zoom makes field out of place with div) Blockly.WidgetDiv.hide(true); Blockly.DropDownDiv.hideWithoutAnimation(); }; /** * Zooming the blocks centered in the center of view with zooming in or out. * @param {number} type Type of zooming (-1 zooming out and 1 zooming in). */ Blockly.WorkspaceSvg.prototype.zoomCenter = function(type) { var metrics = this.getMetrics(); var x = metrics.viewWidth / 2; var y = metrics.viewHeight / 2; this.zoom(x, y, type); }; /** * Zoom the blocks to fit in the workspace if possible. */ Blockly.WorkspaceSvg.prototype.zoomToFit = function() { var metrics = this.getMetrics(); var blocksBox = this.getBlocksBoundingBox(); var blocksWidth = blocksBox.width; var blocksHeight = blocksBox.height; if (!blocksWidth) { return; // Prevents zooming to infinity. } var workspaceWidth = metrics.viewWidth; var workspaceHeight = metrics.viewHeight; if (this.flyout_) { workspaceWidth -= this.flyout_.width_; } if (!this.scrollbar) { // Origin point of 0,0 is fixed, blocks will not scroll to center. blocksWidth += metrics.contentLeft; blocksHeight += metrics.contentTop; } var ratioX = workspaceWidth / blocksWidth; var ratioY = workspaceHeight / blocksHeight; this.setScale(Math.min(ratioX, ratioY)); this.scrollCenter(); }; /** * Center the workspace. */ Blockly.WorkspaceSvg.prototype.scrollCenter = function() { if (!this.scrollbar) { // Can't center a non-scrolling workspace. return; } // Hide the WidgetDiv without animation (zoom makes field out of place with div) Blockly.WidgetDiv.hide(true); Blockly.DropDownDiv.hideWithoutAnimation(); Blockly.hideChaff(false); var metrics = this.getMetrics(); var x = (metrics.contentWidth - metrics.viewWidth) / 2; if (this.flyout_) { x -= this.flyout_.width_ / 2; } var y = (metrics.contentHeight - metrics.viewHeight) / 2; this.scrollbar.set(x, y); }; /** * Set the workspace's zoom factor. * @param {number} newScale Zoom factor. */ Blockly.WorkspaceSvg.prototype.setScale = function(newScale) { if (this.options.zoomOptions.maxScale && newScale > this.options.zoomOptions.maxScale) { newScale = this.options.zoomOptions.maxScale; } else if (this.options.zoomOptions.minScale && newScale < this.options.zoomOptions.minScale) { newScale = this.options.zoomOptions.minScale; } this.scale = newScale; if (this.grid_) { this.grid_.update(this.scale); } if (this.scrollbar) { this.scrollbar.resize(); } else { this.translate(this.scrollX, this.scrollY); } Blockly.hideChaff(false); if (this.flyout_) { // No toolbox, resize flyout. this.flyout_.reflow(); } }; /** * Scroll the workspace by a specified amount, keeping in the bounds. * Be sure to set this.startDragMetrics with cached metrics before calling. * @param {number} x Target X to scroll to * @param {number} y Target Y to scroll to */ Blockly.WorkspaceSvg.prototype.scroll = function(x, y) { var metrics = this.startDragMetrics; // Cached values x = Math.min(x, -metrics.contentLeft); y = Math.min(y, -metrics.contentTop); x = Math.max(x, metrics.viewWidth - metrics.contentLeft - metrics.contentWidth); y = Math.max(y, metrics.viewHeight - metrics.contentTop - metrics.contentHeight); // When the workspace starts scrolling, hide the WidgetDiv without animation. // This is to prevent a dispoal animation from happening in the wrong location. 
Blockly.WidgetDiv.hide(true); Blockly.DropDownDiv.hideWithoutAnimation(); // Move the scrollbars and the page will scroll automatically. this.scrollbar.set(-x - metrics.contentLeft, -y - metrics.contentTop); }; /** * Update the workspace's stack glow radius to be proportional to scale. * Ensures that stack glows always appear to be a fixed size. */ Blockly.WorkspaceSvg.prototype.updateStackGlowScale_ = function() { // No such def in the flyout workspace. if (this.options.stackGlowBlur) { this.options.stackGlowBlur.setAttribute('stdDeviation', Blockly.STACK_GLOW_RADIUS / this.scale ); } }; /** * Get the dimensions of the given workspace component, in pixels. * @param {Blockly.Toolbox|Blockly.Flyout} elem The element to get the * dimensions of, or null. It should be a toolbox or flyout, and should * implement getWidth() and getHeight(). * @return {!Object} An object containing width and height attributes, which * will both be zero if elem did not exist. * @private */ Blockly.WorkspaceSvg.getDimensionsPx_ = function(elem) { var width = 0; var height = 0; if (elem) { width = elem.getWidth(); height = elem.getHeight(); } return { width: width, height: height }; }; /** * Get the content dimensions of the given workspace, taking into account * whether or not it is scrollable and what size the workspace div is on screen. * @param {!Blockly.WorkspaceSvg} ws The workspace to measure. * @param {!Object} svgSize An object containing height and width attributes in * CSS pixels. Together they specify the size of the visible workspace, not * including areas covered up by the toolbox. * @return {!Object} The dimensions of the contents of the given workspace, as * an object containing at least * - height and width in pixels * - left and top in pixels relative to the workspace origin. * @private */ Blockly.WorkspaceSvg.getContentDimensions_ = function(ws, svgSize) { if (ws.scrollbar) { return Blockly.WorkspaceSvg.getContentDimensionsBounded_(ws, svgSize); } else { return Blockly.WorkspaceSvg.getContentDimensionsExact_(ws); } }; /** * Get the bounding box for all workspace contents, in pixels. * @param {!Blockly.WorkspaceSvg} ws The workspace to inspect. * @return {!Object} The dimensions of the contents of the given workspace, as * an object containing * - height and width in pixels * - left, right, top and bottom in pixels relative to the workspace origin. * @private */ Blockly.WorkspaceSvg.getContentDimensionsExact_ = function(ws) { // Block bounding box is in workspace coordinates. var blockBox = ws.getBlocksBoundingBox(); var scale = ws.scale; // Convert to pixels. var width = blockBox.width * scale; var height = blockBox.height * scale; var left = blockBox.x * scale; var top = blockBox.y * scale; return { left: left, top: top, right: left + width, bottom: top + height, width: width, height: height }; }; /** * Calculate the size of a scrollable workspace, which should include room for a * half screen border around the workspace contents. * @param {!Blockly.WorkspaceSvg} ws The workspace to measure. * @param {!Object} svgSize An object containing height and width attributes in * CSS pixels. Together they specify the size of the visible workspace, not * including areas covered up by the toolbox. * @return {!Object} The dimensions of the contents of the given workspace, as * an object containing * - height and width in pixels * - left and top in pixels relative to the workspace origin. 
* @private */ Blockly.WorkspaceSvg.getContentDimensionsBounded_ = function(ws, svgSize) { var content = Blockly.WorkspaceSvg.getContentDimensionsExact_(ws); // View height and width are both in pixels, and are the same as the svg size. var viewWidth = svgSize.width; var viewHeight = svgSize.height; var halfWidth = viewWidth / 2; var halfHeight = viewHeight / 2; // Add a border around the content that is at least half a screenful wide. // Ensure border is wide enough that blocks can scroll over entire screen. var left = Math.min(content.left - halfWidth, content.right - viewWidth); var right = Math.max(content.right + halfWidth, content.left + viewWidth); var top = Math.min(content.top - halfHeight, content.bottom - viewHeight); var bottom = Math.max(content.bottom + halfHeight, content.top + viewHeight); var dimensions = { left: left, top: top, height: bottom - top, width: right - left }; return dimensions; }; /** * Return an object with all the metrics required to size scrollbars for a * top level workspace. The following properties are computed: * Coordinate system: pixel coordinates. * .viewHeight: Height of the visible rectangle, * .viewWidth: Width of the visible rectangle, * .contentHeight: Height of the contents, * .contentWidth: Width of the content, * .viewTop: Offset of top edge of visible rectangle from parent, * .viewLeft: Offset of left edge of visible rectangle from parent, * .contentTop: Offset of the top-most content from the y=0 coordinate, * .contentLeft: Offset of the left-most content from the x=0 coordinate. * .absoluteTop: Top-edge of view. * .absoluteLeft: Left-edge of view. * .toolboxWidth: Width of toolbox, if it exists. Otherwise zero. * .toolboxHeight: Height of toolbox, if it exists. Otherwise zero. * .flyoutWidth: Width of the flyout if it is always open. Otherwise zero. * .flyoutHeight: Height of flyout if it is always open. Otherwise zero. * .toolboxPosition: Top, bottom, left or right. * @return {!Object} Contains size and position metrics of a top level * workspace. * @private * @this Blockly.WorkspaceSvg */ Blockly.WorkspaceSvg.getTopLevelWorkspaceMetrics_ = function() { var toolboxDimensions = Blockly.WorkspaceSvg.getDimensionsPx_(this.toolbox_); var flyoutDimensions = Blockly.WorkspaceSvg.getDimensionsPx_(this.flyout_); // Contains height and width in CSS pixels. // svgSize is equivalent to the size of the injectionDiv at this point. var svgSize = Blockly.svgSize(this.getParentSvg()); if (this.toolbox_) { if (this.toolboxPosition == Blockly.TOOLBOX_AT_TOP || this.toolboxPosition == Blockly.TOOLBOX_AT_BOTTOM) { svgSize.height -= toolboxDimensions.height; } else if (this.toolboxPosition == Blockly.TOOLBOX_AT_LEFT || this.toolboxPosition == Blockly.TOOLBOX_AT_RIGHT) { svgSize.width -= toolboxDimensions.width; } } // svgSize is now the space taken up by the Blockly workspace, not including // the toolbox. var contentDimensions = Blockly.WorkspaceSvg.getContentDimensions_(this, svgSize); var absoluteLeft = 0; if (this.toolbox_ && this.toolboxPosition == Blockly.TOOLBOX_AT_LEFT) { absoluteLeft = toolboxDimensions.width; } var absoluteTop = 0; if (this.toolbox_ && this.toolboxPosition == Blockly.TOOLBOX_AT_TOP) { absoluteTop = toolboxDimensions.height; } var metrics = { contentHeight: contentDimensions.height, contentWidth: contentDimensions.width, contentTop: contentDimensions.top, contentLeft: contentDimensions.left, viewHeight: svgSize.height, viewWidth: svgSize.width, viewTop: -this.scrollY, // Must be in pixels, somehow. 
viewLeft: -this.scrollX, // Must be in pixels, somehow. absoluteTop: absoluteTop, absoluteLeft: absoluteLeft, toolboxWidth: toolboxDimensions.width, toolboxHeight: toolboxDimensions.height, flyoutWidth: flyoutDimensions.width, flyoutHeight: flyoutDimensions.height, toolboxPosition: this.toolboxPosition }; return metrics; }; /** * Sets the X/Y translations of a top level workspace to match the scrollbars. * @param {!Object} xyRatio Contains an x and/or y property which is a float * between 0 and 1 specifying the degree of scrolling. * @private * @this Blockly.WorkspaceSvg */ Blockly.WorkspaceSvg.setTopLevelWorkspaceMetrics_ = function(xyRatio) { if (!this.scrollbar) { throw 'Attempt to set top level workspace scroll without scrollbars.'; } var metrics = this.getMetrics(); if (goog.isNumber(xyRatio.x)) { this.scrollX = -metrics.contentWidth * xyRatio.x - metrics.contentLeft; } if (goog.isNumber(xyRatio.y)) { this.scrollY = -metrics.contentHeight * xyRatio.y - metrics.contentTop; } var x = this.scrollX + metrics.absoluteLeft; var y = this.scrollY + metrics.absoluteTop; this.translate(x, y); if (this.grid_) { this.grid_.moveTo(x, y); } }; /** * Update whether this workspace has resizes enabled. * If enabled, workspace will resize when appropriate. * If disabled, workspace will not resize until re-enabled. * Use to avoid resizing during a batch operation, for performance. * @param {boolean} enabled Whether resizes should be enabled. */ Blockly.WorkspaceSvg.prototype.setResizesEnabled = function(enabled) { var reenabled = (!this.resizesEnabled_ && enabled); this.resizesEnabled_ = enabled; if (reenabled) { // Newly enabled. Trigger a resize. this.resizeContents(); } }; /** * Update whether this workspace has toolbox refreshes enabled. * If enabled, the toolbox will refresh when appropriate. * If disabled, workspace will not refresh until re-enabled. * Use to avoid refreshing during a batch operation, for performance. * @param {boolean} enabled Whether refreshes should be enabled. */ Blockly.WorkspaceSvg.prototype.setToolboxRefreshEnabled = function(enabled) { var reenabled = (!this.toolboxRefreshEnabled_ && enabled); this.toolboxRefreshEnabled_ = enabled; if (reenabled) { // Newly enabled. Trigger a refresh. this.refreshToolboxSelection_(); } }; /** * Dispose of all blocks in workspace, with an optimization to prevent resizes. */ Blockly.WorkspaceSvg.prototype.clear = function() { this.setResizesEnabled(false); Blockly.WorkspaceSvg.superClass_.clear.call(this); this.setResizesEnabled(true); }; /** * Register a callback function associated with a given key, for clicks on * buttons and labels in the flyout. * For instance, a button specified by the XML * <button text="create variable" callbackKey="CREATE_VARIABLE"></button> * should be matched by a call to * registerButtonCallback("CREATE_VARIABLE", yourCallbackFunction). * @param {string} key The name to use to look up this function. * @param {function(!Blockly.FlyoutButton)} func The function to call when the * given button is clicked. */ Blockly.WorkspaceSvg.prototype.registerButtonCallback = function(key, func) { goog.asserts.assert(goog.isFunction(func), 'Button callbacks must be functions.'); this.flyoutButtonCallbacks_[key] = func; }; /** * Get the callback function associated with a given key, for clicks on buttons * and labels in the flyout. * @param {string} key The name to use to look up the function. 
* @return {?function(!Blockly.FlyoutButton)} The function corresponding to the * given key for this workspace; null if no callback is registered. */ Blockly.WorkspaceSvg.prototype.getButtonCallback = function(key) { var result = this.flyoutButtonCallbacks_[key]; return result ? result : null; }; /** * Remove a callback for a click on a button in the flyout. * @param {string} key The name associated with the callback function. */ Blockly.WorkspaceSvg.prototype.removeButtonCallback = function(key) { this.flyoutButtonCallbacks_[key] = null; }; /** * Register a callback function associated with a given key, for populating * custom toolbox categories in this workspace. See the variable and procedure * categories as an example. * @param {string} key The name to use to look up this function. * @param {function(!Blockly.Workspace):!Array<!Element>} func The function to * call when the given toolbox category is opened. */ Blockly.WorkspaceSvg.prototype.registerToolboxCategoryCallback = function(key, func) { goog.asserts.assert(goog.isFunction(func), 'Toolbox category callbacks must be functions.'); this.toolboxCategoryCallbacks_[key] = func; }; /** * Get the callback function associated with a given key, for populating * custom toolbox categories in this workspace. * @param {string} key The name to use to look up the function. * @return {?function(!Blockly.Workspace):!Array<!Element>} The function * corresponding to the given key for this workspace, or null if no function * is registered. */ Blockly.WorkspaceSvg.prototype.getToolboxCategoryCallback = function(key) { var result = this.toolboxCategoryCallbacks_[key]; return result ? result : null; }; /** * Remove a callback for a click on a custom category's name in the toolbox. * @param {string} key The name associated with the callback function. */ Blockly.WorkspaceSvg.prototype.removeToolboxCategoryCallback = function(key) { this.toolboxCategoryCallbacks_[key] = null; }; /** * Look up the gesture that is tracking this touch stream on this workspace. * May create a new gesture. * @param {!Event} e Mouse event or touch event * @return {Blockly.Gesture} The gesture that is tracking this touch stream, * or null if no valid gesture exists. * @package */ Blockly.WorkspaceSvg.prototype.getGesture = function(e) { var isStart = (e.type == 'mousedown' || e.type == 'touchstart'); var gesture = this.currentGesture_; if (gesture) { if (isStart && gesture.hasStarted()) { console.warn('tried to start the same gesture twice'); // That's funny. We must have missed a mouse up. // Cancel it, rather than try to retrieve all of the state we need. gesture.cancel(); return null; } return gesture; } // No gesture existed on this workspace, but this looks like the start of a // new gesture. if (isStart) { this.currentGesture_ = new Blockly.Gesture(e, this); return this.currentGesture_; } // No gesture existed and this event couldn't be the start of a new gesture. return null; }; /** * Clear the reference to the current gesture. * @package */ Blockly.WorkspaceSvg.prototype.clearGesture = function() { this.currentGesture_ = null; }; /** * Cancel the current gesture, if one exists. * @package */ Blockly.WorkspaceSvg.prototype.cancelCurrentGesture = function() { if (this.currentGesture_) { this.currentGesture_.cancel(); } }; /** * Don't even think about using this function before talking to rachel-fenichel. * * Force a drag to start without clicking and dragging the block itself. Used * to attach duplicated blocks to the mouse pointer. 
* @param {!Object} fakeEvent An object with the properties needed to start a * drag, including clientX and clientY. * @param {!Blockly.BlockSvg} block The block to start dragging. * @package */ Blockly.WorkspaceSvg.prototype.startDragWithFakeEvent = function(fakeEvent, block) { Blockly.Touch.clearTouchIdentifier(); Blockly.Touch.checkTouchIdentifier(fakeEvent); var gesture = block.workspace.getGesture(fakeEvent); gesture.forceStartBlockDrag(fakeEvent, block); }; /** * Get the audio manager for this workspace. * @return {Blockly.WorkspaceAudio} The audio manager for this workspace. */ Blockly.WorkspaceSvg.prototype.getAudioManager = function() { return this.audioManager_; }; /** * Get the grid object for this workspace, or null if there is none. * @return {Blockly.Grid} The grid object for this workspace. * @package */ Blockly.WorkspaceSvg.prototype.getGrid = function() { return this.grid_; }; // Export symbols that would otherwise be renamed by Closure compiler. Blockly.WorkspaceSvg.prototype['setVisible'] = Blockly.WorkspaceSvg.prototype.setVisible;
1
9,093
I'm a bit leery of this implementation, because it assumes that the only "outside" is to the right side. Note that Blockly assumes that there's nothing useful to the "outside" of the toolbox, based on the configuration of the workspace, so it's not like we're doing the right thing either. I think this will break in RTL. If you keep the editor in the same place but inject with RTL, everything you're looking for will be in the delete area. If the editor and stage swap places, this will just be looking on the wrong side.
LLK-scratch-blocks
js
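The concern in the review comment above is that the change hard-codes the right-hand side as the only "outside" area, which inverts under RTL or when the toolbox sits on the right. Below is a rough, hypothetical sketch of how the outside edge could instead be chosen from the workspace's RTL flag and toolbox position; `getOutsideEdge` is not part of Blockly or scratch-blocks, and the `metrics` argument is assumed to be the object returned by `getMetrics()` as documented in the file above.

```js
/**
 * Hypothetical helper (illustration only, not Blockly API): pick which
 * horizontal edge counts as "outside" the workspace, instead of assuming
 * it is always the right-hand side.
 * @param {!Blockly.WorkspaceSvg} ws The workspace being queried.
 * @param {!Object} metrics Assumed to be the object from ws.getMetrics().
 * @return {{side: string, x: number}} The outside edge in pixel coordinates.
 */
function getOutsideEdge(ws, metrics) {
  // In RTL, or when the toolbox sits on the right, the area blocks can be
  // dragged "out" to is on the left of the visible view, not the right.
  var outsideIsLeft = ws.RTL ||
      ws.toolboxPosition == Blockly.TOOLBOX_AT_RIGHT;
  if (outsideIsLeft) {
    // Everything left of the view's left edge.
    return {side: 'left', x: metrics.absoluteLeft};
  }
  // Default: everything right of the view's right edge.
  return {side: 'right', x: metrics.absoluteLeft + metrics.viewWidth};
}
```

Whether RTL alone is the right signal, or whether the toolbox/flyout position and the stage placement should decide, is exactly the design question the reviewer raises.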
@@ -180,6 +180,7 @@ type Config struct { FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"` KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"` + NatPortRange numorstring.Port `config:"portrange;;local"` UsageReportingEnabled bool `config:"bool;true"` UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"`
1
// Copyright (c) 2016-2018 Tigera, Inc. All rights reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package config import ( "errors" "fmt" "net" "os" "reflect" "regexp" "strconv" "strings" "time" log "github.com/sirupsen/logrus" "github.com/projectcalico/libcalico-go/lib/apiconfig" "github.com/projectcalico/libcalico-go/lib/names" "github.com/projectcalico/libcalico-go/lib/numorstring" ) var ( IfaceListRegexp = regexp.MustCompile(`^[a-zA-Z0-9_-]{1,15}(,[a-zA-Z0-9_-]{1,15})*$`) AuthorityRegexp = regexp.MustCompile(`^[^:/]+:\d+$`) HostnameRegexp = regexp.MustCompile(`^[a-zA-Z0-9_.-]+$`) StringRegexp = regexp.MustCompile(`^.*$`) ) const ( maxUint = ^uint(0) maxInt = int(maxUint >> 1) minInt = -maxInt - 1 ) // Source of a config value. Values from higher-numbered sources override // those from lower-numbered sources. Note: some parameters (such as those // needed to connect to the datastore) can only be set from a local source. type Source uint8 const ( Default = iota DatastoreGlobal DatastorePerHost ConfigFile EnvironmentVariable ) var SourcesInDescendingOrder = []Source{EnvironmentVariable, ConfigFile, DatastorePerHost, DatastoreGlobal} func (source Source) String() string { switch source { case Default: return "<default>" case DatastoreGlobal: return "datastore (global)" case DatastorePerHost: return "datastore (per-host)" case ConfigFile: return "config file" case EnvironmentVariable: return "environment variable" } return fmt.Sprintf("<unknown(%v)>", uint8(source)) } func (source Source) Local() bool { switch source { case Default, ConfigFile, EnvironmentVariable: return true default: return false } } // Config contains the best, parsed config values loaded from the various sources. // We use tags to control the parsing and validation. type Config struct { // Configuration parameters. UseInternalDataplaneDriver bool `config:"bool;true"` DataplaneDriver string `config:"file(must-exist,executable);calico-iptables-plugin;non-zero,die-on-fail,skip-default-validation"` DatastoreType string `config:"oneof(kubernetes,etcdv3);etcdv3;non-zero,die-on-fail,local"` FelixHostname string `config:"hostname;;local,non-zero"` EtcdAddr string `config:"authority;127.0.0.1:2379;local"` EtcdScheme string `config:"oneof(http,https);http;local"` EtcdKeyFile string `config:"file(must-exist);;local"` EtcdCertFile string `config:"file(must-exist);;local"` EtcdCaFile string `config:"file(must-exist);;local"` EtcdEndpoints []string `config:"endpoint-list;;local"` TyphaAddr string `config:"authority;;local"` TyphaK8sServiceName string `config:"string;;local"` TyphaK8sNamespace string `config:"string;kube-system;non-zero,local"` TyphaReadTimeout time.Duration `config:"seconds;30;local"` TyphaWriteTimeout time.Duration `config:"seconds;10;local"` // Client-side TLS config for Felix's communication with Typha. If any of these are // specified, they _all_ must be - except that either TyphaCN or TyphaURISAN may be left // unset. Felix will then initiate a secure (TLS) connection to Typha. 
Typha must present // a certificate signed by a CA in TyphaCAFile, and with CN matching TyphaCN or URI SAN // matching TyphaURISAN. TyphaKeyFile string `config:"file(must-exist);;local"` TyphaCertFile string `config:"file(must-exist);;local"` TyphaCAFile string `config:"file(must-exist);;local"` TyphaCN string `config:"string;;local"` TyphaURISAN string `config:"string;;local"` Ipv6Support bool `config:"bool;true"` IgnoreLooseRPF bool `config:"bool;false"` RouteRefreshInterval time.Duration `config:"seconds;90"` IptablesRefreshInterval time.Duration `config:"seconds;90"` IptablesPostWriteCheckIntervalSecs time.Duration `config:"seconds;1"` IptablesLockFilePath string `config:"file;/run/xtables.lock"` IptablesLockTimeoutSecs time.Duration `config:"seconds;0"` IptablesLockProbeIntervalMillis time.Duration `config:"millis;50"` IpsetsRefreshInterval time.Duration `config:"seconds;10"` MaxIpsetSize int `config:"int;1048576;non-zero"` PolicySyncPathPrefix string `config:"file;;"` NetlinkTimeoutSecs time.Duration `config:"seconds;10"` MetadataAddr string `config:"hostname;127.0.0.1;die-on-fail"` MetadataPort int `config:"int(0,65535);8775;die-on-fail"` InterfacePrefix string `config:"iface-list;cali;non-zero,die-on-fail"` InterfaceExclude string `config:"iface-list;kube-ipvs0"` ChainInsertMode string `config:"oneof(insert,append);insert;non-zero,die-on-fail"` DefaultEndpointToHostAction string `config:"oneof(DROP,RETURN,ACCEPT);DROP;non-zero,die-on-fail"` IptablesFilterAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"` IptablesMangleAllowAction string `config:"oneof(ACCEPT,RETURN);ACCEPT;non-zero,die-on-fail"` LogPrefix string `config:"string;calico-packet"` LogFilePath string `config:"file;/var/log/calico/felix.log;die-on-fail"` LogSeverityFile string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` LogSeverityScreen string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` LogSeveritySys string `config:"oneof(DEBUG,INFO,WARNING,ERROR,FATAL);INFO"` IpInIpEnabled bool `config:"bool;false"` IpInIpMtu int `config:"int;1440;non-zero"` IpInIpTunnelAddr net.IP `config:"ipv4;"` ReportingIntervalSecs time.Duration `config:"seconds;30"` ReportingTTLSecs time.Duration `config:"seconds;90"` EndpointReportingEnabled bool `config:"bool;false"` EndpointReportingDelaySecs time.Duration `config:"seconds;1"` IptablesMarkMask uint32 `config:"mark-bitmask;0xffff0000;non-zero,die-on-fail"` DisableConntrackInvalidCheck bool `config:"bool;false"` HealthEnabled bool `config:"bool;false"` HealthPort int `config:"int(0,65535);9099"` HealthHost string `config:"string;localhost"` PrometheusMetricsEnabled bool `config:"bool;false"` PrometheusMetricsPort int `config:"int(0,65535);9091"` PrometheusGoMetricsEnabled bool `config:"bool;true"` PrometheusProcessMetricsEnabled bool `config:"bool;true"` FailsafeInboundHostPorts []ProtoPort `config:"port-list;tcp:22,udp:68,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"` FailsafeOutboundHostPorts []ProtoPort `config:"port-list;udp:53,udp:67,tcp:179,tcp:2379,tcp:2380,tcp:6666,tcp:6667;die-on-fail"` KubeNodePortRanges []numorstring.Port `config:"portrange-list;30000:32767"` UsageReportingEnabled bool `config:"bool;true"` UsageReportingInitialDelaySecs time.Duration `config:"seconds;300"` UsageReportingIntervalSecs time.Duration `config:"seconds;86400"` ClusterGUID string `config:"string;baddecaf"` ClusterType string `config:"string;"` CalicoVersion string `config:"string;"` DebugMemoryProfilePath string `config:"file;;"` DebugCPUProfilePath 
string `config:"file;/tmp/felix-cpu-<timestamp>.pprof;"` DebugDisableLogDropping bool `config:"bool;false"` DebugSimulateCalcGraphHangAfter time.Duration `config:"seconds;0"` DebugSimulateDataplaneHangAfter time.Duration `config:"seconds;0"` // State tracking. // nameToSource tracks where we loaded each config param from. sourceToRawConfig map[Source]map[string]string rawValues map[string]string Err error } type ProtoPort struct { Protocol string Port uint16 } // Load parses and merges the rawData from one particular source into this config object. // If there is a config value already loaded from a higher-priority source, then // the new value will be ignored (after validation). func (config *Config) UpdateFrom(rawData map[string]string, source Source) (changed bool, err error) { log.Infof("Merging in config from %v: %v", source, rawData) // Defensively take a copy of the raw data, in case we've been handed // a mutable map by mistake. rawDataCopy := make(map[string]string) for k, v := range rawData { if v == "" { log.WithFields(log.Fields{ "name": k, "source": source, }).Info("Ignoring empty configuration parameter. Use value 'none' if " + "your intention is to explicitly disable the default value.") continue } rawDataCopy[k] = v } config.sourceToRawConfig[source] = rawDataCopy changed, err = config.resolve() return } func (c *Config) InterfacePrefixes() []string { return strings.Split(c.InterfacePrefix, ",") } func (c *Config) InterfaceExcludes() []string { return strings.Split(c.InterfaceExclude, ",") } func (config *Config) OpenstackActive() bool { if strings.Contains(strings.ToLower(config.ClusterType), "openstack") { // OpenStack is explicitly known to be present. Newer versions of the OpenStack plugin // set this flag. log.Debug("Cluster type contains OpenStack") return true } // If we get here, either OpenStack isn't present or we're running against an old version // of the OpenStack plugin, which doesn't set the flag. Use heuristics based on the // presence of the OpenStack-related parameters. if config.MetadataAddr != "" && config.MetadataAddr != "127.0.0.1" { log.Debug("OpenStack metadata IP set to non-default, assuming OpenStack active") return true } if config.MetadataPort != 0 && config.MetadataPort != 8775 { log.Debug("OpenStack metadata port set to non-default, assuming OpenStack active") return true } for _, prefix := range config.InterfacePrefixes() { if prefix == "tap" { log.Debug("Interface prefix list contains 'tap', assuming OpenStack") return true } } log.Debug("No evidence this is an OpenStack deployment; disabling OpenStack special-cases") return false } func (config *Config) resolve() (changed bool, err error) { newRawValues := make(map[string]string) nameToSource := make(map[string]Source) for _, source := range SourcesInDescendingOrder { valueLoop: for rawName, rawValue := range config.sourceToRawConfig[source] { currentSource := nameToSource[rawName] param, ok := knownParams[strings.ToLower(rawName)] if !ok { if source >= currentSource { // Stash the raw value in case it's useful for // a plugin. Since we don't know the canonical // name, use the raw name. 
newRawValues[rawName] = rawValue nameToSource[rawName] = source } log.WithField("raw name", rawName).Info( "Ignoring unknown config param.") continue valueLoop } metadata := param.GetMetadata() name := metadata.Name if metadata.Local && !source.Local() { log.Warningf("Ignoring local-only configuration for %v from %v", name, source) continue valueLoop } log.Infof("Parsing value for %v: %v (from %v)", name, rawValue, source) var value interface{} if strings.ToLower(rawValue) == "none" { // Special case: we allow a value of "none" to force the value to // the zero value for a field. The zero value often differs from // the default value. Typically, the zero value means "turn off // the feature". if metadata.NonZero { err = errors.New("Non-zero field cannot be set to none") log.Errorf( "Failed to parse value for %v: %v from source %v. %v", name, rawValue, source, err) config.Err = err return } value = metadata.ZeroValue log.Infof("Value set to 'none', replacing with zero-value: %#v.", value) } else { value, err = param.Parse(rawValue) if err != nil { logCxt := log.WithError(err).WithField("source", source) if metadata.DieOnParseFailure { logCxt.Error("Invalid (required) config value.") config.Err = err return } else { logCxt.WithField("default", metadata.Default).Warn( "Replacing invalid value with default") value = metadata.Default err = nil } } } log.Infof("Parsed value for %v: %v (from %v)", name, value, source) if source < currentSource { log.Infof("Skipping config value for %v from %v; "+ "already have a value from %v", name, source, currentSource) continue } field := reflect.ValueOf(config).Elem().FieldByName(name) field.Set(reflect.ValueOf(value)) newRawValues[name] = rawValue nameToSource[name] = source } } changed = !reflect.DeepEqual(newRawValues, config.rawValues) config.rawValues = newRawValues return } func (config *Config) setBy(name string, source Source) bool { _, set := config.sourceToRawConfig[source][name] return set } func (config *Config) setByConfigFileOrEnvironment(name string) bool { return config.setBy(name, ConfigFile) || config.setBy(name, EnvironmentVariable) } func (config *Config) DatastoreConfig() apiconfig.CalicoAPIConfig { // We want Felix's datastore connection to be fully configurable using the same // CALICO_XXX_YYY (or just XXX_YYY) environment variables that work for any libcalico-go // client - for both the etcdv3 and KDD cases. However, for the etcd case, Felix has for a // long time supported FELIX_XXXYYY environment variables, and we want those to keep working // too. // To achieve that, first build a CalicoAPIConfig using libcalico-go's // LoadClientConfigFromEnvironment - which means incorporating defaults and CALICO_XXX_YYY // and XXX_YYY variables. cfg, err := apiconfig.LoadClientConfigFromEnvironment() if err != nil { log.WithError(err).Panic("Failed to create datastore config") } // Now allow FELIX_XXXYYY variables or XxxYyy config file settings to override that, in the // etcd case. if config.setByConfigFileOrEnvironment("DatastoreType") && config.DatastoreType == "etcdv3" { cfg.Spec.DatastoreType = apiconfig.EtcdV3 // Endpoints. if config.setByConfigFileOrEnvironment("EtcdEndpoints") && len(config.EtcdEndpoints) > 0 { cfg.Spec.EtcdEndpoints = strings.Join(config.EtcdEndpoints, ",") } else if config.setByConfigFileOrEnvironment("EtcdAddr") { cfg.Spec.EtcdEndpoints = config.EtcdScheme + "://" + config.EtcdAddr } // TLS. 
if config.setByConfigFileOrEnvironment("EtcdKeyFile") { cfg.Spec.EtcdKeyFile = config.EtcdKeyFile } if config.setByConfigFileOrEnvironment("EtcdCertFile") { cfg.Spec.EtcdCertFile = config.EtcdCertFile } if config.setByConfigFileOrEnvironment("EtcdCaFile") { cfg.Spec.EtcdCACertFile = config.EtcdCaFile } } if !config.IpInIpEnabled { // Polling k8s for node updates is expensive (because we get many superfluous // updates) so disable if we don't need it. log.Info("IPIP disabled, disabling node poll (if KDD is in use).") cfg.Spec.K8sDisableNodePoll = true } return *cfg } // Validate() performs cross-field validation. func (config *Config) Validate() (err error) { if config.FelixHostname == "" { err = errors.New("Failed to determine hostname") } if config.DatastoreType == "etcdv3" && len(config.EtcdEndpoints) == 0 { if config.EtcdScheme == "" { err = errors.New("EtcdEndpoints and EtcdScheme both missing") } if config.EtcdAddr == "" { err = errors.New("EtcdEndpoints and EtcdAddr both missing") } } // If any client-side TLS config parameters are specified, they _all_ must be - except that // either TyphaCN or TyphaURISAN may be left unset. if config.TyphaCAFile != "" || config.TyphaCertFile != "" || config.TyphaKeyFile != "" || config.TyphaCN != "" || config.TyphaURISAN != "" { // Some TLS config specified. if config.TyphaKeyFile == "" || config.TyphaCertFile == "" || config.TyphaCAFile == "" || (config.TyphaCN == "" && config.TyphaURISAN == "") { err = errors.New("If any Felix-Typha TLS config parameters are specified," + " they _all_ must be" + " - except that either TyphaCN or TyphaURISAN may be left unset.") } } if err != nil { config.Err = err } return } var knownParams map[string]param func loadParams() { knownParams = make(map[string]param) config := Config{} kind := reflect.TypeOf(config) metaRegexp := regexp.MustCompile(`^([^;(]+)(?:\(([^)]*)\))?;` + `([^;]*)(?:;` + `([^;]*))?$`) for ii := 0; ii < kind.NumField(); ii++ { field := kind.Field(ii) tag := field.Tag.Get("config") if tag == "" { continue } captures := metaRegexp.FindStringSubmatch(tag) if len(captures) == 0 { log.Panicf("Failed to parse metadata for config param %v", field.Name) } log.Debugf("%v: metadata captures: %#v", field.Name, captures) kind := captures[1] // Type: "int|oneof|bool|port-list|..." kindParams := captures[2] // Parameters for the type: e.g. 
for oneof "http,https" defaultStr := captures[3] // Default value e.g "1.0" flags := captures[4] var param param var err error switch kind { case "bool": param = &BoolParam{} case "int": min := minInt max := maxInt if kindParams != "" { minAndMax := strings.Split(kindParams, ",") min, err = strconv.Atoi(minAndMax[0]) if err != nil { log.Panicf("Failed to parse min value for %v", field.Name) } max, err = strconv.Atoi(minAndMax[1]) if err != nil { log.Panicf("Failed to parse max value for %v", field.Name) } } param = &IntParam{Min: min, Max: max} case "int32": param = &Int32Param{} case "mark-bitmask": param = &MarkBitmaskParam{} case "float": param = &FloatParam{} case "seconds": param = &SecondsParam{} case "millis": param = &MillisParam{} case "iface-list": param = &RegexpParam{Regexp: IfaceListRegexp, Msg: "invalid Linux interface name"} case "file": param = &FileParam{ MustExist: strings.Contains(kindParams, "must-exist"), Executable: strings.Contains(kindParams, "executable"), } case "authority": param = &RegexpParam{Regexp: AuthorityRegexp, Msg: "invalid URL authority"} case "ipv4": param = &Ipv4Param{} case "endpoint-list": param = &EndpointListParam{} case "port-list": param = &PortListParam{} case "portrange-list": param = &PortRangeListParam{} case "hostname": param = &RegexpParam{Regexp: HostnameRegexp, Msg: "invalid hostname"} case "oneof": options := strings.Split(kindParams, ",") lowerCaseToCanon := make(map[string]string) for _, option := range options { lowerCaseToCanon[strings.ToLower(option)] = option } param = &OneofListParam{ lowerCaseOptionsToCanonical: lowerCaseToCanon} case "string": param = &RegexpParam{Regexp: StringRegexp, Msg: "invalid string"} default: log.Panicf("Unknown type of parameter: %v", kind) } metadata := param.GetMetadata() metadata.Name = field.Name metadata.ZeroValue = reflect.ValueOf(config).FieldByName(field.Name).Interface() if strings.Index(flags, "non-zero") > -1 { metadata.NonZero = true } if strings.Index(flags, "die-on-fail") > -1 { metadata.DieOnParseFailure = true } if strings.Index(flags, "local") > -1 { metadata.Local = true } if defaultStr != "" { if strings.Index(flags, "skip-default-validation") > -1 { metadata.Default = defaultStr } else { // Parse the default value and save it in the metadata. Doing // that here ensures that we syntax-check the defaults now. defaultVal, err := param.Parse(defaultStr) if err != nil { log.Panicf("Invalid default value: %v", err) } metadata.Default = defaultVal } } else { metadata.Default = metadata.ZeroValue } knownParams[strings.ToLower(field.Name)] = param } } func (config *Config) RawValues() map[string]string { return config.rawValues } func New() *Config { if knownParams == nil { loadParams() } p := &Config{ rawValues: make(map[string]string), sourceToRawConfig: make(map[Source]map[string]string), } for _, param := range knownParams { param.setDefault(p) } hostname, err := names.Hostname() if err != nil { log.Warningf("Failed to get hostname from kernel, "+ "trying HOSTNAME variable: %v", err) hostname = strings.ToLower(os.Getenv("HOSTNAME")) } p.FelixHostname = hostname return p } type param interface { GetMetadata() *Metadata Parse(raw string) (result interface{}, err error) setDefault(*Config) }
1
16,561
Just spotted the `local` on here; that shouldn't be needed - there's no reason to limit this config to local-only sources (env var / config file).
projectcalico-felix
go
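For reference on what the `local` flag in the comment above does: in the Config struct earlier in this row, the flag list is the third `;`-separated segment of the `config:"..."` struct tag, and `local` marks the parameter as settable only from local sources (config file or environment variable), never from the datastore. Dropping the flag, as the review suggests, would let the new `NatPortRange` parameter be set from the datastore as well. The snippet below is a minimal, self-contained sketch of that flag check; `isLocalOnly` is a made-up helper for illustration, not part of Felix, and it simplifies the tag grammar that the real `loadParams` parses with a regexp.

```go
package main

import (
	"fmt"
	"strings"
)

// isLocalOnly reports whether a Felix-style config struct tag carries the
// "local" flag, i.e. whether the parameter may only be set from local
// sources (environment variable or config file) and never from the
// datastore. The tag layout mirrored here is "<kind>;<default>;<flags>",
// e.g. "portrange;;local"; the real code also allows a parenthesised
// parameter list after <kind> and parses the whole tag with a regexp.
func isLocalOnly(tag string) bool {
	parts := strings.Split(tag, ";")
	if len(parts) < 3 {
		return false // no flags segment, so no "local" flag
	}
	return strings.Contains(parts[2], "local")
}

func main() {
	fmt.Println(isLocalOnly("portrange;;local"))           // true: datastore values are ignored
	fmt.Println(isLocalOnly("portrange-list;30000:32767")) // false: any source may set it
}
```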
@@ -2322,7 +2322,7 @@ bool DiscoveryDataBase::from_json( } // Add Participant - auto wit = writers_.insert(std::make_pair(guid_aux, dei)); + writers_.insert(std::make_pair(guid_aux, dei)); // Extra configurations for writers // Add writer to writers_by_topic. This will create the topic if necessary
1
// Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file DiscoveryDataBase.cpp * */ #include <mutex> #include <fastdds/dds/log/Log.hpp> #include <fastdds/rtps/common/EntityId_t.hpp> #include <fastdds/rtps/common/GuidPrefix_t.hpp> #include <fastdds/rtps/common/RemoteLocators.hpp> #include "./DiscoveryDataBase.hpp" #include "backup/json.hpp" #include "backup/SharedBackupFunctions.hpp" namespace eprosima { namespace fastdds { namespace rtps { namespace ddb { DiscoveryDataBase::DiscoveryDataBase( fastrtps::rtps::GuidPrefix_t server_guid_prefix, std::vector<fastrtps::rtps::GuidPrefix_t> servers) : server_guid_prefix_(server_guid_prefix) , server_acked_by_all_(servers.size() == 0) , servers_(servers) , enabled_(true) , processing_backup_(false) , is_persistent_ (false) { } DiscoveryDataBase::~DiscoveryDataBase() { if (!clear().empty()) { logError(DISCOVERY_DATABASE, "Destroying a NOT cleared database"); } // TODO close file } std::vector<fastrtps::rtps::CacheChange_t*> DiscoveryDataBase::clear() { // Cannot clear an enabled database, since there could be inconsistencies after the process if (enabled_) { logError(DISCOVERY_DATABASE, "Cannot clear an enabled database"); return std::vector<fastrtps::rtps::CacheChange_t*>({}); } logInfo(DISCOVERY_DATABASE, "Clearing DiscoveryDataBase"); std::unique_lock<std::recursive_mutex> lock(mutex_); /* Clear receive queues. 
Set changes inside to release */ while (!pdp_data_queue_.Empty()) { DiscoveryPDPDataQueueInfo data_queue_info = pdp_data_queue_.Front(); changes_to_release_.push_back(data_queue_info.change()); pdp_data_queue_.Pop(); } pdp_data_queue_.Clear( ); while (!edp_data_queue_.Empty()) { DiscoveryEDPDataQueueInfo data_queue_info = edp_data_queue_.Front(); changes_to_release_.push_back(data_queue_info.change()); edp_data_queue_.Pop(); } edp_data_queue_.Clear(); /* Clear by_topic collections */ writers_by_topic_.clear(); readers_by_topic_.clear(); /* Clear list of dirty topics */ dirty_topics_.clear(); /* Clear disposals list */ disposals_.clear(); /* Clear to_send collections */ pdp_to_send_.clear(); edp_publications_to_send_.clear(); edp_subscriptions_to_send_.clear(); /* Clear writers_ */ for (auto writers_it = writers_.begin(); writers_it != writers_.end();) { writers_it = delete_writer_entity_(writers_it); } /* Clear readers_ */ for (auto readers_it = readers_.begin(); readers_it != readers_.end();) { readers_it = delete_reader_entity_(readers_it); } /* Clear participants_ */ for (auto participants_it = participants_.begin(); participants_it != participants_.end();) { participants_it = delete_participant_entity_(participants_it); } /* Reset state parameters */ server_acked_by_all_ = true; /* Clear changes to release */ std::vector<fastrtps::rtps::CacheChange_t*> leftover_changes = changes_to_release_; changes_to_release_.clear(); servers_.clear(); /* Return the collection of changes that are no longer owned by the database */ return leftover_changes; } bool DiscoveryDataBase::pdp_is_relevant( const eprosima::fastrtps::rtps::CacheChange_t& change, const eprosima::fastrtps::rtps::GUID_t& reader_guid) const { // Get identity of the participant that generated the DATA(p|Up) fastrtps::rtps::GuidPrefix_t change_guid_prefix = guid_from_change(&change).guidPrefix; // Own DATA(p|Up) is always relevant for remote PDP readers. Server's PDP ReaderProxy will never // be queried for relevance, since Participant's own PDP writer and reader are not matched, // and there for there is no ReaderProxy for participant's own PDP reader. 
if (server_guid_prefix_ == change_guid_prefix) { return true; } // Lock(shared mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); logInfo(DISCOVERY_DATABASE, "PDP is " << change.instanceHandle << " relevant to " << reader_guid); auto it = participants_.find(change_guid_prefix); if (it != participants_.end()) { // it is relevant if the ack has not been received yet // in NOT_ALIVE case the set_disposal unmatches every participant return (it->second.is_relevant_participant(reader_guid.guidPrefix) && !it->second.is_matched(reader_guid.guidPrefix)); } // Not relevant return false; } bool DiscoveryDataBase::edp_publications_is_relevant( const eprosima::fastrtps::rtps::CacheChange_t& change, const eprosima::fastrtps::rtps::GUID_t& reader_guid) const { // Get identity of the participant that generated the DATA fastrtps::rtps::GUID_t change_guid = guid_from_change(&change); // Lock(shared mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); auto itp = participants_.find(change_guid.guidPrefix); if (itp == participants_.end()) { // not relevant return false; } else if (!itp->second.is_matched(reader_guid.guidPrefix)) { // not relevant return false; } auto itw = writers_.find(change_guid); if (itw != writers_.end()) { // it is relevant if the ack has not been received yet return (itw->second.is_relevant_participant(reader_guid.guidPrefix) && !itw->second.is_matched(reader_guid.guidPrefix)); } // not relevant return false; } bool DiscoveryDataBase::edp_subscriptions_is_relevant( const eprosima::fastrtps::rtps::CacheChange_t& change, const eprosima::fastrtps::rtps::GUID_t& reader_guid) const { // Get identity of the participant that generated the DATA fastrtps::rtps::GUID_t change_guid = guid_from_change(&change); // Lock(shared mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); auto itp = participants_.find(change_guid.guidPrefix); if (itp == participants_.end()) { // not relevant return false; } else if (!itp->second.is_matched(reader_guid.guidPrefix)) { // not relevant return false; } auto itr = readers_.find(change_guid); if (itr != readers_.end()) { // it is relevant if the ack has not been received yet return (itr->second.is_relevant_participant(reader_guid.guidPrefix) && !itr->second.is_matched(reader_guid.guidPrefix)); } // not relevant return false; } void DiscoveryDataBase::update_change_and_unmatch_( fastrtps::rtps::CacheChange_t* new_change, ddb::DiscoverySharedInfo& entity) { changes_to_release_.push_back(entity.update_and_unmatch(new_change)); // Manually set relevant participants ACK status of this server, and of the participant that sent the // change, to 1. This way, we avoid backprogation of the data. entity.add_or_update_ack_participant(server_guid_prefix_, true); entity.add_or_update_ack_participant(new_change->writerGUID.guidPrefix, true); } void DiscoveryDataBase::add_ack_( const eprosima::fastrtps::rtps::CacheChange_t* change, const eprosima::fastrtps::rtps::GuidPrefix_t& acked_entity) { if (!enabled_) { logInfo(DISCOVERY_DATABASE, "Discovery Database is disabled"); return; } if (is_participant(change)) { logInfo(DISCOVERY_DATABASE, "Adding DATA(p) ACK for change " << change->instanceHandle << " to " << acked_entity); auto it = participants_.find(guid_from_change(change).guidPrefix); if (it != participants_.end()) { // Only add ACK if the change in the database is the same as the incoming change. 
Else, the change in the // database has been updated, so this ACK is not relevant anymore if (it->second.change()->write_params.sample_identity() == change->write_params.sample_identity()) { it->second.add_or_update_ack_participant(acked_entity, true); } } } else if (is_writer(change)) { logInfo(DISCOVERY_DATABASE, "Adding DATA(w) ACK for change " << change->instanceHandle << " to " << acked_entity); auto it = writers_.find(guid_from_change(change)); if (it != writers_.end()) { // Only add ACK if the change in the database is the same as the incoming change. Else, the change in the // database has been updated, so this ACK is not relevant anymore if (it->second.change()->write_params.sample_identity() == change->write_params.sample_identity()) { it->second.add_or_update_ack_participant(acked_entity, true); } } } else if (is_reader(change)) { logInfo(DISCOVERY_DATABASE, "Adding DATA(r) ACK for change " << change->instanceHandle << " to " << acked_entity); auto it = readers_.find(guid_from_change(change)); if (it != readers_.end()) { // Only add ACK if the change in the database is the same as the incoming change. Else, the change in the // database has been updated, so this ACK is not relevant anymore if (it->second.change()->write_params.sample_identity() == change->write_params.sample_identity()) { it->second.add_or_update_ack_participant(acked_entity, true); } } } } bool DiscoveryDataBase::update( eprosima::fastrtps::rtps::CacheChange_t* change, DiscoveryParticipantChangeData participant_change_data) { // in case the ddb is persistent, we store every cache in queue in a file if (is_persistent_) { // Does not allow to the server to erase the ddb before this message has been processed std::unique_lock<std::recursive_mutex> lock(data_queues_mutex_); nlohmann::json j; ddb::to_json(j, *change); backup_file_ << j; backup_file_.flush(); } if (!enabled_) { logInfo(DISCOVERY_DATABASE, "Discovery Database is disabled"); return false; } if (!is_participant(change)) { logError(DISCOVERY_DATABASE, "Change is not a DATA(p|Up): " << change->instanceHandle); return false; } logInfo(DISCOVERY_DATABASE, "Adding DATA(p|Up) to the queue: " << change->instanceHandle); // Add the DATA(p|Up) to the PDP queue to process pdp_data_queue_.Push(eprosima::fastdds::rtps::ddb::DiscoveryPDPDataQueueInfo(change, participant_change_data)); return true; } bool DiscoveryDataBase::update( eprosima::fastrtps::rtps::CacheChange_t* change, std::string topic_name) { // in case the ddb is persistent, we store every cache in queue in a file if (is_persistent_) { // Does not allow to the server to erase the ddb before this message has been process std::unique_lock<std::recursive_mutex> lock(data_queues_mutex_); nlohmann::json j; ddb::to_json(j, *change); backup_file_ << j; backup_file_.flush(); } if (!enabled_) { logInfo(DISCOVERY_DATABASE, "Discovery Database is disabled"); return false; } if (!is_writer(change) && !is_reader(change)) { logError(DISCOVERY_DATABASE, "Change is not a DATA(w|Uw|r|Ur): " << change->instanceHandle); return false; } logInfo(DISCOVERY_DATABASE, "Adding DATA(w|Uw|r|Ur) to the queue: " << change->instanceHandle); // add the DATA(w|Uw|r|Ur) to the EDP queue to process edp_data_queue_.Push(eprosima::fastdds::rtps::ddb::DiscoveryEDPDataQueueInfo(change, topic_name)); return true; } const std::vector<eprosima::fastrtps::rtps::CacheChange_t*> DiscoveryDataBase::changes_to_dispose() { // lock(sharing mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); return disposals_; } void 
DiscoveryDataBase::clear_changes_to_dispose() { // lock(exclusive mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); disposals_.clear(); } //////////// // Functions to process_to_send_lists() const std::vector<eprosima::fastrtps::rtps::CacheChange_t*> DiscoveryDataBase::pdp_to_send() { // lock(sharing mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); return pdp_to_send_; } void DiscoveryDataBase::clear_pdp_to_send() { // lock(exclusive mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); pdp_to_send_.clear(); } const std::vector<eprosima::fastrtps::rtps::CacheChange_t*> DiscoveryDataBase::edp_publications_to_send() { // lock(sharing mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); return edp_publications_to_send_; } void DiscoveryDataBase::clear_edp_publications_to_send() { // lock(exclusive mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); edp_publications_to_send_.clear(); } const std::vector<eprosima::fastrtps::rtps::CacheChange_t*> DiscoveryDataBase::edp_subscriptions_to_send() { // lock(sharing mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); return edp_subscriptions_to_send_; } void DiscoveryDataBase::clear_edp_subscriptions_to_send() { // lock(exclusive mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); edp_subscriptions_to_send_.clear(); } const std::vector<eprosima::fastrtps::rtps::CacheChange_t*> DiscoveryDataBase::changes_to_release() { // lock(sharing mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); return changes_to_release_; } void DiscoveryDataBase::clear_changes_to_release() { // lock(exclusive mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); changes_to_release_.clear(); } //////////// // Functions to process PDP and EDP data queues void DiscoveryDataBase::process_pdp_data_queue() { if (!enabled_) { logInfo(DISCOVERY_DATABASE, "Discovery Database is disabled"); return; } // Lock(exclusive mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); // Swap DATA queues pdp_data_queue_.Swap(); // Process all messages in the queque while (!pdp_data_queue_.Empty()) { // Process each message with Front() DiscoveryPDPDataQueueInfo data_queue_info = pdp_data_queue_.Front(); // If the change is a DATA(p) if (data_queue_info.change()->kind == eprosima::fastrtps::rtps::ALIVE) { // Update participants map logInfo(DISCOVERY_DATABASE, "DATA(p) of entity " << data_queue_info.change()->instanceHandle << " received from: " << data_queue_info.change()->writerGUID); create_participant_from_change_(data_queue_info.change(), data_queue_info.participant_change_data()); } // If the change is a DATA(Up) else { logInfo(DISCOVERY_DATABASE, "DATA(Up) of entity " << data_queue_info.change()->instanceHandle << " received from: " << data_queue_info.change()->writerGUID); process_dispose_participant_(data_queue_info.change()); } // Pop the message from the queue pdp_data_queue_.Pop(); } } bool DiscoveryDataBase::process_edp_data_queue() { if (!enabled_) { logInfo(DISCOVERY_DATABASE, "Discovery Database is disabled"); return false; } bool is_dirty_topic = false; // Lock(exclusive mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); // Swap DATA queues edp_data_queue_.Swap(); eprosima::fastrtps::rtps::CacheChange_t* change; std::string topic_name; // Process all messages in the queque while (!edp_data_queue_.Empty()) { // Process each message with Front() 
DiscoveryEDPDataQueueInfo data_queue_info = edp_data_queue_.Front(); change = data_queue_info.change(); topic_name = data_queue_info.topic(); // If the change is a DATA(w|r) if (change->kind == eprosima::fastrtps::rtps::ALIVE) { logInfo(DISCOVERY_DATABASE, "ALIVE change received from: " << change->instanceHandle); // DATA(w) case if (is_writer(change)) { logInfo(DISCOVERY_DATABASE, "DATA(w) in topic " << topic_name << " received from: " << change->instanceHandle); create_writers_from_change_(change, topic_name); } // DATA(r) case else if (is_reader(change)) { logInfo(DISCOVERY_DATABASE, "DATA(r) in topic " << topic_name << " received from: " << change->instanceHandle); create_readers_from_change_(change, topic_name); } } // If the change is a DATA(Uw|Ur) else { // DATA(Uw) case if (is_writer(change)) { logInfo(DISCOVERY_DATABASE, "DATA(Uw) received from: " << change->instanceHandle); process_dispose_writer_(change); } // DATA(Ur) case else if (is_reader(change)) { logInfo(DISCOVERY_DATABASE, "DATA(Ur) received from: " << change->instanceHandle); process_dispose_reader_(change); } } // Pop the message from the queue edp_data_queue_.Pop(); } return is_dirty_topic; } void DiscoveryDataBase::create_participant_from_change_( eprosima::fastrtps::rtps::CacheChange_t* ch, const DiscoveryParticipantChangeData& change_data) { fastrtps::rtps::GUID_t change_guid = guid_from_change(ch); auto participant_it = participants_.find(change_guid.guidPrefix); // The participant was already known in the database if (participant_it != participants_.end()) { // Only update database if the change is newer than the one we already have if (ch->write_params.sample_identity().sequence_number() > participant_it->second.change()->write_params.sample_identity().sequence_number()) { // Update the change related to the participant and return the old change to the pool logInfo(DISCOVERY_DATABASE, "Participant updating. Marking old change to release"); // Update participant's change in the database, set all relevant participants ACK status to 0, and add // old change to changes_to_release_. update_change_and_unmatch_(ch, participant_it->second); // If it is an update of our own server, is already in history // Else, it needs to be sent in case it has unacked participants if (change_guid.guidPrefix != server_guid_prefix_ && !participant_it->second.is_acked_by_all()) { add_pdp_to_send_(ch); } } // if the cache is not new we have to release it, because it is repeated or outdated else { // if the change is the same that we already have, we update the ack list. This is because we have // received the data from two servers, so we have to set that both of them already know this data if (ch->write_params.sample_identity().sequence_number() == participant_it->second.change()->write_params.sample_identity().sequence_number()) { participant_it->second.add_or_update_ack_participant(ch->writerGUID.guidPrefix, true); } // we release it if it's the same or if it is lower changes_to_release_.push_back(ch); } } // New participant else { DiscoveryParticipantInfo part(ch, server_guid_prefix_, change_data); std::pair<std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator, bool> ret = participants_.insert(std::make_pair(change_guid.guidPrefix, part)); // If insert was successful if (ret.second) { logInfo(DISCOVERY_DATABASE, "New participant added: " << change_guid.guidPrefix); // Manually set to 1 the relevant participants ACK status of the participant that sent the change. 
This way, // we avoid backprogation of the data. ret.first->second.add_or_update_ack_participant(ch->writerGUID.guidPrefix, true); // If the DATA(p) it's from this server, it is already in history and we do nothing here if (change_guid.guidPrefix != server_guid_prefix_) { // If the participant is a new participant, mark that not everyone has ACKed this server's DATA(p) // TODO if the new participant is a server it may be that our DATA(p) is already acked because he is // our server and we have pinged it. But also if we are its server it could be the case that // our DATA(p) is not acked even when he is our server. Solution: see in PDPServer2 how the change has // arrived, if because our ping or because their DATA(p). MINOR PROBLEM server_acked_by_all(false); } // If it is local and server we have to create virtual endpoints, except for our own server if (change_guid.guidPrefix != server_guid_prefix_ && !ret.first->second.is_client() && ret.first->second.is_local()) { logInfo(DISCOVERY_DATABASE, "Creating virtual entities for " << change_guid.guidPrefix); /* Create virtual writer */ // Create a GUID for the virtual writer from the local server GUID prefix and the virtual writer entity // ID. fastrtps::rtps::GUID_t virtual_writer_guid(change_guid.guidPrefix, fastrtps::rtps::ds_server_virtual_writer); // Create a populate the Cache Change with the necessary information. fastrtps::rtps::CacheChange_t* virtual_writer_change = new fastrtps::rtps::CacheChange_t(); virtual_writer_change->kind = fastrtps::rtps::ChangeKind_t::ALIVE; virtual_writer_change->writerGUID.guidPrefix = ch->writerGUID.guidPrefix; virtual_writer_change->writerGUID.entityId = fastrtps::rtps::ds_server_virtual_writer; virtual_writer_change->instanceHandle = fastrtps::rtps::InstanceHandle_t(virtual_writer_guid); // Populate sample identity fastrtps::rtps::SampleIdentity virtual_writer_sample_id; virtual_writer_sample_id.writer_guid(virtual_writer_guid); virtual_writer_sample_id.sequence_number(eprosima::fastrtps::rtps::SequenceNumber_t(0)); // Set write params eprosima::fastrtps::rtps::WriteParams virtual_writer_writer_params; virtual_writer_writer_params.sample_identity(virtual_writer_sample_id); virtual_writer_writer_params.related_sample_identity(virtual_writer_sample_id); virtual_writer_change->write_params = std::move(virtual_writer_writer_params); // Create the virtual writer create_writers_from_change_(virtual_writer_change, virtual_topic_); /* Create virtual reader */ // Create a GUID for the virtual reader from the local server GUID prefix and the virtual reader entity // ID. fastrtps::rtps::GUID_t virtual_reader_guid(change_guid.guidPrefix, fastrtps::rtps::ds_server_virtual_reader); // Create a populate the Cache Change with the necessary information. 
fastrtps::rtps::CacheChange_t* virtual_reader_change = new fastrtps::rtps::CacheChange_t(); virtual_reader_change->kind = fastrtps::rtps::ChangeKind_t::ALIVE; virtual_reader_change->writerGUID.guidPrefix = ch->writerGUID.guidPrefix; virtual_reader_change->writerGUID.entityId = fastrtps::rtps::ds_server_virtual_reader; virtual_reader_change->instanceHandle = fastrtps::rtps::InstanceHandle_t(virtual_reader_guid); // Populate sample identity fastrtps::rtps::SampleIdentity virtual_reader_sample_id; virtual_reader_sample_id.writer_guid(virtual_reader_guid); virtual_reader_sample_id.sequence_number(eprosima::fastrtps::rtps::SequenceNumber_t(0)); // Set write params eprosima::fastrtps::rtps::WriteParams virtual_reader_writer_params; virtual_reader_writer_params.sample_identity(virtual_reader_sample_id); virtual_reader_writer_params.related_sample_identity(virtual_reader_sample_id); virtual_reader_change->write_params = std::move(virtual_reader_writer_params); // Create the virtual reader create_readers_from_change_(virtual_reader_change, virtual_topic_); } } else { logError(DISCOVERY_DATABASE, "Failed adding new participant " << change_guid.guidPrefix); } } } void DiscoveryDataBase::create_writers_from_change_( eprosima::fastrtps::rtps::CacheChange_t* ch, const std::string& topic_name) { const eprosima::fastrtps::rtps::GUID_t& writer_guid = guid_from_change(ch); auto writer_it = writers_.find(writer_guid); // The writer was already known in the database if (writer_it != writers_.end()) { // Only update database if the change is newer than the one we already have if (ch->write_params.sample_identity().sequence_number() > writer_it->second.change()->write_params.sample_identity().sequence_number()) { // Update the change related to the writer and return the old change to the pool // TODO (Paris): when updating, be careful of not to do unmatch if the only endpoint in the other // participant is NOT ALIVE. This means that you still have to send your Data(Ux) to him but not the // updates update_change_and_unmatch_(ch, writer_it->second); // It needs to be sent in case it has unacked participants if (!writer_it->second.is_acked_by_all()) { add_edp_publications_to_send_(ch); } } // if the cache is not new we have to release it, because it is repeated or outdated else { // if the change is the same that we already have, we update the ack list. 
This is because we have // received the data from two servers, so we have to set that both of them already know this data if (ch->write_params.sample_identity().sequence_number() == writer_it->second.change()->write_params.sample_identity().sequence_number()) { writer_it->second.add_or_update_ack_participant(ch->writerGUID.guidPrefix, true); } // we release it if it's the same or if it is lower changes_to_release_.push_back(ch); } } // The writer was NOT known by the database else { // Add entry to writers_ DiscoveryEndpointInfo tmp_writer( ch, topic_name, topic_name == virtual_topic_, server_guid_prefix_); std::pair<std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator, bool> ret = writers_.insert(std::make_pair(writer_guid, tmp_writer)); if (!ret.second) { logError(DISCOVERY_DATABASE, "Error inserting writer " << writer_guid); return; } writer_it = ret.first; // Add entry to participants_[guid_prefix]::writers std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator writer_part_it = participants_.find(writer_guid.guidPrefix); if (writer_part_it != participants_.end()) { writer_part_it->second.add_writer(writer_guid); } else { logError(DISCOVERY_DATABASE, "Writer " << writer_guid << " as no associated participant. Skipping"); return; } // Add writer to writers_by_topic_[topic_name] add_writer_to_topic_(writer_guid, topic_name); // Manually set to 1 the relevant participants ACK status of the participant that sent the change. This way, // we avoid backprogation of the data. writer_it->second.add_or_update_ack_participant(ch->writerGUID.guidPrefix, true); // if topic is virtual, it must iterate over all readers if (topic_name == virtual_topic_) { for (auto reader_it : readers_) { match_writer_reader_(writer_guid, reader_it.first); } } else { auto readers_it = readers_by_topic_.find(topic_name); if (readers_it == readers_by_topic_.end()) { logError(DISCOVERY_DATABASE, "Topic error: " << topic_name << ". Must exist."); return; } for (auto reader : readers_it->second) { match_writer_reader_(writer_guid, reader); } } // Update set of dirty_topics set_dirty_topic_(topic_name); } } void DiscoveryDataBase::create_readers_from_change_( eprosima::fastrtps::rtps::CacheChange_t* ch, const std::string& topic_name) { const eprosima::fastrtps::rtps::GUID_t& reader_guid = guid_from_change(ch); auto reader_it = readers_.find(reader_guid); // The reader was already known in the database if (reader_it != readers_.end()) { // Only update database if the change is newer than the one we already have if (ch->write_params.sample_identity().sequence_number() > reader_it->second.change()->write_params.sample_identity().sequence_number()) { // Update the change related to the reader and return the old change to the pool // TODO (Paris): when updating, be careful of not to do unmatch if the only endpoint in the other // participant is NOT ALIVE. This means that you still have to send your Data(Ux) to him but not the // updates update_change_and_unmatch_(ch, reader_it->second); // It needs to be sent in case it has unacked participants if (!reader_it->second.is_acked_by_all()) { add_edp_subscriptions_to_send_(ch); } } // if the cache is not new we have to release it, because it is repeated or outdated else { // if the change is the same that we already have, we update the ack list. 
This is because we have // received the data from two servers, so we have to set that both of them already know this data if (ch->write_params.sample_identity().sequence_number() == reader_it->second.change()->write_params.sample_identity().sequence_number()) { reader_it->second.add_or_update_ack_participant(ch->writerGUID.guidPrefix, true); } // we release it if it's the same or if it is lower changes_to_release_.push_back(ch); } } // The reader was NOT known by the database else { // Add entry to readers_ DiscoveryEndpointInfo tmp_reader( ch, topic_name, topic_name == virtual_topic_, server_guid_prefix_); std::pair<std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator, bool> ret = readers_.insert(std::make_pair(reader_guid, tmp_reader)); if (!ret.second) { logError(DISCOVERY_DATABASE, "Error inserting reader " << reader_guid); return; } reader_it = ret.first; // Add entry to participants_[guid_prefix]::readers std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator reader_part_it = participants_.find(reader_guid.guidPrefix); if (reader_part_it != participants_.end()) { reader_part_it->second.add_reader(reader_guid); } else { logError(DISCOVERY_DATABASE, "Writer " << reader_guid << " as no associated participant. Skipping"); return; } // Add reader to readers_by_topic_[topic_name] add_reader_to_topic_(reader_guid, topic_name); // Manually set to 1 the relevant participants ACK status of the participant that sent the change. This way, // we avoid backprogation of the data. reader_it->second.add_or_update_ack_participant(ch->writerGUID.guidPrefix, true); // if topic is virtual, it must iterate over all readers if (topic_name == virtual_topic_) { for (auto writer_it : writers_) { match_writer_reader_(writer_it.first, reader_guid); } } else { auto writers_it = writers_by_topic_.find(topic_name); if (writers_it == writers_by_topic_.end()) { logError(DISCOVERY_DATABASE, "Topic error: " << topic_name << ". 
Must exist."); return; } for (auto writer : writers_it->second) { match_writer_reader_(writer, reader_guid); } } // Update set of dirty_topics set_dirty_topic_(topic_name); } } void DiscoveryDataBase::match_writer_reader_( const eprosima::fastrtps::rtps::GUID_t& writer_guid, const eprosima::fastrtps::rtps::GUID_t& reader_guid) { logInfo(DISCOVERY_DATABASE, "Matching writer " << writer_guid << " with reader " << reader_guid); // writer entity auto wit = writers_.find(writer_guid); if (wit == writers_.end()) { logError(DISCOVERY_DATABASE, "Matching unexisting writer " << writer_guid); return; } DiscoveryEndpointInfo& writer_info = wit->second; // writer participant auto p_wit = participants_.find(writer_guid.guidPrefix); if (p_wit == participants_.end()) { logError(DISCOVERY_DATABASE, "Matching unexisting participant from writer " << writer_guid); return; } DiscoveryParticipantInfo& writer_participant_info = p_wit->second; // reader entity auto rit = readers_.find(reader_guid); if (rit == readers_.end()) { logError(DISCOVERY_DATABASE, "Matching unexisting reader " << reader_guid); return; } DiscoveryEndpointInfo& reader_info = rit->second; // reader participant auto p_rit = participants_.find(reader_guid.guidPrefix); if (p_rit == participants_.end()) { logError(DISCOVERY_DATABASE, "Matching unexisting participant from reader " << reader_guid); return; } DiscoveryParticipantInfo& reader_participant_info = p_rit->second; // virtual - needs info and give none // local - needs info and give info // external - needs none and give info // writer needs info = add writer participant in reader ack list // writer give info = add reader participant in writer ack list // TODO reduce number of cases. This is more visual, but can be reduce joining them if (writer_info.is_virtual()) { // writer virtual // if reader is virtual do not exchange info // if not, writer needs all the info from this endpoint if (!reader_info.is_virtual()) { // only if they do not have the info yet if (!reader_participant_info.is_relevant_participant(writer_guid.guidPrefix)) { reader_participant_info.add_or_update_ack_participant(writer_guid.guidPrefix); } if (!reader_info.is_relevant_participant(writer_guid.guidPrefix)) { reader_info.add_or_update_ack_participant(writer_guid.guidPrefix); } } } else if (writer_participant_info.is_local()) { // writer local if (reader_info.is_virtual()) { // reader virtual // writer gives info to reader // only if they do not have the info yet if (!writer_participant_info.is_relevant_participant(reader_guid.guidPrefix)) { writer_participant_info.add_or_update_ack_participant(reader_guid.guidPrefix); } if (!writer_info.is_relevant_participant(reader_guid.guidPrefix)) { writer_info.add_or_update_ack_participant(reader_guid.guidPrefix); } } else if (reader_participant_info.is_local()) { // reader local // both exchange info // only if they do not have the info yet if (!writer_participant_info.is_relevant_participant(reader_guid.guidPrefix)) { writer_participant_info.add_or_update_ack_participant(reader_guid.guidPrefix); } if (!writer_info.is_relevant_participant(reader_guid.guidPrefix)) { writer_info.add_or_update_ack_participant(reader_guid.guidPrefix); } if (!reader_participant_info.is_relevant_participant(writer_guid.guidPrefix)) { reader_participant_info.add_or_update_ack_participant(writer_guid.guidPrefix); } if (!reader_info.is_relevant_participant(writer_guid.guidPrefix)) { reader_info.add_or_update_ack_participant(writer_guid.guidPrefix); } } else { // reader external // reader gives info to 
writer // only if they do not have the info yet if (!reader_participant_info.is_relevant_participant(writer_guid.guidPrefix)) { reader_participant_info.add_or_update_ack_participant(writer_guid.guidPrefix); } if (!reader_info.is_relevant_participant(writer_guid.guidPrefix)) { reader_info.add_or_update_ack_participant(writer_guid.guidPrefix); } } } else { // writer external // if reader is external do not exchange info // if not, reader needs all the info from this endpoint if (reader_participant_info.is_local()) { // only if they do not have the info yet if (!writer_participant_info.is_relevant_participant(reader_guid.guidPrefix)) { writer_participant_info.add_or_update_ack_participant(reader_guid.guidPrefix); } if (!writer_info.is_relevant_participant(reader_guid.guidPrefix)) { writer_info.add_or_update_ack_participant(reader_guid.guidPrefix); } } } } bool DiscoveryDataBase::set_dirty_topic_( std::string topic) { logInfo(DISCOVERY_DATABASE, "Setting topic " << topic << " as dirty"); // If topic is virtual, we need to set as dirty all the other (non-virtual) topics if (topic == virtual_topic_) { // Set all topics to dirty dirty_topics_.clear(); // It is enough to use writers_by_topic because the topics are simetrical in writers and readers: // if a topic exists in one, it exists in the other for (auto topic_it : writers_by_topic_) { if (topic_it.first != virtual_topic_) { dirty_topics_.push_back(topic_it.first); } } return true; } else { if (std::find( dirty_topics_.begin(), dirty_topics_.end(), topic) == dirty_topics_.end()) { dirty_topics_.push_back(topic); return true; } } return false; } void DiscoveryDataBase::process_dispose_participant_( eprosima::fastrtps::rtps::CacheChange_t* ch) { const eprosima::fastrtps::rtps::GUID_t& participant_guid = guid_from_change(ch); // Change DATA(p) with DATA(Up) in participants map std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator pit = participants_.find(participant_guid.guidPrefix); if (pit != participants_.end()) { // Check if this participant is already NOT ALIVE // Due to the way of announce a server, it is common to receive two DATA(Up) from the same server if (pit->second.change()->kind != fastrtps::rtps::ChangeKind_t::ALIVE) { logInfo(DISCOVERY_DATABASE, "Ignoring second DATA(Up)" << participant_guid.guidPrefix); return; } // Only update DATA(p), leaving the change info untouched. This is because DATA(Up) does not have the // participant's meta-information, but we don't want to loose it here. update_change_and_unmatch_(ch, pit->second); } else { // This is not an error. 
It could be because we have already receive and process the DATA(Up) // from this participant and it is no longer in the database logInfo(DISCOVERY_DATABASE, "Processing disposal from an unexisting Participant" << participant_guid.guidPrefix); return; } // Delete entries from writers_ belonging to the participant while (!pit->second.writers().empty()) { auto writer_guid = pit->second.writers().back(); // erase writer from topic unmatch_writer_(writer_guid); // release the change and remove entity without setting Data(Uw) delete_writer_entity_(writer_guid); } // Delete entries from readers_ belonging to the participant while (!pit->second.readers().empty()) { auto reader_guid = pit->second.readers().back(); // this unmatch must erase the entity from writers unmatch_reader_(reader_guid); // release the change and remove entity without setting Data(Ur) delete_reader_entity_(reader_guid); } // all participant endoints must be already unmatched in others endopoints relevant_ack maps // unmatch own participant unmatch_participant_(participant_guid.guidPrefix); // Add entry to disposals_ if (std::find(disposals_.begin(), disposals_.end(), ch) == disposals_.end()) { disposals_.push_back(ch); } } void DiscoveryDataBase::process_dispose_writer_( eprosima::fastrtps::rtps::CacheChange_t* ch) { const eprosima::fastrtps::rtps::GUID_t& writer_guid = guid_from_change(ch); // check if the writer is still alive (if DATA(Up) is processed before it will be erased) std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator wit = writers_.find(writer_guid); if (wit != writers_.end()) { // Change DATA(w) with DATA(Uw) update_change_and_unmatch_(ch, wit->second); // remove writer from topic remove_writer_from_topic_(writer_guid, wit->second.topic()); // Add entry to disposals_ if (wit->second.topic() != virtual_topic_) { if (std::find(disposals_.begin(), disposals_.end(), ch) == disposals_.end()) { disposals_.push_back(ch); } } } } void DiscoveryDataBase::process_dispose_reader_( eprosima::fastrtps::rtps::CacheChange_t* ch) { const eprosima::fastrtps::rtps::GUID_t& reader_guid = guid_from_change(ch); // check if the writer is still alive (if DATA(Up) is processed before it will be erased) std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator rit = readers_.find(reader_guid); if (rit != readers_.end()) { // Change DATA(r) with DATA(Ur) update_change_and_unmatch_(ch, rit->second); // remove reader from topic remove_reader_from_topic_(reader_guid, rit->second.topic()); // Add entry to disposals_ if (rit->second.topic() != virtual_topic_) { if (std::find(disposals_.begin(), disposals_.end(), ch) == disposals_.end()) { disposals_.push_back(ch); } } } } bool DiscoveryDataBase::process_dirty_topics() { if (!enabled_) { logInfo(DISCOVERY_DATABASE, "Discovery Database is disabled"); return false; } // logInfo(DISCOVERY_DATABASE, "process_dirty_topics start"); // Get shared lock std::unique_lock<std::recursive_mutex> lock(mutex_); // Iterator objects are declared here because they are reused in each iteration of the loops std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator parts_reader_it; std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator parts_writer_it; std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator readers_it; std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator writers_it; // Iterate over dirty_topics_ for (auto topic_it = dirty_topics_.begin(); topic_it != 
dirty_topics_.end();) { logInfo(DISCOVERY_DATABASE, "Processing topic: " << *topic_it); // Flag to store whether a topic can be cleared. bool is_clearable = true; // Get all the writers in the topic std::vector<fastrtps::rtps::GUID_t> writers; auto ret = writers_by_topic_.find(*topic_it); if (ret != writers_by_topic_.end()) { writers = ret->second; } // Get all the readers in the topic std::vector<fastrtps::rtps::GUID_t> readers; ret = readers_by_topic_.find(*topic_it); if (ret != readers_by_topic_.end()) { readers = ret->second; } for (fastrtps::rtps::GUID_t writer: writers) // Iterate over writers in the topic: { logInfo(DISCOVERY_DATABASE, "[" << *topic_it << "]" << " Processing writer: " << writer); // Iterate over readers in the topic: for (fastrtps::rtps::GUID_t reader : readers) { logInfo(DISCOVERY_DATABASE, "[" << *topic_it << "]" << " Processing reader: " << reader); // Find participants with writer info and participant with reader info in participants_ parts_reader_it = participants_.find(reader.guidPrefix); parts_writer_it = participants_.find(writer.guidPrefix); // Find reader info in readers_ readers_it = readers_.find(reader); // Find writer info in writers_ writers_it = writers_.find(writer); // Check in `participants_` whether the client with the reader has acknowledge the PDP of the client // with the writer. if (parts_reader_it != participants_.end()) { if (parts_reader_it->second.is_matched(writer.guidPrefix)) { // Check the status of the writer in `readers_[reader]::relevant_participants_builtin_ack_status`. if (readers_it != readers_.end() && readers_it->second.is_relevant_participant(writer.guidPrefix) && !readers_it->second.is_matched(writer.guidPrefix)) { // If the status is 0, add DATA(r) to a `edp_publications_to_send_` (if it's not there). if (add_edp_subscriptions_to_send_(readers_it->second.change())) { logInfo(DISCOVERY_DATABASE, "Addind DATA(r) to send: " << readers_it->second.change()->instanceHandle); } } } else if (parts_reader_it->second.is_relevant_participant(writer.guidPrefix)) { // Add DATA(p) of the client with the writer to `pdp_to_send_` (if it's not there). if (add_pdp_to_send_(parts_reader_it->second.change())) { logInfo(DISCOVERY_DATABASE, "Addind readers' DATA(p) to send: " << parts_reader_it->second.change()->instanceHandle); } // Set topic as not-clearable. is_clearable = false; } } // Check in `participants_` whether the client with the writer has acknowledge the PDP of the client // with the reader. if (parts_writer_it != participants_.end()) { if (parts_writer_it->second.is_matched(reader.guidPrefix)) { // Check the status of the reader in `writers_[writer]::relevant_participants_builtin_ack_status`. if (writers_it != writers_.end() && writers_it->second.is_relevant_participant(reader.guidPrefix) && !writers_it->second.is_matched(reader.guidPrefix)) { // If the status is 0, add DATA(w) to a `edp_subscriptions_to_send_` (if it's not there). if (add_edp_publications_to_send_(writers_it->second.change())) { logInfo(DISCOVERY_DATABASE, "Addind DATA(w) to send: " << writers_it->second.change()->instanceHandle); } } } else if (parts_writer_it->second.is_relevant_participant(reader.guidPrefix)) { // Add DATA(p) of the client with the reader to `pdp_to_send_` (if it's not there). if (add_pdp_to_send_(parts_writer_it->second.change())) { logInfo(DISCOVERY_DATABASE, "Addind writers' DATA(p) to send: " << parts_writer_it->second.change()->instanceHandle); } // Set topic as not-clearable. 
is_clearable = false; } } } } // Check whether the topic is still dirty or it can be cleared if (is_clearable) { // Delete topic from dirty_topics_ logInfo(DISCOVERY_DATABASE, "Topic " << *topic_it << " has been cleaned"); topic_it = dirty_topics_.erase(topic_it); } else { // Proceed with next topic logInfo(DISCOVERY_DATABASE, "Topic " << *topic_it << " is still dirty"); ++topic_it; } } // Return whether there still are dirty topics logInfo(DISCOVERY_DATABASE, "Are there dirty topics? " << !dirty_topics_.empty()); return !dirty_topics_.empty(); } bool DiscoveryDataBase::delete_entity_of_change( fastrtps::rtps::CacheChange_t* change) { if (!enabled_) { logInfo(DISCOVERY_DATABASE, "Discovery Database is disabled"); return false; } // Lock(exclusive mode) mutex locally std::unique_lock<std::recursive_mutex> lock(mutex_); if (change->kind == fastrtps::rtps::ChangeKind_t::ALIVE) { logWarning(DISCOVERY_DATABASE, "Attempting to delete information of an ALIVE entity: " << guid_from_change(change)); return false; } if (is_participant(change)) { // The information related to this participant is cleaned up in process_data_queue() // when a disposal arrives, and it cleans also its children entities return delete_participant_entity_(guid_from_change(change).guidPrefix); } else if (is_reader(change)) { // The information related to this reader is cleaned up in process_data_queue() return delete_reader_entity_(guid_from_change(change)); } else if (is_writer(change)) { // The information related to this writer is cleaned up in process_data_queue() return delete_writer_entity_(guid_from_change(change)); } return false; } bool DiscoveryDataBase::data_queue_empty() { return (pdp_data_queue_.BothEmpty() && edp_data_queue_.BothEmpty()); } bool DiscoveryDataBase::is_participant( const eprosima::fastrtps::rtps::GUID_t& guid) { return eprosima::fastrtps::rtps::c_EntityId_RTPSParticipant == guid.entityId; } bool DiscoveryDataBase::is_writer( const eprosima::fastrtps::rtps::GUID_t& guid) { // RTPS Specification v2.3 // For writers: NO_KEY = 0x03, WITH_KEY = 0x02 // For built-in writers: NO_KEY = 0xc3, WITH_KEY = 0xc2 const eprosima::fastrtps::rtps::octet identifier = guid.entityId.value[3]; return ((identifier == 0x02) || (identifier == 0xc2) || (identifier == 0x03) || (identifier == 0xc3)); } bool DiscoveryDataBase::is_reader( const eprosima::fastrtps::rtps::GUID_t& guid) { // RTPS Specification v2.3 // For readers: NO_KEY = 0x04, WITH_KEY = 0x07 // For built-in readers: NO_KEY = 0xc4, WITH_KEY = 0xc7 const eprosima::fastrtps::rtps::octet identifier = guid.entityId.value[3]; return ((identifier == 0x04) || (identifier == 0xc4) || (identifier == 0x07) || (identifier == 0xc7)); } bool DiscoveryDataBase::is_participant( const eprosima::fastrtps::rtps::CacheChange_t* ch) { return is_participant(guid_from_change(ch)); } bool DiscoveryDataBase::is_writer( const eprosima::fastrtps::rtps::CacheChange_t* ch) { return is_writer(guid_from_change(ch)); } bool DiscoveryDataBase::is_reader( const eprosima::fastrtps::rtps::CacheChange_t* ch) { return is_reader(guid_from_change(ch)); } eprosima::fastrtps::rtps::GUID_t DiscoveryDataBase::guid_from_change( const eprosima::fastrtps::rtps::CacheChange_t* ch) { return fastrtps::rtps::iHandle2GUID(ch->instanceHandle); } fastrtps::rtps::CacheChange_t* DiscoveryDataBase::cache_change_own_participant() { auto part_it = participants_.find(server_guid_prefix_); if (part_it != participants_.end()) { return part_it->second.change(); } return nullptr; } const 
std::vector<fastrtps::rtps::GuidPrefix_t> DiscoveryDataBase::direct_clients_and_servers() { std::vector<fastrtps::rtps::GuidPrefix_t> direct_clients_and_servers; // Iterate over participants to add the remote ones that are direct clients or servers for (auto participant: participants_) { // Only add participants other than the server if (server_guid_prefix_ != participant.first) { // Only add direct clients or server that are alive, not relayed ones. if (participant.second.is_local() && participant.second.change()->kind == eprosima::fastrtps::rtps::ALIVE) { direct_clients_and_servers.push_back(participant.first); } } } return direct_clients_and_servers; } bool DiscoveryDataBase::server_acked_by_my_servers() { if (servers_.size() == 0) { return true; } // Find the server's participant and check whether all its servers have ACKed the server's DATA(p) auto this_server = participants_.find(server_guid_prefix_); for (auto prefix : servers_) { if (!this_server->second.is_matched(prefix)) { return false; } } return true; } std::vector<fastrtps::rtps::GuidPrefix_t> DiscoveryDataBase::ack_pending_servers() { std::vector<fastrtps::rtps::GuidPrefix_t> ack_pending_servers; // Find the server's participant and check whether all its servers have ACKed the server's DATA(p) auto this_server = participants_.find(server_guid_prefix_); for (auto prefix : servers_) { if (!this_server->second.is_matched(prefix)) { ack_pending_servers.push_back(prefix); } } return ack_pending_servers; } fastrtps::rtps::LocatorList_t DiscoveryDataBase::participant_metatraffic_locators( fastrtps::rtps::GuidPrefix_t participant_guid_prefix) { fastrtps::rtps::LocatorList_t locators; auto part_it = participants_.find(participant_guid_prefix); if (part_it != participants_.end()) { for (auto locator : part_it->second.metatraffic_locators().unicast) { locators.push_back(locator); } } return locators; } DiscoveryDataBase::AckedFunctor DiscoveryDataBase::functor( eprosima::fastrtps::rtps::CacheChange_t* change) { return DiscoveryDataBase::AckedFunctor(this, change); } DiscoveryDataBase::AckedFunctor::AckedFunctor( DiscoveryDataBase* db, eprosima::fastrtps::rtps::CacheChange_t* change) : db_(db) , change_(change) , pending_(false) // references its own state , external_pending_(pending_) { // RAII only for the stateful object db_->lock_(); } DiscoveryDataBase::AckedFunctor::AckedFunctor( const DiscoveryDataBase::AckedFunctor& r) // references original state : external_pending_(r.external_pending_) { db_ = r.db_; change_ = r.change_; } DiscoveryDataBase::AckedFunctor::~AckedFunctor() { if (&external_pending_ == &pending_) { // only the stateful object manages the lock db_->unlock_(); } } void DiscoveryDataBase::AckedFunctor::operator () ( const eprosima::fastrtps::rtps::ReaderProxy* reader_proxy) { logInfo(DISCOVERY_DATABASE, "functor operator in change: " << change_->instanceHandle); logInfo(DISCOVERY_DATABASE, "for reader proxy: " << reader_proxy->guid()); // Check whether the change has been acknowledged by a given reader if (reader_proxy->rtps_is_relevant(change_)) { logInfo(DISCOVERY_DATABASE, "is relevant, sequence number " << change_->sequenceNumber); if (reader_proxy->change_is_acked(change_->sequenceNumber)) { // In the discovery database, mark the change as acknowledged by the reader db_->add_ack_(change_, reader_proxy->guid().guidPrefix); } else { // if the reader proxy is from a server that we are pinging, the data is set as acked for (auto it = db_->servers_.begin(); it < db_->servers_.end(); ++it) { if 
(reader_proxy->guid().guidPrefix == *it) { return; } } // This change is relevant and has not been acked, and does not belongs to the reader proxy // of a server that has not been paired yet, so there are pending acknowledgements external_pending_ = true; } } } void DiscoveryDataBase::unmatch_participant_( const eprosima::fastrtps::rtps::GuidPrefix_t& guid_prefix) { logInfo(DISCOVERY_DATABASE, "unmatching participant: " << guid_prefix); auto pit = participants_.find(guid_prefix); if (pit == participants_.end()) { logWarning(DISCOVERY_DATABASE, "Attempting to unmatch an unexisting participant: " << guid_prefix); } // for each relevant participant make not relevant for (eprosima::fastrtps::rtps::GuidPrefix_t relevant_participant : pit->second.relevant_participants()) { if (relevant_participant != guid_prefix) { auto rpit = participants_.find(relevant_participant); if (rpit == participants_.end()) { // This is not an error. Remote participants will try to unmatch with participants even // when the match is not reciprocal logInfo(DISCOVERY_DATABASE, "Participant " << relevant_participant << " matched with an unexisting participant: " << guid_prefix); } else { rpit->second.remove_participant(guid_prefix); } } } } void DiscoveryDataBase::unmatch_writer_( const eprosima::fastrtps::rtps::GUID_t& guid) { logInfo(DISCOVERY_DATABASE, "unmatching writer: " << guid); auto wit = writers_.find(guid); if (wit == writers_.end()) { logWarning(DISCOVERY_DATABASE, "Attempting to unmatch an unexisting writer: " << guid); return; } // get writer topic std::string topic = wit->second.topic(); // remove it from writer by topic remove_writer_from_topic_(guid, topic); // it there are more than one writer in this topic in the same participant we do not unmatch the endpoints if (!repeated_writer_topic_(guid.guidPrefix, topic)) { // for each reader in same topic make not relevant. It could be none in readers auto tit = readers_by_topic_.find(topic); if (tit != readers_by_topic_.end()) { for (auto reader : tit->second) { auto rit = readers_.find(reader); if (rit == readers_.end()) { logWarning(DISCOVERY_DATABASE, "Unexisting reader " << reader << " in topic: " << topic); } else { rit->second.remove_participant(guid.guidPrefix); } } } } } void DiscoveryDataBase::unmatch_reader_( const eprosima::fastrtps::rtps::GUID_t& guid) { logInfo(DISCOVERY_DATABASE, "unmatching reader: " << guid); auto rit = readers_.find(guid); if (rit == readers_.end()) { logWarning(DISCOVERY_DATABASE, "Attempting to unmatch an unexisting reader: " << guid); return; } // get reader topic std::string topic = rit->second.topic(); // remove it from reader by topic remove_reader_from_topic_(guid, topic); // it there are more than one reader in this topic in the same participant we do not unmatch the endpoints if (!repeated_reader_topic_(guid.guidPrefix, topic)) { // for each writer in same topic make not relevant. 
It could be none in writers auto tit = writers_by_topic_.find(topic); if (tit != writers_by_topic_.end()) { for (auto writer : tit->second) { auto wit = writers_.find(writer); if (wit == writers_.end()) { logWarning(DISCOVERY_DATABASE, "Unexisting writer " << writer << " in topic: " << topic); } else { wit->second.remove_participant(guid.guidPrefix); } } } } } bool DiscoveryDataBase::repeated_writer_topic_( const eprosima::fastrtps::rtps::GuidPrefix_t& participant, const std::string& topic_name) { int count = 0; auto pit = participants_.find(participant); if (pit == participants_.end()) { logWarning(DISCOVERY_DATABASE, "Checking repeated writer topics in an unexisting participant: " << participant); return false; } for (auto writer_guid : pit->second.writers()) { auto wit = writers_.find(writer_guid); if (wit == writers_.end()) { logWarning(DISCOVERY_DATABASE, "writer missing: " << writer_guid); } if (wit->second.topic() == topic_name) { ++count; if (count > 1) { return true; } } } // we already know is false. Safety check return count > 1; } // return if there are more than one reader in the participant in the same topic bool DiscoveryDataBase::repeated_reader_topic_( const eprosima::fastrtps::rtps::GuidPrefix_t& participant, const std::string& topic_name) { int count = 0; auto pit = participants_.find(participant); if (pit == participants_.end()) { logWarning(DISCOVERY_DATABASE, "Checking repeated reader topics in an unexisting participant: " << participant); return false; } for (auto reader_guid : pit->second.readers()) { auto rit = readers_.find(reader_guid); if (rit == readers_.end()) { logWarning(DISCOVERY_DATABASE, "reader missing: " << reader_guid); return false; } if (rit->second.topic() == topic_name) { ++count; if (count > 1) { return true; } } } // we already know is false. Safety check return count > 1; } void DiscoveryDataBase::remove_writer_from_topic_( const eprosima::fastrtps::rtps::GUID_t& writer_guid, const std::string& topic_name) { if (topic_name == virtual_topic_) { std::map<std::string, std::vector<eprosima::fastrtps::rtps::GUID_t>>::iterator topic_it; for (topic_it = writers_by_topic_.begin(); topic_it != writers_by_topic_.end(); topic_it++) { for (std::vector<eprosima::fastrtps::rtps::GUID_t>::iterator writer_it = topic_it->second.begin(); writer_it != topic_it->second.end(); ++writer_it) { if (*writer_it == writer_guid) { topic_it->second.erase(writer_it); break; } } } } else { std::map<std::string, std::vector<eprosima::fastrtps::rtps::GUID_t>>::iterator topic_it = writers_by_topic_.find(topic_name); if (topic_it != writers_by_topic_.end()) { for (std::vector<eprosima::fastrtps::rtps::GUID_t>::iterator writer_it = topic_it->second.begin(); writer_it != topic_it->second.end(); ++writer_it) { if (*writer_it == writer_guid) { topic_it->second.erase(writer_it); break; } } // The topic wont be deleted to avoid creating and matching again all the virtual endpoints // This only affects a virtual endpoint, that will be added in this topic, but nothing will be matched // This also helps because topics are symetrical, and removing one implies check the other's emptyness first. 
} } } void DiscoveryDataBase::remove_reader_from_topic_( const eprosima::fastrtps::rtps::GUID_t& reader_guid, const std::string& topic_name) { logInfo(DISCOVERY_DATABASE, "removing: " << reader_guid << " from topic " << topic_name); if (topic_name == virtual_topic_) { std::map<std::string, std::vector<eprosima::fastrtps::rtps::GUID_t>>::iterator topic_it; for (topic_it = readers_by_topic_.begin(); topic_it != readers_by_topic_.end(); topic_it++) { for (std::vector<eprosima::fastrtps::rtps::GUID_t>::iterator reader_it = topic_it->second.begin(); reader_it != topic_it->second.end(); ++reader_it) { if (*reader_it == reader_guid) { topic_it->second.erase(reader_it); break; } } } } else { std::map<std::string, std::vector<eprosima::fastrtps::rtps::GUID_t>>::iterator topic_it = readers_by_topic_.find(topic_name); if (topic_it != readers_by_topic_.end()) { for (std::vector<eprosima::fastrtps::rtps::GUID_t>::iterator reader_it = topic_it->second.begin(); reader_it != topic_it->second.end(); ++reader_it) { if (*reader_it == reader_guid) { topic_it->second.erase(reader_it); break; } } // the topic wont be deleted to avoid creating and matching again all the virtual endpoints // this only affects a virtual endpoint, that will be added in this topic, but nothing will be matched } } } void DiscoveryDataBase::create_topic_( const std::string& topic_name) { // create writers topic auto wit = writers_by_topic_.insert( std::pair<std::string, std::vector<fastrtps::rtps::GUID_t>>( topic_name, std::vector<fastrtps::rtps::GUID_t>())); if (wit.second) { // find virtual topic auto v_wit = writers_by_topic_.find(virtual_topic_); if (v_wit != writers_by_topic_.end()) { // add all virtual writers // in case virtual topic does not exist do nothing for (fastrtps::rtps::GUID_t virtual_writer : v_wit->second) { wit.first->second.push_back(virtual_writer); } } } // else topic already existed // create readers topic auto rit = readers_by_topic_.insert( std::pair<std::string, std::vector<fastrtps::rtps::GUID_t>>( topic_name, std::vector<fastrtps::rtps::GUID_t>())); if (rit.second) { // find virtual topic auto v_rit = readers_by_topic_.find(virtual_topic_); if (v_rit != readers_by_topic_.end()) { // add all virtual readers // in case virtual topic does not exist do nothing for (fastrtps::rtps::GUID_t virtual_reader : v_rit->second) { rit.first->second.push_back(virtual_reader); } } } // else topic already existed logInfo(DISCOVERY_DATABASE, "New topic " << topic_name << " created"); } void DiscoveryDataBase::add_writer_to_topic_( const eprosima::fastrtps::rtps::GUID_t& writer_guid, const std::string& topic_name) { // check if the topic exists already, if not create it auto it = writers_by_topic_.find(topic_name); if (it == writers_by_topic_.end()) { create_topic_(topic_name); it = writers_by_topic_.find(topic_name); } // if the topic is virtual, add it in every topic, included virtual // could be recursive but it will call too many find functions if (topic_name == virtual_topic_) { for (auto it_topics = writers_by_topic_.begin(); it_topics != writers_by_topic_.end(); ++it_topics) { // This find should be useless because right now we only call this function from // create_writer_from_change, so the entity must be always new std::vector<eprosima::fastrtps::rtps::GUID_t>::iterator writer_by_topic_it = std::find(it_topics->second.begin(), it_topics->second.end(), writer_guid); if (writer_by_topic_it == it_topics->second.end()) { logInfo(DISCOVERY_DATABASE, "New virtual writer " << writer_guid << " in writers_by_topic: " << 
it_topics->first); it_topics->second.push_back(writer_guid); } } // The writer has been already added to every topic, avoid try to add it again in virtual topic return; } // add the writer in the topic std::vector<eprosima::fastrtps::rtps::GUID_t>::iterator writer_by_topic_it = std::find(it->second.begin(), it->second.end(), writer_guid); if (writer_by_topic_it == it->second.end()) { logInfo(DISCOVERY_DATABASE, "New writer " << writer_guid << " in writers_by_topic: " << topic_name); it->second.push_back(writer_guid); } } void DiscoveryDataBase::add_reader_to_topic_( const eprosima::fastrtps::rtps::GUID_t& reader_guid, const std::string& topic_name) { // check if the topic exists already, if not create it auto it = readers_by_topic_.find(topic_name); if (it == readers_by_topic_.end()) { create_topic_(topic_name); it = readers_by_topic_.find(topic_name); } // if the topic is virtual, add it in every topic, included virtual // could be recursive but it will call too many find functions if (topic_name == virtual_topic_) { for (auto it_topics = readers_by_topic_.begin(); it_topics != readers_by_topic_.end(); ++it_topics) { // This find should be useless because right now we only call this function from // create_reader_from_change, so the entity must be always new std::vector<eprosima::fastrtps::rtps::GUID_t>::iterator reader_by_topic_it = std::find(it_topics->second.begin(), it_topics->second.end(), reader_guid); if (reader_by_topic_it == it_topics->second.end()) { logInfo(DISCOVERY_DATABASE, "New virtual reader " << reader_guid << " in readers_by_topic: " << it_topics->first); it_topics->second.push_back(reader_guid); } } // The reader has been already added to every topic, avoid try to add it again in virtual topic return; } // add the reader in the topic std::vector<eprosima::fastrtps::rtps::GUID_t>::iterator reader_by_topic_it = std::find(it->second.begin(), it->second.end(), reader_guid); if (reader_by_topic_it == it->second.end()) { logInfo(DISCOVERY_DATABASE, "New reader " << reader_guid << " in readers_by_topic: " << topic_name); it->second.push_back(reader_guid); } } bool DiscoveryDataBase::delete_participant_entity_( const fastrtps::rtps::GuidPrefix_t& guid_prefix) { auto it = participants_.find(guid_prefix); if (it == participants_.end()) { return false; } delete_participant_entity_(it); return true; } std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator DiscoveryDataBase::delete_participant_entity_( std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator it) { logInfo(DISCOVERY_DATABASE, "Deleting participant: " << it->first); if (it == participants_.end()) { return participants_.end(); } changes_to_release_.push_back(it->second.change()); return participants_.erase(it); } bool DiscoveryDataBase::delete_reader_entity_( const fastrtps::rtps::GUID_t& guid) { // find own reader auto it = readers_.find(guid); if (it == readers_.end()) { return false; } delete_reader_entity_(it); return true; } std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator DiscoveryDataBase::delete_reader_entity_( std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator it) { logInfo(DISCOVERY_DATABASE, "Deleting reader: " << it->first.guidPrefix); if (it == readers_.end()) { return readers_.end(); } // Remove entity from participant readers vector auto pit = participants_.find(it->first.guidPrefix); if (pit == participants_.end()) { logError(DISCOVERY_DATABASE, "Attempting to delete and orphan reader"); } else 
{ pit->second.remove_reader(it->first); } if (it->second.is_virtual()) { // If the change is virtual, we can simply delete it delete it->second.change(); } else { // Mark change to release changes_to_release_.push_back(it->second.change()); } // remove entity in readers_ map return readers_.erase(it); } bool DiscoveryDataBase::delete_writer_entity_( const fastrtps::rtps::GUID_t& guid) { // find own writer auto it = writers_.find(guid); if (it == writers_.end()) { return false; } delete_writer_entity_(it); return true; } std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator DiscoveryDataBase::delete_writer_entity_( std::map<eprosima::fastrtps::rtps::GUID_t, DiscoveryEndpointInfo>::iterator it) { logInfo(DISCOVERY_DATABASE, "Deleting writer: " << it->first.guidPrefix); if (it == writers_.end()) { return writers_.end(); } // Remove entity from participant writers vector auto pit = participants_.find(it->first.guidPrefix); if (pit == participants_.end()) { logError(DISCOVERY_DATABASE, "Attempting to delete and orphan writer"); return it; } else { pit->second.remove_writer(it->first); } if (it->second.is_virtual()) { // If the change is virtual, we can simply delete it delete it->second.change(); } else { // Mark change to release changes_to_release_.push_back(it->second.change()); } // remove entity in writers_ map return writers_.erase(it); } bool DiscoveryDataBase::add_pdp_to_send_( eprosima::fastrtps::rtps::CacheChange_t* change) { // Add DATA(p) to send in next iteration if it is not already there if (std::find( pdp_to_send_.begin(), pdp_to_send_.end(), change) == pdp_to_send_.end()) { logInfo(DISCOVERY_DATABASE, "Addind DATA(p) to send: " << change->instanceHandle); pdp_to_send_.push_back(change); return true; } return false; } bool DiscoveryDataBase::add_edp_publications_to_send_( eprosima::fastrtps::rtps::CacheChange_t* change) { // Add DATA(w) to send in next iteration if it is not already there if (std::find( edp_publications_to_send_.begin(), edp_publications_to_send_.end(), change) == edp_publications_to_send_.end()) { logInfo(DISCOVERY_DATABASE, "Addind DATA(w) to send: " << change->instanceHandle); edp_publications_to_send_.push_back(change); return true; } return false; } bool DiscoveryDataBase::add_edp_subscriptions_to_send_( eprosima::fastrtps::rtps::CacheChange_t* change) { // Add DATA(r) to send in next iteration if it is not already there if (std::find( edp_subscriptions_to_send_.begin(), edp_subscriptions_to_send_.end(), change) == edp_subscriptions_to_send_.end()) { logInfo(DISCOVERY_DATABASE, "Addind DATA(r) to send: " << change->instanceHandle); edp_subscriptions_to_send_.push_back(change); return true; } return false; } void DiscoveryDataBase::to_json(nlohmann::json& j) const { // participants auto pit = participants_.begin(); j["participants"] = nlohmann::json({}); while(pit != participants_.end()) { if (pit->first != server_guid_prefix_) { nlohmann::json j_part; pit->second.to_json(j_part); j["participants"][ddb::object_to_string(pit->first)] = j_part; } ++pit; } // writers auto wit = writers_.begin(); if (wit == writers_.end()) { j["writers"] = nlohmann::json({}); } while(wit != writers_.end()) { nlohmann::json j_w; wit->second.to_json(j_w); j["writers"][ddb::object_to_string(wit->first)] = j_w; ++wit; } // readers auto rit = readers_.begin(); if (rit == readers_.end()) { j["readers"] = nlohmann::json({}); } while(rit != readers_.end()) { nlohmann::json j_r; rit->second.to_json(j_r); j["readers"][ddb::object_to_string(rit->first)] = j_r; ++rit; 
} // TODO add version } bool DiscoveryDataBase::from_json( nlohmann::json& j, std::map<eprosima::fastrtps::rtps::InstanceHandle_t, fastrtps::rtps::CacheChange_t*>& changes_map) { // This function will parse each attribute in json backup, casting it to istringstream // (std::istringstream) j[""] >> obj; // Changes are taken from changes_map, with already created changes // Auxiliar variables to deserialize and create new objects of the ddb fastrtps::rtps::InstanceHandle_t instance_handle_aux; fastrtps::rtps::GuidPrefix_t prefix_aux; fastrtps::rtps::GuidPrefix_t prefix_aux_ack; fastrtps::rtps::GUID_t guid_aux; logInfo(DISCOVERY_DATABASE, "Raising DDB from json Backup"); try { // Participants for (auto it = j["participants"].begin(); it != j["participants"].end(); ++it) { // Populate info from participant to charge its change (std::istringstream) it.key() >> prefix_aux; (std::istringstream) it.value()["change"]["instance_handle"].get<std::string>() >> instance_handle_aux; // Get change fastrtps::rtps::CacheChange_t* change; change = changes_map[instance_handle_aux]; // Populate RemoteLocatorList fastrtps::rtps::RemoteLocatorList rll; (std::istringstream) it.value()["metatraffic_locators"].get<std::string>() >> rll; // Populate DiscoveryParticipantChangeData DiscoveryParticipantChangeData dpcd( rll, it.value()["is_client"].get<bool>(), it.value()["is_local"].get<bool>()); // Populate DiscoveryParticipantInfo DiscoveryParticipantInfo dpi(change, server_guid_prefix_, dpcd); // Add acks for (auto it_ack = it.value()["ack_status"].begin(); it_ack != it.value()["ack_status"].end(); ++it_ack) { // Populate GuidPrefix_t (std::istringstream) it_ack.key() >> prefix_aux_ack; dpi.add_or_update_ack_participant(prefix_aux_ack, it_ack.value().get<bool>()); } // Add Participant participants_.insert(std::make_pair(prefix_aux, dpi)); logInfo(DISCOVERY_DATABASE, "Participant " << prefix_aux << " created"); // In case the change is NOT ALIVE it must be set as dispose so it can be communicate to others and erased if(change->kind != fastrtps::rtps::ALIVE) { disposals_.push_back(change); } } // Writers for (auto it = j["writers"].begin(); it != j["writers"].end(); ++it) { // Populate GUID_t (std::istringstream) it.key() >> guid_aux; (std::istringstream) it.value()["change"]["instance_handle"].get<std::string>() >> instance_handle_aux; // Get change fastrtps::rtps::CacheChange_t* change; change = changes_map[instance_handle_aux]; // Populate topic std::string topic = it.value()["topic"].get<std::string>(); // Populate DiscoveryEndpointInfo DiscoveryEndpointInfo dei(change, topic, topic == virtual_topic_, server_guid_prefix_); // Add acks for (auto it_ack = it.value()["ack_status"].begin(); it_ack != it.value()["ack_status"].end(); ++it_ack) { // Populate GuidPrefix_t (std::istringstream) it_ack.key() >> prefix_aux_ack; dei.add_or_update_ack_participant(prefix_aux_ack, it_ack.value().get<bool>()); } // Add Participant auto wit = writers_.insert(std::make_pair(guid_aux, dei)); // Extra configurations for writers // Add writer to writers_by_topic. 
This will create the topic if necessary add_writer_to_topic_(guid_aux, topic); // add writer to its participant std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator writer_part_it = participants_.find(guid_aux.guidPrefix); if (writer_part_it != participants_.end()) { writer_part_it->second.add_writer(guid_aux); } else { // Endpoint without participant, corrupted DDB logError(DISCOVERY_DATABASE, "Writer " << guid_aux << " without participant"); // TODO handle error return false; } logInfo(DISCOVERY_DATABASE, "Writer " << guid_aux << " created with instance handle " << wit.first->second.change()->instanceHandle); if(change->kind != fastrtps::rtps::ALIVE) { disposals_.push_back(change); } } // Readers for (auto it = j["readers"].begin(); it != j["readers"].end(); ++it) { // Populate GUID_t (std::istringstream) it.key() >> guid_aux; (std::istringstream) it.value()["change"]["instance_handle"].get<std::string>() >> instance_handle_aux; // Get change fastrtps::rtps::CacheChange_t* change; change = changes_map[instance_handle_aux]; // Populate topic std::string topic = it.value()["topic"].get<std::string>(); // Populate DiscoveryEndpointInfo DiscoveryEndpointInfo dei(change, topic, topic == virtual_topic_, server_guid_prefix_); // Add acks for (auto it_ack = it.value()["ack_status"].begin(); it_ack != it.value()["ack_status"].end(); ++it_ack) { // Populate GuidPrefix_t (std::istringstream) it_ack.key() >> prefix_aux_ack; dei.add_or_update_ack_participant(prefix_aux_ack, it_ack.value().get<bool>()); } // Add Participant readers_.insert(std::make_pair(guid_aux, dei)); // Extra configurations for readers // Add reader to readers_by_topic. This will create the topic if necessary add_reader_to_topic_(guid_aux, topic); // add reader to its participant std::map<eprosima::fastrtps::rtps::GuidPrefix_t, DiscoveryParticipantInfo>::iterator reader_part_it = participants_.find(guid_aux.guidPrefix); if (reader_part_it != participants_.end()) { reader_part_it->second.add_reader(guid_aux); } else { // Endpoint without participant, corrupted DDB // TODO handle error return false; } logInfo(DISCOVERY_DATABASE, "Reader " << guid_aux << " created"); if(change->kind != fastrtps::rtps::ALIVE) { disposals_.push_back(change); } } } catch (std::ios_base::failure&) { logError(DISCOVERY_DATABASE, "BACKUP CORRUPTED"); } // set dirty topics to all, so next iteration every message pending is sent set_dirty_topic_(virtual_topic_); // announce own server server_acked_by_all(false); return true; } void DiscoveryDataBase::clean_backup() { logInfo(DISCOVERY_DATABASE, "Restoring queue DDB in json backup"); // This will erase the last backup stored backup_file_.close(); backup_file_.open(backup_file_name_, std::ios_base::out); } void DiscoveryDataBase::persistence_enable(std::string backup_file_name) { is_persistent_ = true; backup_file_name_ = backup_file_name; // It opens the file in append mode because the info in it has not been yet backup_file_.open(backup_file_name_, std::ios::app); } } // namespace ddb } // namespace rtps } // namespace fastdds } // namespace eprosima
1
20,426
That's a tricky one: the variable is indeed used on line 2346, but only inside a `logInfo` message that is not compiled unless `INTERNAL_DEBUG` is set or `CMAKE_BUILD_TYPE` is set to `Debug`. Our CI builds with `INTERNAL_DEBUG` to verify that all `logInfo` messages are correct, which is why we don't see the warning there. The best fix is probably to remove the iterator (as you did), along with the part of the log message that uses it.
eProsima-Fast-DDS
cpp
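For context on the warning discussed in the review comment above: this is the common C++ pattern where a variable's only use sits inside a logging macro that compiles to nothing in non-debug builds. The sketch below is a hypothetical illustration, not Fast-DDS code; the `LOG_INFO` macro and the `INTERNAL_DEBUG` guard are stand-ins assumed to mirror how `logInfo` is conditionally compiled.

// Minimal sketch (not Fast-DDS code): a logging macro that is compiled out
// unless INTERNAL_DEBUG is defined, mimicking a conditionally compiled logInfo.
#include <iostream>
#include <map>
#include <string>

#ifdef INTERNAL_DEBUG
#define LOG_INFO(expr) do { std::cout << expr << std::endl; } while (0)
#else
#define LOG_INFO(expr) do { } while (0)  // the whole log call vanishes
#endif

int count_entries(const std::map<std::string, int>& table)
{
    int count = 0;
    for (const auto& entry : table)
    {
        // 'value_copy' is only ever read by the log statement below, so when
        // LOG_INFO expands to nothing, -Wall flags it as an unused variable.
        int value_copy = entry.second;
        LOG_INFO("visiting " << entry.first << " with value " << value_copy);
        ++count;
    }
    return count;
}

int main()
{
    std::map<std::string, int> table{{"a", 1}, {"b", 2}};
    return count_entries(table) == 2 ? 0 : 1;
}

Removing the variable together with the part of the log that reads it, as the comment suggests, silences the warning in both build configurations.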
@@ -600,7 +600,7 @@ class L3PacketSocket(L2Socket):
         # type: (Packet) -> int
         iff = x.route()[0]
         if iff is None:
-            iff = conf.iface
+            iff = network_name(conf.iface)
         sdto = (iff, self.type)
         self.outs.bind(sdto)
         sn = self.outs.getsockname()
1
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) Philippe Biondi <[email protected]> # This program is published under a GPLv2 license """ Linux specific functions. """ from __future__ import absolute_import from fcntl import ioctl from select import select import array import ctypes import os import socket import struct import subprocess import sys import time import scapy.utils import scapy.utils6 from scapy.compat import raw, plain_str from scapy.consts import LINUX from scapy.arch.common import ( _iff_flags, compile_filter, get_if, get_if_raw_hwaddr, ) from scapy.config import conf from scapy.data import MTU, ETH_P_ALL, SOL_PACKET, SO_ATTACH_FILTER, \ SO_TIMESTAMPNS from scapy.error import ( ScapyInvalidPlatformException, Scapy_Exception, log_loading, log_runtime, warning, ) from scapy.interfaces import IFACES, InterfaceProvider, NetworkInterface, \ network_name from scapy.libs.structures import sock_fprog from scapy.packet import Packet, Padding from scapy.pton_ntop import inet_ntop from scapy.supersocket import SuperSocket import scapy.modules.six as six from scapy.modules.six.moves import range # Typing imports from scapy.compat import ( Any, Callable, Dict, List, NoReturn, Optional, Tuple, Type, Union, ) # From bits/ioctls.h SIOCGIFHWADDR = 0x8927 # Get hardware address SIOCGIFADDR = 0x8915 # get PA address SIOCGIFNETMASK = 0x891b # get network PA mask SIOCGIFNAME = 0x8910 # get iface name SIOCSIFLINK = 0x8911 # set iface channel SIOCGIFCONF = 0x8912 # get iface list SIOCGIFFLAGS = 0x8913 # get flags SIOCSIFFLAGS = 0x8914 # set flags SIOCGIFINDEX = 0x8933 # name -> if_index mapping SIOCGIFCOUNT = 0x8938 # get number of devices SIOCGSTAMP = 0x8906 # get packet timestamp (as a timeval) # From if.h IFF_UP = 0x1 # Interface is up. IFF_BROADCAST = 0x2 # Broadcast address valid. IFF_DEBUG = 0x4 # Turn on debugging. IFF_LOOPBACK = 0x8 # Is a loopback net. IFF_POINTOPOINT = 0x10 # Interface is point-to-point link. IFF_NOTRAILERS = 0x20 # Avoid use of trailers. IFF_RUNNING = 0x40 # Resources allocated. IFF_NOARP = 0x80 # No address resolution protocol. IFF_PROMISC = 0x100 # Receive all packets. # From netpacket/packet.h PACKET_ADD_MEMBERSHIP = 1 PACKET_DROP_MEMBERSHIP = 2 PACKET_RECV_OUTPUT = 3 PACKET_RX_RING = 5 PACKET_STATISTICS = 6 PACKET_MR_MULTICAST = 0 PACKET_MR_PROMISC = 1 PACKET_MR_ALLMULTI = 2 # From net/route.h RTF_UP = 0x0001 # Route usable RTF_REJECT = 0x0200 # From if_packet.h PACKET_HOST = 0 # To us PACKET_BROADCAST = 1 # To all PACKET_MULTICAST = 2 # To group PACKET_OTHERHOST = 3 # To someone else PACKET_OUTGOING = 4 # Outgoing of any type PACKET_LOOPBACK = 5 # MC/BRD frame looped back PACKET_USER = 6 # To user space PACKET_KERNEL = 7 # To kernel space PACKET_AUXDATA = 8 PACKET_FASTROUTE = 6 # Fastrouted frame # Unused, PACKET_FASTROUTE and PACKET_LOOPBACK are invisible to user space # Utils def get_if_raw_addr(iff): # type: (Union[NetworkInterface, str]) -> bytes r""" Return the raw IPv4 address of an interface. 
If unavailable, returns b"\0\0\0\0" """ try: return get_if(iff, SIOCGIFADDR)[20:24] except IOError: return b"\0\0\0\0" def _get_if_list(): # type: () -> List[str] """ Function to read the interfaces from /proc/net/dev """ try: f = open("/proc/net/dev", "rb") except IOError: try: f.close() except Exception: pass log_loading.critical("Can't open /proc/net/dev !") return [] lst = [] f.readline() f.readline() for line in f: lst.append(plain_str(line).split(":")[0].strip()) f.close() return lst def attach_filter(sock, bpf_filter, iface): # type: (socket.socket, str, Union[NetworkInterface, str]) -> None """ Compile bpf filter and attach it to a socket :param sock: the python socket :param bpf_filter: the bpf string filter to compile :param iface: the interface used to compile """ bp = compile_filter(bpf_filter, iface) if conf.use_pypy and sys.pypy_version_info <= (7, 3, 2): # type: ignore # PyPy < 7.3.2 has a broken behavior # https://foss.heptapod.net/pypy/pypy/-/issues/3298 bp = struct.pack( 'HL', bp.bf_len, ctypes.addressof(bp.bf_insns.contents) ) else: bp = sock_fprog(bp.bf_len, bp.bf_insns) sock.setsockopt(socket.SOL_SOCKET, SO_ATTACH_FILTER, bp) def set_promisc(s, iff, val=1): # type: (socket.socket, Union[NetworkInterface, str], int) -> None mreq = struct.pack("IHH8s", get_if_index(iff), PACKET_MR_PROMISC, 0, b"") if val: cmd = PACKET_ADD_MEMBERSHIP else: cmd = PACKET_DROP_MEMBERSHIP s.setsockopt(SOL_PACKET, cmd, mreq) def get_alias_address(iface_name, # type: str ip_mask, # type: int gw_str, # type: str metric # type: int ): # type: (...) -> Optional[Tuple[int, int, str, str, str, int]] """ Get the correct source IP address of an interface alias """ # Detect the architecture if scapy.consts.IS_64BITS: offset, name_len = 16, 40 else: offset, name_len = 32, 32 # Retrieve interfaces structures sck = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) names_ar = array.array('B', b'\0' * 4096) ifreq = ioctl(sck.fileno(), SIOCGIFCONF, struct.pack("iL", len(names_ar), names_ar.buffer_info()[0])) # Extract interfaces names out = struct.unpack("iL", ifreq)[0] names_b = names_ar.tobytes() if six.PY3 else names_ar.tostring() # type: ignore # noqa: E501 names = [names_b[i:i + offset].split(b'\0', 1)[0] for i in range(0, out, name_len)] # noqa: E501 # Look for the IP address for ifname_b in names: ifname = plain_str(ifname_b) # Only look for a matching interface name if not ifname.startswith(iface_name): continue # Retrieve and convert addresses ifreq = ioctl(sck, SIOCGIFADDR, struct.pack("16s16x", ifname_b)) ifaddr = struct.unpack(">I", ifreq[20:24])[0] # type: int ifreq = ioctl(sck, SIOCGIFNETMASK, struct.pack("16s16x", ifname_b)) msk = struct.unpack(">I", ifreq[20:24])[0] # type: int # Get the full interface name if ':' in ifname: ifname = ifname[:ifname.index(':')] else: continue # Check if the source address is included in the network if (ifaddr & msk) == ip_mask: sck.close() return (ifaddr & msk, msk, gw_str, ifname, scapy.utils.ltoa(ifaddr), metric) sck.close() return None def read_routes(): # type: () -> List[Tuple[int, int, str, str, str, int]] try: f = open("/proc/net/route", "rb") except IOError: log_loading.critical("Can't open /proc/net/route !") return [] routes = [] s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: ifreq = ioctl(s, SIOCGIFADDR, struct.pack("16s16x", conf.loopback_name.encode("utf8"))) # noqa: E501 addrfamily = struct.unpack("h", ifreq[16:18])[0] if addrfamily == socket.AF_INET: ifreq2 = ioctl(s, SIOCGIFNETMASK, struct.pack("16s16x", 
conf.loopback_name.encode("utf8"))) # noqa: E501 msk = socket.ntohl(struct.unpack("I", ifreq2[20:24])[0]) dst = socket.ntohl(struct.unpack("I", ifreq[20:24])[0]) & msk ifaddr = scapy.utils.inet_ntoa(ifreq[20:24]) routes.append((dst, msk, "0.0.0.0", conf.loopback_name, ifaddr, 1)) # noqa: E501 else: warning("Interface %s: unknown address family (%i)" % (conf.loopback_name, addrfamily)) # noqa: E501 except IOError as err: if err.errno == 99: warning("Interface %s: no address assigned" % conf.loopback_name) # noqa: E501 else: warning("Interface %s: failed to get address config (%s)" % (conf.loopback_name, str(err))) # noqa: E501 for line_b in f.readlines()[1:]: line = plain_str(line_b) iff, dst_b, gw, flags_b, _, _, metric_b, msk_b, _, _, _ = line.split() flags = int(flags_b, 16) if flags & RTF_UP == 0: continue if flags & RTF_REJECT: continue try: ifreq = ioctl(s, SIOCGIFADDR, struct.pack("16s16x", iff.encode("utf8"))) # noqa: E501 except IOError: # interface is present in routing tables but does not have any assigned IP # noqa: E501 ifaddr = "0.0.0.0" ifaddr_int = 0 else: addrfamily = struct.unpack("h", ifreq[16:18])[0] if addrfamily == socket.AF_INET: ifaddr = scapy.utils.inet_ntoa(ifreq[20:24]) ifaddr_int = struct.unpack("!I", ifreq[20:24])[0] else: warning("Interface %s: unknown address family (%i)", iff, addrfamily) # noqa: E501 continue # Attempt to detect an interface alias based on addresses inconsistencies # noqa: E501 dst_int = socket.htonl(int(dst_b, 16)) & 0xffffffff msk_int = socket.htonl(int(msk_b, 16)) & 0xffffffff gw_str = scapy.utils.inet_ntoa(struct.pack("I", int(gw, 16))) metric = int(metric_b) route = (dst_int, msk_int, gw_str, iff, ifaddr, metric) if ifaddr_int & msk_int != dst_int: tmp_route = get_alias_address(iff, dst_int, gw_str, metric) if tmp_route: route = tmp_route routes.append(route) f.close() s.close() return routes ############ # IPv6 # ############ def in6_getifaddr(): # type: () -> List[Tuple[str, int, str]] """ Returns a list of 3-tuples of the form (addr, scope, iface) where 'addr' is the address of scope 'scope' associated to the interface 'iface'. This is the list of all addresses of all interfaces available on the system. """ ret = [] # type: List[Tuple[str, int, str]] try: fdesc = open("/proc/net/if_inet6", "rb") except IOError: return ret for line in fdesc: # addr, index, plen, scope, flags, ifname tmp = plain_str(line).split() addr = scapy.utils6.in6_ptop( b':'.join( struct.unpack('4s4s4s4s4s4s4s4s', tmp[0].encode()) ).decode() ) # (addr, scope, iface) ret.append((addr, int(tmp[3], 16), tmp[5])) fdesc.close() return ret def read_routes6(): # type: () -> List[Tuple[str, int, str, str, List[str], int]] try: f = open("/proc/net/ipv6_route", "rb") except IOError: return [] # 1. destination network # 2. destination prefix length # 3. source network displayed # 4. source prefix length # 5. next hop # 6. metric # 7. reference counter (?!?) # 8. use counter (?!?) # 9. flags # 10. 
device name routes = [] def proc2r(p): # type: (bytes) -> str ret = struct.unpack('4s4s4s4s4s4s4s4s', p) addr = b':'.join(ret).decode() return scapy.utils6.in6_ptop(addr) lifaddr = in6_getifaddr() for line in f.readlines(): d_b, dp_b, _, _, nh_b, metric_b, rc, us, fl_b, dev_b = line.split() metric = int(metric_b, 16) fl = int(fl_b, 16) dev = plain_str(dev_b) if fl & RTF_UP == 0: continue if fl & RTF_REJECT: continue d = proc2r(d_b) dp = int(dp_b, 16) nh = proc2r(nh_b) cset = [] # candidate set (possible source addresses) if dev == conf.loopback_name: if d == '::': continue cset = ['::1'] else: devaddrs = (x for x in lifaddr if x[2] == dev) cset = scapy.utils6.construct_source_candidate_set(d, dp, devaddrs) if len(cset) != 0: routes.append((d, dp, nh, dev, cset, metric)) f.close() return routes def get_if_index(iff): # type: (Union[NetworkInterface, str]) -> int return int(struct.unpack("I", get_if(iff, SIOCGIFINDEX)[16:20])[0]) class LinuxInterfaceProvider(InterfaceProvider): name = "sys" def _is_valid(self, dev): # type: (NetworkInterface) -> bool return bool(dev.flags & IFF_UP) def load(self): # type: () -> Dict[str, NetworkInterface] from scapy.fields import FlagValue data = {} ips = in6_getifaddr() for i in _get_if_list(): ifflags = struct.unpack("16xH14x", get_if(i, SIOCGIFFLAGS))[0] index = get_if_index(i) mac = scapy.utils.str2mac( get_if_raw_hwaddr(i, siocgifhwaddr=SIOCGIFHWADDR)[1] ) ip = None # type: Optional[str] ip = inet_ntop(socket.AF_INET, get_if_raw_addr(i)) if ip == "0.0.0.0": ip = None ifflags = FlagValue(ifflags, _iff_flags) if_data = { "name": i, "network_name": i, "description": i, "flags": ifflags, "index": index, "ip": ip, "ips": [x[0] for x in ips if x[2] == i] + [ip] if ip else [], "mac": mac } data[i] = NetworkInterface(self, if_data) return data IFACES.register_provider(LinuxInterfaceProvider) if os.uname()[4] in ['x86_64', 'aarch64']: def get_last_packet_timestamp(sock): # type: (socket.socket) -> float ts = ioctl(sock, SIOCGSTAMP, "1234567890123456") # type: ignore s, us = struct.unpack("QQ", ts) # type: Tuple[int, int] return s + us / 1000000.0 else: def get_last_packet_timestamp(sock): # type: (socket.socket) -> float ts = ioctl(sock, SIOCGSTAMP, "12345678") # type: ignore s, us = struct.unpack("II", ts) # type: Tuple[int, int] return s + us / 1000000.0 def _flush_fd(fd): # type: (int) -> None while True: r, w, e = select([fd], [], [], 0) if r: os.read(fd, MTU) else: break class L2Socket(SuperSocket): desc = "read/write packets at layer 2 using Linux PF_PACKET sockets" def __init__(self, iface=None, # type: Optional[Union[str, NetworkInterface]] type=ETH_P_ALL, # type: int promisc=None, # type: Optional[Any] filter=None, # type: Optional[Any] nofilter=0, # type: int monitor=None, # type: Optional[Any] ): # type: (...) -> None self.iface = network_name(iface or conf.iface) self.type = type self.promisc = conf.sniff_promisc if promisc is None else promisc if monitor is not None: log_runtime.info( "The 'monitor' argument has no effect on native linux sockets." 
) self.ins = socket.socket( socket.AF_PACKET, socket.SOCK_RAW, socket.htons(type)) self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 0) if not nofilter: if conf.except_filter: if filter: filter = "(%s) and not (%s)" % (filter, conf.except_filter) else: filter = "not (%s)" % conf.except_filter if filter is not None: try: attach_filter(self.ins, filter, self.iface) except ImportError as ex: log_runtime.error("Cannot set filter: %s", ex) if self.promisc: set_promisc(self.ins, self.iface) self.ins.bind((self.iface, type)) _flush_fd(self.ins.fileno()) self.ins.setsockopt( socket.SOL_SOCKET, socket.SO_RCVBUF, conf.bufsize ) if not six.PY2: # Receive Auxiliary Data (VLAN tags) try: self.ins.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1) self.ins.setsockopt( socket.SOL_SOCKET, SO_TIMESTAMPNS, 1 ) self.auxdata_available = True except OSError: # Note: Auxiliary Data is only supported since # Linux 2.6.21 msg = "Your Linux Kernel does not support Auxiliary Data!" log_runtime.info(msg) if not isinstance(self, L2ListenSocket): self.outs = self.ins # type: socket.socket self.outs.setsockopt( socket.SOL_SOCKET, socket.SO_SNDBUF, conf.bufsize ) else: self.outs = None # type: ignore sa_ll = self.ins.getsockname() if sa_ll[3] in conf.l2types: self.LL = conf.l2types.num2layer[sa_ll[3]] self.lvl = 2 elif sa_ll[1] in conf.l3types: self.LL = conf.l3types.num2layer[sa_ll[1]] self.lvl = 3 else: self.LL = conf.default_l2 self.lvl = 2 warning("Unable to guess type (interface=%s protocol=%#x family=%i). Using %s", sa_ll[0], sa_ll[1], sa_ll[3], self.LL.name) # noqa: E501 def close(self): # type: () -> None if self.closed: return try: if self.promisc and self.ins: set_promisc(self.ins, self.iface, 0) except (AttributeError, OSError): pass SuperSocket.close(self) def recv_raw(self, x=MTU): # type: (int) -> Tuple[Optional[Type[Packet]], Optional[bytes], Optional[float]] # noqa: E501 """Receives a packet, then returns a tuple containing (cls, pkt_data, time)""" # noqa: E501 pkt, sa_ll, ts = self._recv_raw(self.ins, x) if self.outs and sa_ll[2] == socket.PACKET_OUTGOING: return None, None, None if ts is None: ts = get_last_packet_timestamp(self.ins) return self.LL, pkt, ts def send(self, x): # type: (Packet) -> int try: return SuperSocket.send(self, x) except socket.error as msg: if msg.errno == 22 and len(x) < conf.min_pkt_size: padding = b"\x00" * (conf.min_pkt_size - len(x)) if isinstance(x, Packet): return SuperSocket.send(self, x / Padding(load=padding)) else: return SuperSocket.send(self, raw(x) + padding) raise class L2ListenSocket(L2Socket): desc = "read packets at layer 2 using Linux PF_PACKET sockets. 
Also receives the packets going OUT" # noqa: E501 def send(self, x): # type: (Packet) -> NoReturn raise Scapy_Exception("Can't send anything with L2ListenSocket") class L3PacketSocket(L2Socket): desc = "read/write packets at layer 3 using Linux PF_PACKET sockets" def recv(self, x=MTU): # type: (int) -> Optional[Packet] pkt = SuperSocket.recv(self, x) if pkt and self.lvl == 2: pkt.payload.time = pkt.time return pkt.payload return pkt def send(self, x): # type: (Packet) -> int iff = x.route()[0] if iff is None: iff = conf.iface sdto = (iff, self.type) self.outs.bind(sdto) sn = self.outs.getsockname() ll = lambda x: x # type: Callable[[Packet], Packet] type_x = type(x) if type_x in conf.l3types: sdto = (iff, conf.l3types.layer2num[type_x]) if sn[3] in conf.l2types: ll = lambda x: conf.l2types.num2layer[sn[3]]() / x if self.lvl == 3 and type_x != self.LL: warning("Incompatible L3 types detected using %s instead of %s !", type_x, self.LL) self.LL = type_x sx = raw(ll(x)) x.sent_time = time.time() try: return self.outs.sendto(sx, sdto) except socket.error as msg: if msg.errno == 22 and len(sx) < conf.min_pkt_size: return self.outs.send( sx + b"\x00" * (conf.min_pkt_size - len(sx)) ) elif conf.auto_fragment and msg.errno == 90: i = 0 for p in x.fragment(): i += self.outs.sendto(raw(ll(p)), sdto) return i else: raise class VEthPair(object): """ encapsulates a virtual Ethernet interface pair """ def __init__(self, iface_name, peer_name): # type: (str, str) -> None if not LINUX: # ToDo: do we need a kernel version check here? raise ScapyInvalidPlatformException( 'Virtual Ethernet interface pair only available on Linux' ) self.ifaces = [iface_name, peer_name] def iface(self): # type: () -> str return self.ifaces[0] def peer(self): # type: () -> str return self.ifaces[1] def setup(self): # type: () -> None """ create veth pair links :raises subprocess.CalledProcessError if operation fails """ subprocess.check_call(['ip', 'link', 'add', self.ifaces[0], 'type', 'veth', 'peer', 'name', self.ifaces[1]]) # noqa: E501 def destroy(self): # type: () -> None """ remove veth pair links :raises subprocess.CalledProcessError if operation fails """ subprocess.check_call(['ip', 'link', 'del', self.ifaces[0]]) def up(self): # type: () -> None """ set veth pair links up :raises subprocess.CalledProcessError if operation fails """ for idx in [0, 1]: subprocess.check_call(["ip", "link", "set", self.ifaces[idx], "up"]) # noqa: E501 def down(self): # type: () -> None """ set veth pair links down :raises subprocess.CalledProcessError if operation fails """ for idx in [0, 1]: subprocess.check_call(["ip", "link", "set", self.ifaces[idx], "down"]) # noqa: E501 def __enter__(self): # type: () -> VEthPair self.setup() self.up() conf.ifaces.reload() return self def __exit__(self, exc_type, exc_val, exc_tb): # type: (Any, Any, Any) -> None self.destroy() conf.ifaces.reload()
1
19,620
I suspect L2Socket needs a similar change (line 502) for the case where `iface` is `None` (line 479).
secdev-scapy
py