patch stringlengths 17-31.2k | y int64 1-1 | oldf stringlengths 0-2.21M | idx int64 1-1 | id int64 4.29k-68.4k | msg stringlengths 8-843 | proj stringclasses 212 values | lang stringclasses 9 values |
---|---|---|---|---|---|---|---|
@@ -66,7 +66,7 @@ static void on_setup_ostream(h2o_filter_t *_self, h2o_req_t *req, h2o_ostream_t
if (!req->res.mime_attr->is_compressible)
goto Next;
/* 100 is a rough estimate */
- if (req->res.content_length <= 100)
+ if (req->res.content_length <= self->args.min_size)
goto Next;
/* skip if failed to gather the list of compressible types */
if ((compressible_types = h2o_get_compressible_types(&req->headers)) == 0) | 1 | /*
* Copyright (c) 2015,2016 Justin Zhu, DeNA Co., Ltd., Kazuho Oku
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdlib.h>
#include "h2o.h"
#ifndef BUF_SIZE
#define BUF_SIZE 8192
#endif
struct st_compress_filter_t {
h2o_filter_t super;
h2o_compress_args_t args;
};
struct st_compress_encoder_t {
h2o_ostream_t super;
h2o_compress_context_t *compressor;
};
static void do_send(h2o_ostream_t *_self, h2o_req_t *req, h2o_iovec_t *inbufs, size_t inbufcnt, int is_final)
{
struct st_compress_encoder_t *self = (void *)_self;
h2o_iovec_t *outbufs;
size_t outbufcnt;
self->compressor->compress(self->compressor, inbufs, inbufcnt, is_final, &outbufs, &outbufcnt);
h2o_ostream_send_next(&self->super, req, outbufs, outbufcnt, is_final);
}
static void on_setup_ostream(h2o_filter_t *_self, h2o_req_t *req, h2o_ostream_t **slot)
{
struct st_compress_filter_t *self = (void *)_self;
struct st_compress_encoder_t *encoder;
int compressible_types;
h2o_compress_context_t *compressor;
ssize_t i;
if (req->version < 0x101)
goto Next;
if (req->res.status != 200)
goto Next;
if (h2o_memis(req->input.method.base, req->input.method.len, H2O_STRLIT("HEAD")))
goto Next;
if (req->res.mime_attr == NULL)
h2o_req_fill_mime_attributes(req);
if (!req->res.mime_attr->is_compressible)
goto Next;
/* 100 is a rough estimate */
if (req->res.content_length <= 100)
goto Next;
/* skip if failed to gather the list of compressible types */
if ((compressible_types = h2o_get_compressible_types(&req->headers)) == 0)
goto Next;
/* skip if content-encoding header is being set (as well as obtain the location of accept-ranges) */
size_t content_encoding_header_index = -1, accept_ranges_header_index = -1;
for (i = 0; i != req->res.headers.size; ++i) {
if (req->res.headers.entries[i].name == &H2O_TOKEN_CONTENT_ENCODING->buf)
content_encoding_header_index = i;
else if (req->res.headers.entries[i].name == &H2O_TOKEN_ACCEPT_RANGES->buf)
accept_ranges_header_index = i;
else
continue;
}
if (content_encoding_header_index != -1)
goto Next;
/* open the compressor */
#if H2O_USE_BROTLI
if (self->args.brotli.quality != -1 && (compressible_types & H2O_COMPRESSIBLE_BROTLI) != 0) {
compressor = h2o_compress_brotli_open(&req->pool, self->args.brotli.quality, req->res.content_length);
} else
#endif
if (self->args.gzip.quality != -1 && (compressible_types & H2O_COMPRESSIBLE_GZIP) != 0) {
compressor = h2o_compress_gzip_open(&req->pool, self->args.gzip.quality);
} else {
goto Next;
}
/* adjust the response headers */
req->res.content_length = SIZE_MAX;
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_ENCODING, compressor->name.base, compressor->name.len);
h2o_set_header_token(&req->pool, &req->res.headers, H2O_TOKEN_VARY, H2O_STRLIT("accept-encoding"));
if (accept_ranges_header_index != -1) {
req->res.headers.entries[accept_ranges_header_index].value = h2o_iovec_init(H2O_STRLIT("none"));
} else {
h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_ACCEPT_RANGES, H2O_STRLIT("none"));
}
/* setup filter */
encoder = (void *)h2o_add_ostream(req, sizeof(*encoder), slot);
encoder->super.do_send = do_send;
slot = &encoder->super.next;
encoder->compressor = compressor;
/* adjust preferred chunk size (compress by 8192 bytes) */
if (req->preferred_chunk_size > BUF_SIZE)
req->preferred_chunk_size = BUF_SIZE;
Next:
h2o_setup_next_ostream(req, slot);
}
void h2o_compress_register(h2o_pathconf_t *pathconf, h2o_compress_args_t *args)
{
struct st_compress_filter_t *self = (void *)h2o_create_filter(pathconf, sizeof(*self));
self->super.on_setup_ostream = on_setup_ostream;
self->args = *args;
}
| 1 | 11,118 | Should we rather change the operator to `<`, since the variable defines the minimum size that gets compressed? | h2o-h2o | c |
@@ -233,6 +233,14 @@ public class StreamTest extends AbstractLinearSeqTest {
assertThat(actual).isEqualTo(3);
}
+ // -- append
+
+ @Test
+ public void shouldAppendMillionTimes() {
+ final int bigNum = 1_000_000;
+ assertThat(Stream.range(0, bigNum).foldLeft(Stream.empty(), Stream::append).length()).isEqualTo(bigNum);
+ }
+
// -- combinations
@Test | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _ _____
* / / \/ \ / \/ \ / /\__\/ // \/ \ / / _ \ Javaslang
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \__/ / Copyright 2014-now Daniel Dietrich
* /___/\_/ \_/\____/\_/ \_/\__\/__/___\_/ \_// \__/_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.Serializables;
import javaslang.control.Try;
import org.junit.Ignore;
import org.junit.Test;
import java.io.InvalidObjectException;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collector;
public class StreamTest extends AbstractLinearSeqTest {
// -- construction
@Override
protected <T> Collector<T, ArrayList<T>, Stream<T>> collector() {
return Stream.collector();
}
@Override
protected <T> Stream<T> empty() {
return Stream.empty();
}
@Override
protected <T> Stream<T> of(T element) {
return Stream.of(element);
}
@SuppressWarnings("varargs")
@SafeVarargs
@Override
protected final <T> Stream<T> of(T... elements) {
return Stream.of(elements);
}
@Override
protected <T> Stream<T> ofAll(Iterable<? extends T> elements) {
return Stream.ofAll(elements);
}
@Override
protected Stream<Boolean> ofAll(boolean[] array) {
return Stream.ofAll(array);
}
@Override
protected Stream<Byte> ofAll(byte[] array) {
return Stream.ofAll(array);
}
@Override
protected Stream<Character> ofAll(char[] array) {
return Stream.ofAll(array);
}
@Override
protected Stream<Double> ofAll(double[] array) {
return Stream.ofAll(array);
}
@Override
protected Stream<Float> ofAll(float[] array) {
return Stream.ofAll(array);
}
@Override
protected Stream<Integer> ofAll(int[] array) {
return Stream.ofAll(array);
}
@Override
protected Stream<Long> ofAll(long[] array) {
return Stream.ofAll(array);
}
@Override
protected Stream<Short> ofAll(short[] array) {
return Stream.ofAll(array);
}
@Override
protected <T> Stream<T> tabulate(int n, Function<? super Integer, ? extends T> f) {
return Stream.tabulate(n, f);
}
@Override
protected <T> Stream<T> fill(int n, Supplier<? extends T> s) {
return Stream.fill(n, s);
}
@Override
protected Stream<Character> range(char from, char toExclusive) {
return Stream.range(from, toExclusive);
}
@Override
protected Stream<Character> rangeBy(char from, char toExclusive, int step) {
return Stream.rangeBy(from, toExclusive, step);
}
@Override
protected Stream<Double> rangeBy(double from, double toExclusive, double step) {
return Stream.rangeBy(from, toExclusive, step);
}
@Override
protected Stream<Integer> range(int from, int toExclusive) {
return Stream.range(from, toExclusive);
}
@Override
protected Stream<Integer> rangeBy(int from, int toExclusive, int step) {
return Stream.rangeBy(from, toExclusive, step);
}
@Override
protected Stream<Long> range(long from, long toExclusive) {
return Stream.range(from, toExclusive);
}
@Override
protected Stream<Long> rangeBy(long from, long toExclusive, long step) {
return Stream.rangeBy(from, toExclusive, step);
}
@Override
protected Stream<Character> rangeClosed(char from, char toInclusive) {
return Stream.rangeClosed(from, toInclusive);
}
@Override
protected Stream<Character> rangeClosedBy(char from, char toInclusive, int step) {
return Stream.rangeClosedBy(from, toInclusive, step);
}
@Override
protected Stream<Double> rangeClosedBy(double from, double toInclusive, double step) {
return Stream.rangeClosedBy(from, toInclusive, step);
}
@Override
protected Stream<Integer> rangeClosed(int from, int toInclusive) {
return Stream.rangeClosed(from, toInclusive);
}
@Override
protected Stream<Integer> rangeClosedBy(int from, int toInclusive, int step) {
return Stream.rangeClosedBy(from, toInclusive, step);
}
@Override
protected Stream<Long> rangeClosed(long from, long toInclusive) {
return Stream.rangeClosed(from, toInclusive);
}
@Override
protected Stream<Long> rangeClosedBy(long from, long toInclusive, long step) {
return Stream.rangeClosedBy(from, toInclusive, step);
}
// -- static from(int)
@Test
public void shouldGenerateIntStream() {
assertThat(Stream.from(-1).take(3)).isEqualTo(Stream.of(-1, 0, 1));
}
@Test
public void shouldGenerateTerminatingIntStream() {
//noinspection NumericOverflow
assertThat(Stream.from(Integer.MAX_VALUE).take(2))
.isEqualTo(Stream.of(Integer.MAX_VALUE, Integer.MAX_VALUE + 1));
}
// -- static from(long)
@Test
public void shouldGenerateLongStream() {
assertThat(Stream.from(-1L).take(3)).isEqualTo(Stream.of(-1L, 0L, 1L));
}
@Test
public void shouldGenerateTerminatingLongStream() {
//noinspection NumericOverflow
assertThat(Stream.from(Long.MAX_VALUE).take(2)).isEqualTo(Stream.of(Long.MAX_VALUE, Long.MAX_VALUE + 1));
}
// -- static gen(Supplier)
@Test
public void shouldGenerateInfiniteStreamBasedOnSupplier() {
assertThat(Stream.gen(() -> 1).take(13).reduce((i, j) -> i + j)).isEqualTo(13);
}
// -- static gen(T, Function)
@Test
public void shouldGenerateInfiniteStreamBasedOnSupplierWithAccessToPreviousValue() {
assertThat(Stream.gen(2, (i) -> i + 2).take(3).reduce((i, j) -> i + j)).isEqualTo(12);
}
// -- static repeat(T)
@Test
public void shouldGenerateInfiniteStreamBasedOnRepeatedElement() {
assertThat(Stream.repeat(2).take(3).reduce((i, j) -> i + j)).isEqualTo(6);
}
// -- static cons(T, Supplier)
@Test
public void shouldBuildStreamBasedOnHeadAndTailSupplierWithAccessToHead() {
assertThat(Stream.cons(1, () -> Stream.cons(2, Stream::empty))).isEqualTo(Stream.of(1, 2));
}
// -- static narrow
@Test
public void shouldNarrowStream() {
final Stream<Double> doubles = of(1.0d);
final Stream<Number> numbers = Stream.narrow(doubles);
final int actual = numbers.append(new BigDecimal("2.0")).sum().intValue();
assertThat(actual).isEqualTo(3);
}
// -- combinations
@Test
public void shouldComputeCombinationsOfEmptyStream() {
assertThat(Stream.empty().combinations()).isEqualTo(Stream.of(Stream.empty()));
}
@Test
public void shouldComputeCombinationsOfNonEmptyStream() {
assertThat(Stream.of(1, 2, 3).combinations()).isEqualTo(Stream.of(Stream.empty(), Stream.of(1), Stream.of(2),
Stream.of(3), Stream.of(1, 2), Stream.of(1, 3), Stream.of(2, 3), Stream.of(1, 2, 3)));
}
// -- combinations(k)
@Test
public void shouldComputeKCombinationsOfEmptyStream() {
assertThat(Stream.empty().combinations(1)).isEqualTo(Stream.empty());
}
@Test
public void shouldComputeKCombinationsOfNonEmptyStream() {
assertThat(Stream.of(1, 2, 3).combinations(2))
.isEqualTo(Stream.of(Stream.of(1, 2), Stream.of(1, 3), Stream.of(2, 3)));
}
// -- flatMap
@Test
public void shouldFlatMapInfiniteTraversable() {
assertThat(Stream.gen(1, i -> i + 1).flatMap(i -> List.of(i, 2 * i)).take(7))
.isEqualTo(Stream.of(1, 2, 2, 4, 3, 6, 4));
}
// -- peek
@Override
protected int getPeekNonNilPerformingAnAction() {
return 3;
}
// -- permutations
@Test
public void shouldComputePermutationsOfEmptyStream() {
assertThat(Stream.empty().permutations()).isEqualTo(Stream.empty());
}
@Test
public void shouldComputePermutationsOfNonEmptyStream() {
assertThat(Stream.of(1, 2, 3).permutations()).isEqualTo(Stream.ofAll(Stream.of(Stream.of(1, 2, 3),
Stream.of(1, 3, 2), Stream.of(2, 1, 3), Stream.of(2, 3, 1), Stream.of(3, 1, 2), Stream.of(3, 2, 1))));
}
// -- appendSelf
@Test
public void shouldRecurrentlyCalculateFibonacci() {
assertThat(Stream.of(1, 1).appendSelf(self -> self.zip(self.tail()).map(t -> t._1 + t._2)).take(10))
.isEqualTo(Stream.of(1, 1, 2, 3, 5, 8, 13, 21, 34, 55));
}
@Test
public void shouldRecurrentlyCalculatePrimes() {
assertThat(Stream
.of(2)
.appendSelf(self -> Stream
.gen(3, i -> i + 2)
.filter(i -> self.takeWhile(j -> j * j <= i).forAll(k -> i % k > 0)))
.take(10)).isEqualTo(Stream.of(2, 3, 5, 7, 11, 13, 17, 19, 23, 29));
}
@Test
public void shouldDoNothingOnNil() {
assertThat(Stream.empty().appendSelf(self -> self)).isEqualTo(Stream.empty());
}
@Test
public void shouldRecurrentlyCalculateArithmeticProgression() {
assertThat(Stream.of(1).appendSelf(self -> self.map(t -> t + 1)).take(4)).isEqualTo(Stream.of(1, 2, 3, 4));
}
@Test
public void shouldRecurrentlyCalculateGeometricProgression() {
assertThat(Stream.of(1).appendSelf(self -> self.map(t -> t * 2)).take(4)).isEqualTo(Stream.of(1, 2, 4, 8));
}
// -- containsSlice
@Test
public void shouldRecognizeInfiniteDoesContainSlice() {
final boolean actual = Stream.gen(1, i -> i + 1).containsSlice(of(12, 13, 14));
assertThat(actual).isTrue();
}
// -- lazy dropRight
@Test
public void shouldLazyDropRight() {
assertThat(Stream.from(1).takeUntil(i -> i == 18).dropRight(7)).isEqualTo(Stream.range(1, 11));
}
// -- cycle
@Test
public void shouldCycleEmptyStream() {
assertThat(empty().cycle()).isEqualTo(empty());
}
@Test
public void shouldCycleNonEmptyStream() {
assertThat(of(1, 2, 3).cycle().take(9)).isEqualTo(of(1, 2, 3, 1, 2, 3, 1, 2, 3));
}
// -- toString
@Test
public void shouldStringifyNil() {
assertThat(empty().toString()).isEqualTo("Stream()");
}
@Test
public void shouldStringifyNonNil() {
assertThat(of(1, 2, 3).toString()).isEqualTo("Stream(1, ?)");
}
@Test
public void shouldStringifyNonNilEvaluatingFirstTail() {
final Stream<Integer> stream = this.of(1, 2, 3);
stream.tail(); // evaluates second head element
assertThat(stream.toString()).isEqualTo("Stream(1, 2, ?)");
}
@Test
public void shouldStringifyNonNilAndNilTail() {
final Stream<Integer> stream = this.of(1);
stream.tail(); // evaluates empty tail
assertThat(stream.toString()).isEqualTo("Stream(1)");
}
// -- Serializable
@Test(expected = InvalidObjectException.class)
public void shouldNotSerializeEnclosingClassOfCons() throws Throwable {
Serializables.callReadObject(Stream.cons(1, Stream::empty));
}
@Test(expected = InvalidObjectException.class)
public void shouldNotDeserializeStreamWithSizeLessThanOne() throws Throwable {
try {
/*
* This implementation is stable regarding jvm impl changes of object serialization. The index of the number
* of Stream elements is gathered dynamically.
*/
final byte[] listWithOneElement = Serializables.serialize(Stream.of(0));
final byte[] listWithTwoElements = Serializables.serialize(Stream.of(0, 0));
int index = -1;
for (int i = 0; i < listWithOneElement.length && index == -1; i++) {
final byte b1 = listWithOneElement[i];
final byte b2 = listWithTwoElements[i];
if (b1 != b2) {
if (b1 != 1 || b2 != 2) {
throw new IllegalStateException("Difference does not indicate number of elements.");
} else {
index = i;
}
}
}
if (index == -1) {
throw new IllegalStateException("Hack incomplete - index not found");
}
/*
* Hack the serialized data and fake zero elements.
*/
listWithOneElement[index] = 0;
Serializables.deserialize(listWithOneElement);
} catch (IllegalStateException x) {
throw (x.getCause() != null) ? x.getCause() : x;
}
}
@Override
protected boolean useIsEqualToInsteadOfIsSameAs() {
return true;
}
@Test
public void shouldEvaluateTailAtMostOnce() {
final int[] counter = { 0 };
final Stream<Integer> stream = Stream.gen(() -> counter[0]++);
// this test ensures that the `tail.append(100)` does not modify the tail elements
final Stream<Integer> tail = stream.tail().append(100);
final String expected = stream.drop(1).take(3).mkString(",");
final String actual = tail.take(3).mkString(",");
assertThat(expected).isEqualTo("1,2,3");
assertThat(actual).isEqualTo(expected);
}
@Ignore
@Test
public void shouldNotProduceStackOverflow() {
Stream.range(0, 1_000_000)
.map(String::valueOf)
.foldLeft(Stream.<String> empty(), Stream::append)
.mkString();
}
@Test // See #327, #594
public void shouldNotEvaluateHeadOfTailWhenCallingIteratorHasNext() {
final Integer[] vals = new Integer[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
final StringBuilder actual = new StringBuilder();
flatTryWithJavaslangStream(vals, i -> doStuff(i, actual));
final StringBuilder expected = new StringBuilder();
flatTryWithJavaStream(vals, i -> doStuff(i, expected));
assertThat(actual.toString()).isEqualTo(expected.toString());
}
private Try<Void> flatTryWithJavaslangStream(Integer[] vals, Try.CheckedConsumer<Integer> func) {
return Stream.of(vals)
.map(v -> Try.run(() -> func.accept(v)))
.find(Try::isFailure)
.getOrElse(() -> Try.success(null));
}
private Try<Void> flatTryWithJavaStream(Integer[] vals, Try.CheckedConsumer<Integer> func) {
return java.util.stream.Stream.of(vals)
.map(v -> Try.run(() -> func.accept(v)))
.filter(Try::isFailure)
.findFirst()
.orElseGet(() -> Try.success(null));
}
private String doStuff(int i, StringBuilder builder) throws Exception {
builder.append(i);
if (i == 5) {
throw new Exception("Some error !!!");
}
return i + " Value";
}
}
| 1 | 7,329 | I'm really impressed by the AppendElements addition! Very cool! :-) | vavr-io-vavr | java |
@@ -230,7 +230,7 @@ public class GridLauncherV3 {
}
configureLogging(common.getLog(), common.getDebug());
- log.info(version());
+ log.finest(version());
return true;
}
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.grid.selenium;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableMap;
import com.beust.jcommander.JCommander;
import org.openqa.grid.common.GridRole;
import org.openqa.grid.internal.cli.CommonCliOptions;
import org.openqa.grid.internal.cli.GridHubCliOptions;
import org.openqa.grid.internal.cli.GridNodeCliOptions;
import org.openqa.grid.internal.cli.StandaloneCliOptions;
import org.openqa.grid.internal.utils.SelfRegisteringRemote;
import org.openqa.grid.internal.utils.configuration.GridHubConfiguration;
import org.openqa.grid.internal.utils.configuration.GridNodeConfiguration;
import org.openqa.grid.internal.utils.configuration.StandaloneConfiguration;
import org.openqa.grid.shared.Stoppable;
import org.openqa.grid.web.Hub;
import org.openqa.selenium.BuildInfo;
import org.openqa.selenium.grid.log.TerseFormatter;
import org.openqa.selenium.net.PortProber;
import org.openqa.selenium.remote.server.SeleniumServer;
import org.openqa.selenium.remote.server.log.LoggingOptions;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.util.Arrays;
import java.util.Map;
import java.util.Optional;
import java.util.logging.ConsoleHandler;
import java.util.logging.FileHandler;
import java.util.logging.Handler;
import java.util.logging.Level;
import java.util.logging.Logger;
public class GridLauncherV3 {
private static final Logger log = Logger.getLogger(GridLauncherV3.class.getName());
private static final BuildInfo buildInfo = new BuildInfo();
private PrintStream out;
@FunctionalInterface
private interface GridItemLauncher {
Stoppable launch(String[] args);
}
private Map<GridRole, GridItemLauncher> LAUNCHERS = buildLaunchers();
public static void main(String[] args) {
new GridLauncherV3().launch(args);
}
public GridLauncherV3() {
this(System.out);
}
@VisibleForTesting
public GridLauncherV3(PrintStream out) {
this.out = out;
System.setProperty("org.seleniumhq.jetty9.LEVEL", "WARN");
}
public Stoppable launch(String[] args) {
return Optional.ofNullable(buildLauncher(args))
.map(l -> l.launch(args))
.orElse(()->{});
}
/**
* From the {@code args}, builds a new {@link GridItemLauncher} and populates it properly.
*
* @return null if no role is found, or a properly populated {@link GridItemLauncher}.
*/
private GridItemLauncher buildLauncher(String[] args) {
if (Arrays.asList(args).contains("-htmlSuite")) {
out.println(Joiner.on("\n").join(
"Download the Selenium HTML Runner from http://www.seleniumhq.org/download/ and",
"use that to run your HTML suite."));
return null;
}
String role = "standalone";
for (int i = 0; i < args.length; i++) {
if (args[i].startsWith("-role=")) {
role = args[i].substring("-role=".length());
} else if (args[i].equals("-role")) {
i++; // Increment, because we're going to need this.
if (i < args.length) {
role = args[i];
} else {
role = null; // Will cause us to print the usage information.
}
}
}
GridRole gridRole = GridRole.get(role);
if (gridRole == null || LAUNCHERS.get(gridRole) == null) {
printInfoAboutRoles(role);
return null;
}
return LAUNCHERS.get(gridRole);
}
private void printInfoAboutRoles(String roleCommandLineArg) {
if (roleCommandLineArg != null) {
printWrappedLine(
"",
"Error: the role '" + roleCommandLineArg +
"' does not match a recognized server role: node/hub/standalone\n");
} else {
printWrappedLine(
"",
"Error: -role option needs to be followed by the value that defines role of this " +
"component in the grid\n");
}
out.println(
"Selenium server can run in one of the following roles:\n" +
" hub as a hub of a Selenium grid\n" +
" node as a node of a Selenium grid\n" +
" standalone as a standalone server not being a part of a grid\n" +
"\n" +
"If -role option is omitted the server runs standalone\n");
printWrappedLine(
"",
"To get help on the options available for a specific role run the server" +
" with -help option and the corresponding -role option value");
}
private void printWrappedLine(String prefix, String msg) {
printWrappedLine(prefix, msg, true);
}
private void printWrappedLine(String prefix, String msg, boolean first) {
out.print(prefix);
if (!first) {
out.print(" ");
}
int defaultWrap = 70;
int wrap = defaultWrap - prefix.length();
if (wrap > msg.length()) {
out.println(msg);
return;
}
String lineRaw = msg.substring(0, wrap);
int spaceIndex = lineRaw.lastIndexOf(' ');
if (spaceIndex == -1) {
spaceIndex = lineRaw.length();
}
String line = lineRaw.substring(0, spaceIndex);
out.println(line);
printWrappedLine(prefix, msg.substring(spaceIndex + 1), false);
}
private static void configureLogging(String log, boolean debug) {
Level logLevel = debug ? Level.FINE : LoggingOptions.getDefaultLogLevel();
if (logLevel == null) {
logLevel = Level.INFO;
}
Logger.getLogger("").setLevel(logLevel);
String logFilename = log != null ? log : LoggingOptions.getDefaultLogOutFile();
if (logFilename != null) {
for (Handler handler : Logger.getLogger("").getHandlers()) {
if (handler instanceof ConsoleHandler) {
Logger.getLogger("").removeHandler(handler);
}
}
try {
Handler logFile = new FileHandler(new File(logFilename).getAbsolutePath(), true);
logFile.setFormatter(new TerseFormatter());
logFile.setLevel(logLevel);
Logger.getLogger("").addHandler(logFile);
} catch (IOException e) {
throw new RuntimeException(e);
}
} else {
for (Handler handler : Logger.getLogger("").getHandlers()) {
if (handler instanceof ConsoleHandler) {
handler.setLevel(logLevel);
handler.setFormatter(new TerseFormatter());
}
}
}
}
private String version() {
return String.format(
"Selenium server version: %s, revision: %s",
buildInfo.getReleaseLabel(),
buildInfo.getBuildRevision());
}
private boolean parse(String[] args, Object options, CommonCliOptions common) {
JCommander commander = JCommander.newBuilder().addObject(options).build();
commander.parse(args);
if (common.getVersion()) {
out.println(version());
return false;
}
if (common.getHelp()) {
StringBuilder toPrint = new StringBuilder();
commander.usage(toPrint);
out.append(toPrint);
return false;
}
configureLogging(common.getLog(), common.getDebug());
log.info(version());
return true;
}
private Map<GridRole, GridItemLauncher> buildLaunchers() {
return ImmutableMap.<GridRole, GridItemLauncher>builder()
.put(GridRole.NOT_GRID, (args) -> {
StandaloneCliOptions options = new StandaloneCliOptions();
if (!parse(args, options, options.getCommonOptions())) {
return ()->{};
}
StandaloneConfiguration configuration = new StandaloneConfiguration(options);
log.info(String.format(
"Launching a standalone Selenium Server on port %s", configuration.port));
SeleniumServer server = new SeleniumServer(configuration);
server.boot();
return server;
})
.put(GridRole.HUB, (args) -> {
GridHubCliOptions options = new GridHubCliOptions();
if (!parse(args, options, options.getCommonGridOptions().getCommonOptions())) {
return ()->{};
}
GridHubConfiguration configuration = new GridHubConfiguration(options);
configuration.setRawArgs(args); // for grid console
log.info(String.format(
"Launching Selenium Grid hub on port %s", configuration.port));
Hub hub = new Hub(configuration);
hub.start();
return hub;
})
.put(GridRole.NODE, (args) -> {
GridNodeCliOptions options = new GridNodeCliOptions();
if (!parse(args, options, options.getCommonGridOptions().getCommonOptions())) {
return ()->{};
}
GridNodeConfiguration configuration = new GridNodeConfiguration(options);
if (configuration.port == null || configuration.port == -1) {
configuration.port = PortProber.findFreePort();
}
log.info(String.format(
"Launching a Selenium Grid node on port %s", configuration.port));
SelfRegisteringRemote remote = new SelfRegisteringRemote(configuration);
SeleniumServer server = new SeleniumServer(remote.getConfiguration());
remote.setRemoteServer(server);
if (remote.startRemoteServer()) {
log.info("Selenium Grid node is up and ready to register to the hub");
remote.startRegistrationProcess();
}
return server;
})
.build();
}
}
| 1 | 16,456 | This change means that users can't easily see which version of the selenium server they're using. This is `info` level information. | SeleniumHQ-selenium | js |
@@ -41,6 +41,8 @@ from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import urlmodel, miscmodels
from qutebrowser.mainwindow import mainwindow
+# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-69904
+MAX_WORLD_ID = 256 if qtutils.version_check('5.11.2') else 11
class CommandDispatcher:
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Command dispatcher for TabbedBrowser."""
import os
import os.path
import shlex
import functools
import typing
from PyQt5.QtWidgets import QApplication, QTabBar
from PyQt5.QtCore import pyqtSlot, Qt, QUrl, QEvent, QUrlQuery
from PyQt5.QtPrintSupport import QPrintPreviewDialog
from qutebrowser.commands import userscripts, cmdexc, cmdutils, runners
from qutebrowser.config import config, configdata
from qutebrowser.browser import (urlmarks, browsertab, inspector, navigate,
webelem, downloads)
from qutebrowser.keyinput import modeman, keyutils
from qutebrowser.utils import (message, usertypes, log, qtutils, urlutils,
objreg, utils, standarddir)
from qutebrowser.utils.usertypes import KeyMode
from qutebrowser.misc import editor, guiprocess
from qutebrowser.completion.models import urlmodel, miscmodels
from qutebrowser.mainwindow import mainwindow
class CommandDispatcher:
"""Command dispatcher for TabbedBrowser.
Contains all commands which are related to the current tab.
We can't simply add these commands to BrowserTab directly and use
currentWidget() for TabbedBrowser.cmd because at the time
cmdutils.register() decorators are run, currentWidget() will return None.
Attributes:
_win_id: The window ID the CommandDispatcher is associated with.
_tabbed_browser: The TabbedBrowser used.
"""
def __init__(self, win_id, tabbed_browser):
self._win_id = win_id
self._tabbed_browser = tabbed_browser
def __repr__(self):
return utils.get_repr(self)
def _new_tabbed_browser(self, private):
"""Get a tabbed-browser from a new window."""
new_window = mainwindow.MainWindow(private=private)
new_window.show()
return new_window.tabbed_browser
def _count(self):
"""Convenience method to get the widget count."""
return self._tabbed_browser.widget.count()
def _set_current_index(self, idx):
"""Convenience method to set the current widget index."""
cmdutils.check_overflow(idx, 'int')
self._tabbed_browser.widget.setCurrentIndex(idx)
def _current_index(self):
"""Convenience method to get the current widget index."""
return self._tabbed_browser.widget.currentIndex()
def _current_url(self):
"""Convenience method to get the current url."""
try:
return self._tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdexc.CommandError(msg)
def _current_title(self):
"""Convenience method to get the current title."""
return self._current_widget().title()
def _current_widget(self):
"""Get the currently active widget from a command."""
widget = self._tabbed_browser.widget.currentWidget()
if widget is None:
raise cmdexc.CommandError("No WebView available yet!")
return widget
def _open(self, url, tab=False, background=False, window=False,
related=False, private=None):
"""Helper function to open a page.
Args:
url: The URL to open as QUrl.
tab: Whether to open in a new tab.
background: Whether to open in the background.
window: Whether to open in a new window
private: If opening a new window, open it in private browsing mode.
If not given, inherit the current window's mode.
"""
urlutils.raise_cmdexc_if_invalid(url)
tabbed_browser = self._tabbed_browser
cmdutils.check_exclusive((tab, background, window, private), 'tbwp')
if window and private is None:
private = self._tabbed_browser.private
if window or private:
tabbed_browser = self._new_tabbed_browser(private)
tabbed_browser.tabopen(url)
elif tab:
tabbed_browser.tabopen(url, background=False, related=related)
elif background:
tabbed_browser.tabopen(url, background=True, related=related)
else:
widget = self._current_widget()
widget.openurl(url)
def _cntwidget(self, count=None):
"""Return a widget based on a count/idx.
Args:
count: The tab index, or None.
Return:
The current widget if count is None.
The widget with the given tab ID if count is given.
None if no widget was found.
"""
if count is None:
return self._tabbed_browser.widget.currentWidget()
elif 1 <= count <= self._count():
cmdutils.check_overflow(count + 1, 'int')
return self._tabbed_browser.widget.widget(count - 1)
else:
return None
def _tab_focus_last(self, *, show_error=True):
"""Select the tab which was last focused."""
try:
tab = objreg.get('last-focused-tab', scope='window',
window=self._win_id)
except KeyError:
if not show_error:
return
raise cmdexc.CommandError("No last focused tab!")
idx = self._tabbed_browser.widget.indexOf(tab)
if idx == -1:
raise cmdexc.CommandError("Last focused tab vanished!")
self._set_current_index(idx)
def _get_selection_override(self, prev, next_, opposite):
"""Helper function for tab_close to get the tab to select.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
Return:
QTabBar.SelectLeftTab, QTabBar.SelectRightTab, or None if no change
should be made.
"""
cmdutils.check_exclusive((prev, next_, opposite), 'pno')
if prev:
return QTabBar.SelectLeftTab
elif next_:
return QTabBar.SelectRightTab
elif opposite:
conf_selection = config.val.tabs.select_on_remove
if conf_selection == QTabBar.SelectLeftTab:
return QTabBar.SelectRightTab
elif conf_selection == QTabBar.SelectRightTab:
return QTabBar.SelectLeftTab
elif conf_selection == QTabBar.SelectPreviousTab:
raise cmdexc.CommandError(
"-o is not supported with 'tabs.select_on_remove' set to "
"'last-used'!")
else: # pragma: no cover
raise ValueError("Invalid select_on_remove value "
"{!r}!".format(conf_selection))
return None
def _tab_close(self, tab, prev=False, next_=False, opposite=False):
"""Helper function for tab_close be able to handle message.async.
Args:
tab: Tab object to select be closed.
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
count: The tab index to close, or None
"""
tabbar = self._tabbed_browser.widget.tabBar()
selection_override = self._get_selection_override(prev, next_,
opposite)
if selection_override is None:
self._tabbed_browser.close_tab(tab)
else:
old_selection_behavior = tabbar.selectionBehaviorOnRemove()
tabbar.setSelectionBehaviorOnRemove(selection_override)
self._tabbed_browser.close_tab(tab)
tabbar.setSelectionBehaviorOnRemove(old_selection_behavior)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_close(self, prev=False, next_=False, opposite=False,
force=False, count=None):
"""Close the current/[count]th tab.
Args:
prev: Force selecting the tab before the current tab.
next_: Force selecting the tab after the current tab.
opposite: Force selecting the tab in the opposite direction of
what's configured in 'tabs.select_on_remove'.
force: Avoid confirmation for pinned tabs.
count: The tab index to close, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
close = functools.partial(self._tab_close, tab, prev,
next_, opposite)
self._tabbed_browser.tab_close_prompt_if_pinned(tab, force, close)
@cmdutils.register(instance='command-dispatcher', scope='window',
name='tab-pin')
@cmdutils.argument('count', count=True)
def tab_pin(self, count=None):
"""Pin/Unpin the current/[count]th tab.
Pinning a tab shrinks it to the size of its title text.
Attempting to close a pinned tab will cause a confirmation,
unless --force is passed.
Args:
count: The tab index to pin or unpin, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
to_pin = not tab.data.pinned
self._tabbed_browser.widget.set_tab_pinned(tab, to_pin)
@cmdutils.register(instance='command-dispatcher', name='open',
maxsplit=0, scope='window')
@cmdutils.argument('url', completion=urlmodel.url)
@cmdutils.argument('count', count=True)
def openurl(self, url=None, related=False,
bg=False, tab=False, window=False, count=None, secure=False,
private=False):
"""Open a URL in the current/[count]th tab.
If the URL contains newlines, each line gets opened in its own tab.
Args:
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
count: The tab index to open the URL in, or None.
secure: Force HTTPS.
private: Open a new window in private browsing mode.
"""
if url is None:
urls = [config.val.url.default_page]
else:
urls = self._parse_url_input(url)
for i, cur_url in enumerate(urls):
if secure:
cur_url.setScheme('https')
if not window and i > 0:
tab = False
bg = True
if tab or bg or window or private:
self._open(cur_url, tab, bg, window, related=related,
private=private)
else:
curtab = self._cntwidget(count)
if curtab is None:
if count is None:
# We want to open a URL in the current tab, but none
# exists yet.
self._tabbed_browser.tabopen(cur_url)
else:
# Explicit count with a tab that doesn't exist.
return
elif curtab.data.pinned:
message.info("Tab is pinned!")
else:
curtab.openurl(cur_url)
def _parse_url(self, url, *, force_search=False):
"""Parse a URL or quickmark or search query.
Args:
url: The URL to parse.
force_search: Whether to force a search even if the content can be
interpreted as a URL or a path.
Return:
A URL that can be opened.
"""
try:
return objreg.get('quickmark-manager').get(url)
except urlmarks.Error:
try:
return urlutils.fuzzy_url(url, force_search=force_search)
except urlutils.InvalidUrlError as e:
# We don't use cmdexc.CommandError here as this can be
# called async from edit_url
message.error(str(e))
return None
def _parse_url_input(self, url):
"""Parse a URL or newline-separated list of URLs.
Args:
url: The URL or list to parse.
Return:
A list of URLs that can be opened.
"""
if isinstance(url, QUrl):
yield url
return
force_search = False
urllist = [u for u in url.split('\n') if u.strip()]
if (len(urllist) > 1 and not urlutils.is_url(urllist[0]) and
urlutils.get_path_if_valid(urllist[0], check_exists=True)
is None):
urllist = [url]
force_search = True
for cur_url in urllist:
parsed = self._parse_url(cur_url, force_search=force_search)
if parsed is not None:
yield parsed
@cmdutils.register(instance='command-dispatcher', name='reload',
scope='window')
@cmdutils.argument('count', count=True)
def reloadpage(self, force=False, count=None):
"""Reload the current/[count]th tab.
Args:
count: The tab index to reload, or None.
force: Bypass the page cache.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.reload(force=force)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def stop(self, count=None):
"""Stop loading in the current/[count]th tab.
Args:
count: The tab index to stop, or None.
"""
tab = self._cntwidget(count)
if tab is not None:
tab.stop()
def _print_preview(self, tab):
"""Show a print preview."""
def print_callback(ok):
if not ok:
message.error("Printing failed!")
tab.printing.check_preview_support()
diag = QPrintPreviewDialog(tab)
diag.setAttribute(Qt.WA_DeleteOnClose)
diag.setWindowFlags(diag.windowFlags() | Qt.WindowMaximizeButtonHint |
Qt.WindowMinimizeButtonHint)
diag.paintRequested.connect(functools.partial(
tab.printing.to_printer, callback=print_callback))
diag.exec_()
def _print_pdf(self, tab, filename):
"""Print to the given PDF file."""
tab.printing.check_pdf_support()
filename = os.path.expanduser(filename)
directory = os.path.dirname(filename)
if directory and not os.path.exists(directory):
os.mkdir(directory)
tab.printing.to_pdf(filename)
log.misc.debug("Print to file: {}".format(filename))
@cmdutils.register(instance='command-dispatcher', name='print',
scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('pdf', flag='f', metavar='file')
def printpage(self, preview=False, count=None, *, pdf=None):
"""Print the current/[count]th tab.
Args:
preview: Show preview instead of printing.
count: The tab index to print, or None.
pdf: The file path to write the PDF to.
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
if preview:
self._print_preview(tab)
elif pdf:
self._print_pdf(tab, pdf)
else:
tab.printing.show_dialog()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_clone(self, bg=False, window=False):
"""Duplicate the current tab.
Args:
bg: Open in a background tab.
window: Open in a new window.
Return:
The new QWebView.
"""
cmdutils.check_exclusive((bg, window), 'bw')
curtab = self._current_widget()
cur_title = self._tabbed_browser.widget.page_title(
self._current_index())
try:
history = curtab.history.serialize()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
# The new tab could be in a new tabbed_browser (e.g. because of
# tabs.tabs_are_windows being set)
if window:
new_tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
new_tabbed_browser = self._tabbed_browser
newtab = new_tabbed_browser.tabopen(background=bg)
new_tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=newtab.win_id)
idx = new_tabbed_browser.widget.indexOf(newtab)
new_tabbed_browser.widget.set_page_title(idx, cur_title)
if curtab.data.should_show_icon():
new_tabbed_browser.widget.setTabIcon(idx, curtab.icon())
if config.val.tabs.tabs_are_windows:
new_tabbed_browser.widget.window().setWindowIcon(curtab.icon())
newtab.data.keep_icon = True
newtab.history.deserialize(history)
newtab.zoom.set_factor(curtab.zoom.factor())
new_tabbed_browser.widget.set_tab_pinned(newtab, curtab.data.pinned)
return newtab
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', completion=miscmodels.other_buffer)
def tab_take(self, index):
"""Take a tab from another window.
Args:
index: The [win_id/]index of the tab to take. Or a substring
in which case the closest match will be taken.
"""
tabbed_browser, tab = self._resolve_buffer_index(index)
if tabbed_browser is self._tabbed_browser:
raise cmdexc.CommandError("Can't take a tab from the same window")
self._open(tab.url(), tab=True)
tabbed_browser.close_tab(tab, add_undo=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('win_id', completion=miscmodels.window)
@cmdutils.argument('count', count=True)
def tab_give(self, win_id: int = None, count=None):
"""Give the current tab to a new or existing window if win_id given.
If no win_id is given, the tab will get detached into a new window.
Args:
win_id: The window ID of the window to give the current tab to.
count: Overrides win_id (index starts at 1 for win_id=0).
"""
if count is not None:
win_id = count - 1
if win_id == self._win_id:
raise cmdexc.CommandError("Can't give a tab to the same window")
if win_id is None:
if self._count() < 2:
raise cmdexc.CommandError("Cannot detach from a window with "
"only one tab")
tabbed_browser = self._new_tabbed_browser(
private=self._tabbed_browser.private)
else:
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.tabopen(self._current_url())
self._tabbed_browser.close_tab(self._current_widget(), add_undo=False)
def _back_forward(self, tab, bg, window, count, forward):
"""Helper function for :back/:forward."""
history = self._current_widget().history
# Catch common cases before e.g. cloning tab
if not forward and not history.can_go_back():
raise cmdexc.CommandError("At beginning of history.")
elif forward and not history.can_go_forward():
raise cmdexc.CommandError("At end of history.")
if tab or bg or window:
widget = self.tab_clone(bg, window)
else:
widget = self._current_widget()
try:
if forward:
widget.history.forward(count)
else:
widget.history.back(count)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def back(self, tab=False, bg=False, window=False, count=1):
"""Go back in the history of the current tab.
Args:
tab: Go back in a new tab.
bg: Go back in a background tab.
window: Go back in a new window.
count: How many pages to go back.
"""
self._back_forward(tab, bg, window, count, forward=False)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def forward(self, tab=False, bg=False, window=False, count=1):
"""Go forward in the history of the current tab.
Args:
tab: Go forward in a new tab.
bg: Go forward in a background tab.
window: Go forward in a new window.
count: How many pages to go forward.
"""
self._back_forward(tab, bg, window, count, forward=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('where', choices=['prev', 'next', 'up', 'increment',
'decrement'])
@cmdutils.argument('count', count=True)
def navigate(self, where: str, tab=False, bg=False, window=False, count=1):
"""Open typical prev/next links or navigate using the URL path.
This tries to automatically click on typical _Previous Page_ or
_Next Page_ links using some heuristics.
Alternatively it can navigate by changing the current URL.
Args:
where: What to open.
- `prev`: Open a _previous_ link.
- `next`: Open a _next_ link.
- `up`: Go up a level in the current URL.
- `increment`: Increment the last number in the URL.
Uses the
link:settings.html#url.incdec_segments[url.incdec_segments]
config option.
- `decrement`: Decrement the last number in the URL.
Uses the
link:settings.html#url.incdec_segments[url.incdec_segments]
config option.
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
count: For `increment` and `decrement`, the number to change the
URL by. For `up`, the number of levels to go up in the URL.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
cmdutils.check_exclusive((tab, bg, window), 'tbw')
widget = self._current_widget()
url = self._current_url().adjusted(QUrl.RemoveFragment)
handlers = {
'prev': functools.partial(navigate.prevnext, prev=True),
'next': functools.partial(navigate.prevnext, prev=False),
'up': navigate.path_up,
'decrement': functools.partial(navigate.incdec,
inc_or_dec='decrement'),
'increment': functools.partial(navigate.incdec,
inc_or_dec='increment'),
}
try:
if where in ['prev', 'next']:
handler = handlers[where]
handler(browsertab=widget, win_id=self._win_id, baseurl=url,
tab=tab, background=bg, window=window)
elif where in ['up', 'increment', 'decrement']:
new_url = handlers[where](url, count)
self._open(new_url, tab, bg, window, related=True)
else: # pragma: no cover
raise ValueError("Got called with invalid value {} for "
"`where'.".format(where))
except navigate.Error as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll_px(self, dx: int, dy: int, count=1):
"""Scroll the current tab by 'count * dx/dy' pixels.
Args:
dx: How much to scroll in x-direction.
dy: How much to scroll in y-direction.
count: multiplier
"""
dx *= count
dy *= count
cmdutils.check_overflow(dx, 'int')
cmdutils.check_overflow(dy, 'int')
self._current_widget().scroller.delta(dx, dy)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def scroll(self, direction: typing.Union[str, int], count=1):
"""Scroll the current tab in the given direction.
Note you can use `:run-with-count` to have a keybinding with a bigger
scroll increment.
Args:
direction: In which direction to scroll
(up/down/left/right/top/bottom).
count: multiplier
"""
tab = self._current_widget()
funcs = {
'up': tab.scroller.up,
'down': tab.scroller.down,
'left': tab.scroller.left,
'right': tab.scroller.right,
'top': tab.scroller.top,
'bottom': tab.scroller.bottom,
'page-up': tab.scroller.page_up,
'page-down': tab.scroller.page_down,
}
try:
func = funcs[direction]
except KeyError:
expected_values = ', '.join(sorted(funcs))
raise cmdexc.CommandError("Invalid value {!r} for direction - "
"expected one of: {}".format(
direction, expected_values))
if direction in ['top', 'bottom']:
func()
else:
func(count=count)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('horizontal', flag='x')
def scroll_to_perc(self, perc: float = None, horizontal=False, count=None):
"""Scroll to a specific percentage of the page.
The percentage can be given either as argument or as count.
If no percentage is given, the page is scrolled to the end.
Args:
perc: Percentage to scroll.
horizontal: Scroll horizontally instead of vertically.
count: Percentage to scroll.
"""
# save the pre-jump position in the special ' mark
self.set_mark("'")
if perc is None and count is None:
perc = 100
elif count is not None:
perc = count
if horizontal:
x = perc
y = None
else:
x = None
y = perc
self._current_widget().scroller.to_perc(x, y)
@cmdutils.register(instance='command-dispatcher', scope='window')
def scroll_to_anchor(self, name):
"""Scroll to the given anchor in the document.
Args:
name: The anchor to scroll to.
"""
self._current_widget().scroller.to_anchor(name)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
@cmdutils.argument('top_navigate', metavar='ACTION',
choices=('prev', 'decrement'))
@cmdutils.argument('bottom_navigate', metavar='ACTION',
choices=('next', 'increment'))
def scroll_page(self, x: float, y: float, *,
top_navigate: str = None, bottom_navigate: str = None,
count=1):
"""Scroll the frame page-wise.
Args:
x: How many pages to scroll to the right.
y: How many pages to scroll down.
bottom_navigate: :navigate action (next, increment) to run when
scrolling down at the bottom of the page.
top_navigate: :navigate action (prev, decrement) to run when
scrolling up at the top of the page.
count: multiplier
"""
tab = self._current_widget()
if not tab.url().isValid():
# See https://github.com/qutebrowser/qutebrowser/issues/701
return
if bottom_navigate is not None and tab.scroller.at_bottom():
self.navigate(bottom_navigate)
return
elif top_navigate is not None and tab.scroller.at_top():
self.navigate(top_navigate)
return
try:
tab.scroller.delta_page(count * x, count * y)
except OverflowError:
raise cmdexc.CommandError(
"Numeric argument is too large for internal int "
"representation.")
def _yank_url(self, what):
"""Helper method for yank() to get the URL to copy."""
assert what in ['url', 'pretty-url'], what
flags = QUrl.RemovePassword
if what == 'pretty-url':
flags |= QUrl.DecodeReserved
else:
flags |= QUrl.FullyEncoded
url = QUrl(self._current_url())
url_query = QUrlQuery()
url_query_str = urlutils.query_string(url)
if '&' not in url_query_str and ';' in url_query_str:
url_query.setQueryDelimiters('=', ';')
url_query.setQuery(url_query_str)
for key in dict(url_query.queryItems()):
if key in config.val.url.yank_ignored_parameters:
url_query.removeQueryItem(key)
url.setQuery(url_query)
return url.toString(flags)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('what', choices=['selection', 'url', 'pretty-url',
'title', 'domain'])
def yank(self, what='url', sel=False, keep=False):
"""Yank something to the clipboard or primary selection.
Args:
what: What to yank.
- `url`: The current URL.
- `pretty-url`: The URL in pretty decoded form.
- `title`: The current page's title.
- `domain`: The current scheme, domain, and port number.
- `selection`: The selection under the cursor.
sel: Use the primary selection instead of the clipboard.
keep: Stay in visual mode after yanking the selection.
"""
if what == 'title':
s = self._tabbed_browser.widget.page_title(self._current_index())
elif what == 'domain':
port = self._current_url().port()
s = '{}://{}{}'.format(self._current_url().scheme(),
self._current_url().host(),
':' + str(port) if port > -1 else '')
elif what in ['url', 'pretty-url']:
s = self._yank_url(what)
what = 'URL' # For printing
elif what == 'selection':
def _selection_callback(s):
if not s:
message.info("Nothing to yank")
return
self._yank_to_target(s, sel, what, keep)
caret = self._current_widget().caret
caret.selection(callback=_selection_callback)
return
else: # pragma: no cover
raise ValueError("Invalid value {!r} for `what'.".format(what))
self._yank_to_target(s, sel, what, keep)
def _yank_to_target(self, s, sel, what, keep):
if sel and utils.supports_selection():
target = "primary selection"
else:
sel = False
target = "clipboard"
utils.set_clipboard(s, selection=sel)
if what != 'selection':
message.info("Yanked {} to {}: {}".format(what, target, s))
else:
message.info("{} {} yanked to {}".format(
len(s), "char" if len(s) == 1 else "chars", target))
if not keep:
modeman.leave(self._win_id, KeyMode.caret, "yank selected",
maybe=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_in(self, count=1):
"""Increase the zoom level for the current tab.
Args:
count: How many steps to zoom in.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom_out(self, count=1):
"""Decrease the zoom level for the current tab.
Args:
count: How many steps to zoom out.
"""
tab = self._current_widget()
try:
perc = tab.zoom.offset(-count)
except ValueError as e:
raise cmdexc.CommandError(e)
message.info("Zoom level: {}%".format(int(perc)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def zoom(self, zoom=None, count=None):
"""Set the zoom level for the current tab.
The zoom can be given as argument or as [count]. If neither is
given, the zoom is set to the default zoom. If both are given,
use [count].
Args:
zoom: The zoom percentage to set.
count: The zoom percentage to set.
"""
if zoom is not None:
try:
zoom = int(zoom.rstrip('%'))
except ValueError:
raise cmdexc.CommandError("zoom: Invalid int value {}"
.format(zoom))
level = count if count is not None else zoom
if level is None:
level = config.val.zoom.default
tab = self._current_widget()
try:
tab.zoom.set_factor(float(level) / 100)
except ValueError:
raise cmdexc.CommandError("Can't zoom {}%!".format(level))
message.info("Zoom level: {}%".format(int(level)), replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window')
def tab_only(self, prev=False, next_=False, force=False):
"""Close all tabs except for the current one.
Args:
prev: Keep tabs before the current.
next_: Keep tabs after the current.
force: Avoid confirmation for pinned tabs.
"""
cmdutils.check_exclusive((prev, next_), 'pn')
cur_idx = self._tabbed_browser.widget.currentIndex()
assert cur_idx != -1
def _to_close(i):
"""Helper method to check if a tab should be closed or not."""
return not (i == cur_idx or
(prev and i < cur_idx) or
(next_ and i > cur_idx))
# close as many tabs as we can
first_tab = True
pinned_tabs_cleanup = False
for i, tab in enumerate(self._tabbed_browser.widgets()):
if _to_close(i):
if force or not tab.data.pinned:
self._tabbed_browser.close_tab(tab, new_undo=first_tab)
first_tab = False
else:
pinned_tabs_cleanup = tab
# Check to see if we would like to close any pinned tabs
if pinned_tabs_cleanup:
self._tabbed_browser.tab_close_prompt_if_pinned(
pinned_tabs_cleanup,
force,
lambda: self.tab_only(
prev=prev, next_=next_, force=True),
text="Are you sure you want to close pinned tabs?")
@cmdutils.register(instance='command-dispatcher', scope='window')
def undo(self):
"""Re-open the last closed tab or tabs."""
try:
self._tabbed_browser.undo()
except IndexError:
raise cmdexc.CommandError("Nothing to undo!")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_prev(self, count=1):
"""Switch to the previous tab, or switch [count] tabs back.
Args:
count: How many tabs to switch back.
"""
if self._count() == 0:
# Running :tab-prev after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() - count
if newidx >= 0:
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
log.webview.debug("First tab")
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def tab_next(self, count=1):
"""Switch to the next tab, or switch [count] tabs forward.
Args:
count: How many tabs to switch forward.
"""
if self._count() == 0:
# Running :tab-next after last tab was closed
# See https://github.com/qutebrowser/qutebrowser/issues/1448
return
newidx = self._current_index() + count
if newidx < self._count():
self._set_current_index(newidx)
elif config.val.tabs.wrap:
self._set_current_index(newidx % self._count())
else:
log.webview.debug("Last tab")
def _resolve_buffer_index(self, index):
"""Resolve a buffer index to the tabbedbrowser and tab.
Args:
index: The [win_id/]index of the tab to be selected. Or a substring
in which case the closest match will be focused.
"""
index_parts = index.split('/', 1)
try:
for part in index_parts:
int(part)
except ValueError:
model = miscmodels.buffer()
model.set_pattern(index)
if model.count() > 0:
index = model.data(model.first_item())
index_parts = index.split('/', 1)
else:
raise cmdexc.CommandError(
"No matching tab for: {}".format(index))
if len(index_parts) == 2:
win_id = int(index_parts[0])
idx = int(index_parts[1])
elif len(index_parts) == 1:
idx = int(index_parts[0])
active_win = objreg.get('app').activeWindow()
if active_win is None:
# Not sure how you enter a command without an active window...
raise cmdexc.CommandError(
"No window specified and couldn't find active window!")
win_id = active_win.win_id
if win_id not in objreg.window_registry:
raise cmdexc.CommandError(
"There's no window with id {}!".format(win_id))
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
if not 0 < idx <= tabbed_browser.widget.count():
raise cmdexc.CommandError(
"There's no tab with index {}!".format(idx))
return (tabbed_browser, tabbed_browser.widget.widget(idx-1))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('index', completion=miscmodels.buffer)
@cmdutils.argument('count', count=True)
def buffer(self, index=None, count=None):
"""Select tab by index or url/title best match.
Focuses window if necessary when index is given. If both index and
count are given, use count.
With neither index nor count given, open the qute://tabs page.
Args:
index: The [win_id/]index of the tab to focus. Or a substring
in which case the closest match will be focused.
count: The tab index to focus, starting with 1.
"""
if count is None and index is None:
self.openurl('qute://tabs/', tab=True)
return
if count is not None:
index = str(count)
tabbed_browser, tab = self._resolve_buffer_index(index)
window = tabbed_browser.widget.window()
window.activateWindow()
window.raise_()
tabbed_browser.widget.setCurrentWidget(tab)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['last'])
@cmdutils.argument('count', count=True)
def tab_focus(self, index: typing.Union[str, int] = None,
count=None, no_last=False):
"""Select the tab given as argument/[count].
If neither count nor index are given, it behaves like tab-next.
If both are given, use count.
Args:
index: The tab index to focus, starting with 1. The special value
`last` focuses the last focused tab (regardless of count).
Negative indices count from the end, such that -1 is the
last tab.
count: The tab index to focus, starting with 1.
no_last: Whether to avoid focusing last tab if already focused.
"""
index = count if count is not None else index
if index == 'last':
self._tab_focus_last()
return
elif not no_last and index == self._current_index() + 1:
self._tab_focus_last(show_error=False)
return
elif index is None:
self.tab_next()
return
if index < 0:
index = self._count() + index + 1
if 1 <= index <= self._count():
self._set_current_index(index - 1)
else:
raise cmdexc.CommandError("There's no tab with index {}!".format(
index))
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('index', choices=['+', '-'])
@cmdutils.argument('count', count=True)
def tab_move(self, index: typing.Union[str, int] = None, count=None):
"""Move the current tab according to the argument and [count].
If neither is given, move it to the first position.
Args:
index: `+` or `-` to move relative to the current tab by
count positions, or by one position by default.
A tab index to move to that index.
count: If moving relatively: Offset.
If moving absolutely: New position (default: 0). This
overrides the index argument, if given.
"""
if index in ['+', '-']:
# relative moving
new_idx = self._current_index()
delta = 1 if count is None else count
if index == '-':
new_idx -= delta
elif index == '+': # pragma: no branch
new_idx += delta
if config.val.tabs.wrap:
new_idx %= self._count()
else:
# absolute moving
if count is not None:
new_idx = count - 1
elif index is not None:
new_idx = index - 1 if index >= 0 else index + self._count()
else:
new_idx = 0
if not 0 <= new_idx < self._count():
raise cmdexc.CommandError("Can't move tab to position {}!".format(
new_idx + 1))
cur_idx = self._current_index()
cmdutils.check_overflow(cur_idx, 'int')
cmdutils.check_overflow(new_idx, 'int')
self._tabbed_browser.widget.tabBar().moveTab(cur_idx, new_idx)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_replace_variables=True)
@cmdutils.argument('count', count=True)
def spawn(self, cmdline, userscript=False, verbose=False,
output=False, detach=False, count=None):
"""Spawn a command in a shell.
Args:
userscript: Run the command as a userscript. You can use an
absolute path, or store the userscript in one of those
locations:
- `~/.local/share/qutebrowser/userscripts`
(or `$XDG_DATA_DIR`)
- `/usr/share/qutebrowser/userscripts`
verbose: Show notifications when the command started/exited.
output: Whether the output should be shown in a new tab.
detach: Whether the command should be detached from qutebrowser.
cmdline: The commandline to execute.
count: Given to userscripts as $QUTE_COUNT.
"""
cmdutils.check_exclusive((userscript, detach), 'ud')
try:
cmd, *args = shlex.split(cmdline)
except ValueError as e:
raise cmdexc.CommandError("Error while splitting command: "
"{}".format(e))
args = runners.replace_variables(self._win_id, args)
log.procs.debug("Executing {} with args {}, userscript={}".format(
cmd, args, userscript))
@pyqtSlot()
def _on_proc_finished():
if output:
tb = objreg.get('tabbed-browser', scope='window',
window='last-focused')
tb.openurl(QUrl('qute://spawn-output'), newtab=True)
if userscript:
def _selection_callback(s):
try:
runner = self._run_userscript(s, cmd, args, verbose, count)
runner.finished.connect(_on_proc_finished)
except cmdexc.CommandError as e:
message.error(str(e))
# ~ expansion is handled by the userscript module.
# dirty hack for async call because of:
# https://bugreports.qt.io/browse/QTBUG-53134
# until it is fixed or a blocking async call is implemented:
# https://github.com/qutebrowser/qutebrowser/issues/3327
caret = self._current_widget().caret
caret.selection(callback=_selection_callback)
else:
cmd = os.path.expanduser(cmd)
proc = guiprocess.GUIProcess(what='command', verbose=verbose,
parent=self._tabbed_browser)
if detach:
proc.start_detached(cmd, args)
else:
proc.start(cmd, args)
proc.finished.connect(_on_proc_finished)
@cmdutils.register(instance='command-dispatcher', scope='window')
def home(self):
"""Open main startpage in current tab."""
self.openurl(config.val.url.start_pages[0])
def _run_userscript(self, selection, cmd, args, verbose, count):
"""Run a userscript given as argument.
Args:
selection: The selected text, passed to the userscript as $QUTE_SELECTED_TEXT.
cmd: The userscript to run.
args: Arguments to pass to the userscript.
verbose: Show notifications when the command started/exited.
count: Exposed to the userscript.
"""
env = {
'QUTE_MODE': 'command',
'QUTE_SELECTED_TEXT': selection,
}
if count is not None:
env['QUTE_COUNT'] = str(count)
idx = self._current_index()
if idx != -1:
env['QUTE_TITLE'] = self._tabbed_browser.widget.page_title(idx)
# FIXME:qtwebengine: If tab is None, run_async will fail!
tab = self._tabbed_browser.widget.currentWidget()
try:
url = self._tabbed_browser.current_url()
except qtutils.QtValueError:
pass
else:
env['QUTE_URL'] = url.toString(QUrl.FullyEncoded)
try:
runner = userscripts.run_async(
tab, cmd, *args, win_id=self._win_id, env=env, verbose=verbose)
except userscripts.Error as e:
raise cmdexc.CommandError(e)
return runner
@cmdutils.register(instance='command-dispatcher', scope='window')
def quickmark_save(self):
"""Save the current page as a quickmark."""
quickmark_manager = objreg.get('quickmark-manager')
quickmark_manager.prompt_save(self._current_url())
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_load(self, name, tab=False, bg=False, window=False):
"""Load a quickmark.
Args:
name: The name of the quickmark to load.
tab: Load the quickmark in a new tab.
bg: Load the quickmark in a new background tab.
window: Load the quickmark in a new window.
"""
try:
url = objreg.get('quickmark-manager').get(name)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('name', completion=miscmodels.quickmark)
def quickmark_del(self, name=None):
"""Delete a quickmark.
Args:
name: The name of the quickmark to delete. If not given, delete the
quickmark for the current page (choosing one arbitrarily
if there are more than one).
"""
quickmark_manager = objreg.get('quickmark-manager')
if name is None:
url = self._current_url()
try:
name = quickmark_manager.get_by_qurl(url)
except urlmarks.DoesNotExistError as e:
raise cmdexc.CommandError(str(e))
try:
quickmark_manager.delete(name)
except KeyError:
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
If no url and title are provided, then save the current page as a
bookmark.
If a url and title have been provided, then save the given url as
a bookmark with the provided title.
You can view all saved bookmarks on the
link:qute://bookmarks[bookmarks page].
Args:
url: url to save as a bookmark. If not given, use url of current
page.
title: title of the new bookmark.
toggle: remove the bookmark instead of raising an error if it
already exists.
"""
if url and not title:
raise cmdexc.CommandError('Title must be provided if url has '
'been provided')
bookmark_manager = objreg.get('bookmark-manager')
if not url:
url = self._current_url()
else:
try:
url = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
if not title:
title = self._current_title()
try:
was_added = bookmark_manager.add(url, title, toggle=toggle)
except urlmarks.Error as e:
raise cmdexc.CommandError(str(e))
else:
msg = "Bookmarked {}" if was_added else "Removed bookmark {}"
message.info(msg.format(url.toDisplayString()))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_load(self, url, tab=False, bg=False, window=False,
delete=False):
"""Load a bookmark.
Args:
url: The url of the bookmark to load.
tab: Load the bookmark in a new tab.
bg: Load the bookmark in a new background tab.
window: Load the bookmark in a new window.
delete: Whether to delete the bookmark afterwards.
"""
try:
qurl = urlutils.fuzzy_url(url)
except urlutils.InvalidUrlError as e:
raise cmdexc.CommandError(e)
self._open(qurl, tab, bg, window)
if delete:
self.bookmark_del(url)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
@cmdutils.argument('url', completion=miscmodels.bookmark)
def bookmark_del(self, url=None):
"""Delete a bookmark.
Args:
url: The url of the bookmark to delete. If not given, use the
current page's url.
"""
if url is None:
url = self._current_url().toString(QUrl.RemovePassword |
QUrl.FullyEncoded)
try:
objreg.get('bookmark-manager').delete(url)
except KeyError:
raise cmdexc.CommandError("Bookmark '{}' not found!".format(url))
@cmdutils.register(instance='command-dispatcher', scope='window')
def follow_selected(self, *, tab=False):
"""Follow the selected text.
Args:
tab: Load the selected link in a new tab.
"""
try:
self._current_widget().caret.follow_selected(tab=tab)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', name='inspector',
scope='window')
def toggle_inspector(self):
"""Toggle the web inspector.
Note: Due to a bug in Qt, the inspector will show incorrect request
headers in the network tab.
"""
tab = self._current_widget()
# FIXME:qtwebengine have a proper API for this
page = tab._widget.page() # pylint: disable=protected-access
try:
if tab.data.inspector is None:
tab.data.inspector = inspector.create()
tab.data.inspector.inspect(page)
tab.data.inspector.show()
else:
tab.data.inspector.toggle(page)
except inspector.WebInspectorError as e:
raise cmdexc.CommandError(e)
@cmdutils.register(instance='command-dispatcher', scope='window')
def download(self, url=None, *, mhtml_=False, dest=None):
"""Download a given URL, or current page if no URL given.
Args:
url: The URL to download. If not given, download the current page.
dest: The file path to write the download to, or None to ask.
mhtml_: Download the current page and all assets as mhtml file.
"""
# FIXME:qtwebengine do this with the QtWebEngine download manager?
download_manager = objreg.get('qtnetwork-download-manager')
target = None
if dest is not None:
dest = downloads.transform_path(dest)
if dest is None:
raise cmdexc.CommandError("Invalid target filename")
target = downloads.FileDownloadTarget(dest)
tab = self._current_widget()
user_agent = tab.user_agent()
if url:
if mhtml_:
raise cmdexc.CommandError("Can only download the current page"
" as mhtml.")
url = urlutils.qurl_from_user_input(url)
urlutils.raise_cmdexc_if_invalid(url)
download_manager.get(url, user_agent=user_agent, target=target)
elif mhtml_:
tab = self._current_widget()
if tab.backend == usertypes.Backend.QtWebEngine:
webengine_download_manager = objreg.get(
'webengine-download-manager')
try:
webengine_download_manager.get_mhtml(tab, target)
except browsertab.UnsupportedOperationError as e:
raise cmdexc.CommandError(e)
else:
download_manager.get_mhtml(tab, target)
else:
qnam = tab.networkaccessmanager()
suggested_fn = downloads.suggested_fn_from_title(
self._current_url().path(), tab.title()
)
download_manager.get(
self._current_url(),
user_agent=user_agent,
qnam=qnam,
target=target,
suggested_fn=suggested_fn
)
@cmdutils.register(instance='command-dispatcher', scope='window')
def view_source(self, edit=False, pygments=False):
"""Show the source of the current page in a new tab.
Args:
edit: Edit the source in the editor instead of opening a tab.
pygments: Use pygments to generate the view. This is always
the case for QtWebKit. For QtWebEngine it may display
slightly different source.
Some JavaScript processing may be applied.
"""
tab = self._current_widget()
try:
current_url = self._current_url()
except cmdexc.CommandError as e:
message.error(str(e))
return
if current_url.scheme() == 'view-source' or tab.data.viewing_source:
raise cmdexc.CommandError("Already viewing source!")
if edit:
ed = editor.ExternalEditor(self._tabbed_browser)
tab.dump_async(ed.edit)
else:
tab.action.show_source(pygments)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
def debug_dump_page(self, dest, plain=False):
"""Dump the current page's content to a file.
Args:
dest: Where to write the file to.
plain: Write plain text instead of HTML.
"""
tab = self._current_widget()
dest = os.path.expanduser(dest)
def callback(data):
"""Write the data to disk."""
try:
with open(dest, 'w', encoding='utf-8') as f:
f.write(data)
except OSError as e:
message.error('Could not write page: {}'.format(e))
else:
message.info("Dumped page to {}.".format(dest))
tab.dump_async(callback, plain=plain)
@cmdutils.register(instance='command-dispatcher', scope='window')
def history(self, tab=True, bg=False, window=False):
"""Show browsing history.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
url = QUrl('qute://history/')
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', name='help',
scope='window')
@cmdutils.argument('topic', completion=miscmodels.helptopic)
def show_help(self, tab=False, bg=False, window=False, topic=None):
r"""Show help about a command or setting.
Args:
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
topic: The topic to show help for.
- :__command__ for commands.
- __section__.__option__ for settings.
"""
if topic is None:
path = 'index.html'
elif topic.startswith(':'):
command = topic[1:]
if command not in cmdutils.cmd_dict:
raise cmdexc.CommandError("Invalid command {}!".format(
command))
path = 'commands.html#{}'.format(command)
elif topic in configdata.DATA:
path = 'settings.html#{}'.format(topic)
else:
raise cmdexc.CommandError("Invalid help topic {}!".format(topic))
url = QUrl('qute://help/{}'.format(path))
self._open(url, tab, bg, window)
@cmdutils.register(instance='command-dispatcher', scope='window')
def messages(self, level='info', plain=False, tab=False, bg=False,
window=False):
"""Show a log of past messages.
Args:
level: Include messages with `level` or higher severity.
Valid values: vdebug, debug, info, warning, error, critical.
plain: Whether to show plaintext (as opposed to html).
tab: Open in a new tab.
bg: Open in a background tab.
window: Open in a new window.
"""
if level.upper() not in log.LOG_LEVELS:
raise cmdexc.CommandError("Invalid log level {}!".format(level))
if plain:
url = QUrl('qute://plainlog?level={}'.format(level))
else:
url = QUrl('qute://log?level={}'.format(level))
self._open(url, tab, bg, window)
def _open_editor_cb(self, elem):
"""Open editor after the focus elem was found in open_editor."""
if elem is None:
message.error("No element focused!")
return
if not elem.is_editable(strict=True):
message.error("Focused element is not editable!")
return
text = elem.value()
if text is None:
message.error("Could not get text from the focused element.")
return
assert isinstance(text, str), text
caret_position = elem.caret_position()
ed = editor.ExternalEditor(watch=True, parent=self._tabbed_browser)
ed.file_updated.connect(functools.partial(
self.on_file_updated, ed, elem))
ed.editing_finished.connect(lambda: mainwindow.raise_window(
objreg.last_focused_window(), alert=False))
ed.edit(text, caret_position)
@cmdutils.register(instance='command-dispatcher', scope='window')
def open_editor(self):
"""Open an external editor with the currently selected form field.
The editor which should be launched can be configured via the
`editor.command` config option.
"""
tab = self._current_widget()
tab.elements.find_focused(self._open_editor_cb)
def on_file_updated(self, ed, elem, text):
"""Write the editor text into the form field and clean up tempfile.
Callback for GUIProcess when the edited text was updated.
Args:
elem: The WebElementWrapper which was modified.
text: The new text to insert.
"""
try:
elem.set_value(text)
except webelem.OrphanedError:
message.error('Edited element vanished')
ed.backup()
except webelem.Error as e:
message.error(str(e))
ed.backup()
@cmdutils.register(instance='command-dispatcher', maxsplit=0,
scope='window')
def insert_text(self, text):
"""Insert text at cursor position.
Args:
text: The text to insert.
"""
tab = self._current_widget()
def _insert_text_cb(elem):
if elem is None:
message.error("No element focused!")
return
try:
elem.insert_text(text)
except webelem.Error as e:
message.error(str(e))
return
tab.elements.find_focused(_insert_text_cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('filter_', choices=['id'])
def click_element(self, filter_: str, value, *,
target: usertypes.ClickTarget =
usertypes.ClickTarget.normal,
force_event=False):
"""Click the element matching the given filter.
The given filter needs to result in exactly one element, otherwise, an
error is shown.
Args:
filter_: How to filter the elements.
id: Get an element based on its ID.
value: The value to filter for.
target: How to open the clicked element (normal/tab/tab-bg/window).
force_event: Force generating a fake click event.
"""
tab = self._current_widget()
def single_cb(elem):
"""Click a single element."""
if elem is None:
message.error("No element found with id {}!".format(value))
return
try:
elem.click(target, force_event=force_event)
except webelem.Error as e:
message.error(str(e))
return
# def multiple_cb(elems):
# """Click multiple elements (with only one expected)."""
# if not elems:
# message.error("No element found!")
# return
# elif len(elems) != 1:
# message.error("{} elements found!".format(len(elems)))
# return
# elems[0].click(target)
handlers = {
'id': (tab.elements.find_id, single_cb),
}
handler, callback = handlers[filter_]
handler(value, callback)
def _search_cb(self, found, *, tab, old_scroll_pos, options, text, prev):
"""Callback called from search/search_next/search_prev.
Args:
found: Whether the text was found.
tab: The AbstractTab in which the search was made.
old_scroll_pos: The scroll position (QPoint) before the search.
options: The options (dict) the search was made with.
text: The text searched for.
prev: Whether we're searching backwards (i.e. :search-prev)
"""
# :search/:search-next without reverse -> down
# :search/:search-next with reverse -> up
# :search-prev without reverse -> up
# :search-prev with reverse -> down
going_up = options['reverse'] ^ prev
if found:
# Check if the scroll position got smaller and show info.
if not going_up and tab.scroller.pos_px().y() < old_scroll_pos.y():
message.info("Search hit BOTTOM, continuing at TOP")
elif going_up and tab.scroller.pos_px().y() > old_scroll_pos.y():
message.info("Search hit TOP, continuing at BOTTOM")
else:
message.warning("Text '{}' not found on page!".format(text),
replace=True)
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0)
def search(self, text="", reverse=False):
"""Search for a text on the current page. With no text, clear results.
Args:
text: The text to search for.
reverse: Reverse search direction.
"""
self.set_mark("'")
tab = self._current_widget()
if not text:
if tab.search.search_displayed:
tab.search.clear()
return
options = {
'ignore_case': config.val.search.ignore_case,
'reverse': reverse,
}
self._tabbed_browser.search_text = text
self._tabbed_browser.search_options = dict(options)
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=options, text=text, prev=False)
options['result_cb'] = cb
tab.search.search(text, **options)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_next(self, count=1):
"""Continue the search to the ([count]th) next term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=False)
for _ in range(count - 1):
tab.search.next_result()
tab.search.next_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', scope='window')
@cmdutils.argument('count', count=True)
def search_prev(self, count=1):
"""Continue the search to the ([count]th) previous term.
Args:
count: How many elements to ignore.
"""
tab = self._current_widget()
window_text = self._tabbed_browser.search_text
window_options = self._tabbed_browser.search_options
if window_text is None:
raise cmdexc.CommandError("No search done yet.")
self.set_mark("'")
if window_text is not None and window_text != tab.search.text:
tab.search.clear()
tab.search.search(window_text, **window_options)
count -= 1
if count == 0:
return
cb = functools.partial(self._search_cb, tab=tab,
old_scroll_pos=tab.scroller.pos_px(),
options=window_options, text=window_text,
prev=True)
for _ in range(count - 1):
tab.search.prev_result()
tab.search.prev_result(result_cb=cb)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_line(self, count=1):
"""Move the cursor or selection to the next line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_next_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_line(self, count=1):
"""Move the cursor or selection to the prev line.
Args:
count: How many lines to move.
"""
self._current_widget().caret.move_to_prev_line(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_char(self, count=1):
"""Move the cursor or selection to the next char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_next_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_char(self, count=1):
"""Move the cursor or selection to the previous char.
Args:
count: How many chars to move.
"""
self._current_widget().caret.move_to_prev_char(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_word(self, count=1):
"""Move the cursor or selection to the end of the word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_end_of_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_next_word(self, count=1):
"""Move the cursor or selection to the next word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_next_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_prev_word(self, count=1):
"""Move the cursor or selection to the previous word.
Args:
count: How many words to move.
"""
self._current_widget().caret.move_to_prev_word(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_line(self):
"""Move the cursor or selection to the start of the line."""
self._current_widget().caret.move_to_start_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_line(self):
"""Move the cursor or selection to the end of line."""
self._current_widget().caret.move_to_end_of_line()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_next_block(self, count=1):
"""Move the cursor or selection to the start of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_start_of_prev_block(self, count=1):
"""Move the cursor or selection to the start of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_start_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_next_block(self, count=1):
"""Move the cursor or selection to the end of next block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_next_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
@cmdutils.argument('count', count=True)
def move_to_end_of_prev_block(self, count=1):
"""Move the cursor or selection to the end of previous block.
Args:
count: How many blocks to move.
"""
self._current_widget().caret.move_to_end_of_prev_block(count)
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_start_of_document(self):
"""Move the cursor or selection to the start of the document."""
self._current_widget().caret.move_to_start_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def move_to_end_of_document(self):
"""Move the cursor or selection to the end of the document."""
self._current_widget().caret.move_to_end_of_document()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def toggle_selection(self):
"""Toggle caret selection mode."""
self._current_widget().caret.toggle_selection()
@cmdutils.register(instance='command-dispatcher', modes=[KeyMode.caret],
scope='window')
def drop_selection(self):
"""Drop selection and keep selection mode enabled."""
self._current_widget().caret.drop_selection()
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True)
@cmdutils.argument('count', count=True)
def debug_webaction(self, action, count=1):
"""Execute a webaction.
Available actions:
http://doc.qt.io/archives/qt-5.5/qwebpage.html#WebAction-enum (WebKit)
http://doc.qt.io/qt-5/qwebenginepage.html#WebAction-enum (WebEngine)
Args:
action: The action to execute, e.g. MoveToNextChar.
count: How many times to repeat the action.
"""
tab = self._current_widget()
for _ in range(count):
try:
tab.action.run_string(action)
except browsertab.WebTabError as e:
raise cmdexc.CommandError(str(e))
@cmdutils.register(instance='command-dispatcher', scope='window',
maxsplit=0, no_cmd_split=True)
def jseval(self, js_code, file=False, quiet=False, *,
world: typing.Union[usertypes.JsWorld, int] = None):
"""Evaluate a JavaScript string.
Args:
js_code: The string/file to evaluate.
file: Interpret js-code as a path to a file.
If the path is relative, the file is searched in a js/ subdir
in qutebrowser's data dir, e.g.
`~/.local/share/qutebrowser/js`.
quiet: Don't show resulting JS object.
world: Ignored on QtWebKit. On QtWebEngine, a world ID or name to
run the snippet in.
"""
if world is None:
world = usertypes.JsWorld.jseval
if quiet:
jseval_cb = None
else:
def jseval_cb(out):
"""Show the data returned from JS."""
if out is None:
# Getting the actual error (if any) seems to be difficult.
# The error does end up in
# BrowserPage.javaScriptConsoleMessage(), but
# distinguishing between :jseval errors and errors from the
# webpage is not trivial...
message.info('No output or error')
else:
# The output can be a string, number, dict, array, etc. But
# *don't* output too much data, as this will make
# qutebrowser hang
out = str(out)
if len(out) > 5000:
out = out[:5000] + ' [...trimmed...]'
message.info(out)
if file:
path = os.path.expanduser(js_code)
if not os.path.isabs(path):
path = os.path.join(standarddir.data(), 'js', path)
try:
with open(path, 'r', encoding='utf-8') as f:
js_code = f.read()
except OSError as e:
raise cmdexc.CommandError(str(e))
widget = self._current_widget()
widget.run_js_async(js_code, callback=jseval_cb, world=world)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fake_key(self, keystring, global_=False):
"""Send a fake keypress or key string to the website or qutebrowser.
:fake-key xy - sends the keychain 'xy'
:fake-key <Ctrl-x> - sends Ctrl-x
:fake-key <Escape> - sends the escape key
Args:
keystring: The keystring to send.
global_: If given, the keys are sent to the qutebrowser UI.
"""
try:
sequence = keyutils.KeySequence.parse(keystring)
except keyutils.KeyParseError as e:
raise cmdexc.CommandError(str(e))
for keyinfo in sequence:
press_event = keyinfo.to_event(QEvent.KeyPress)
release_event = keyinfo.to_event(QEvent.KeyRelease)
if global_:
window = QApplication.focusWindow()
if window is None:
raise cmdexc.CommandError("No focused window!")
QApplication.postEvent(window, press_event)
QApplication.postEvent(window, release_event)
else:
tab = self._current_widget()
tab.send_event(press_event)
tab.send_event(release_event)
@cmdutils.register(instance='command-dispatcher', scope='window',
debug=True, backend=usertypes.Backend.QtWebKit)
def debug_clear_ssl_errors(self):
"""Clear remembered SSL error answers."""
self._current_widget().clear_ssl_errors()
@cmdutils.register(instance='command-dispatcher', scope='window')
def edit_url(self, url=None, bg=False, tab=False, window=False,
private=False, related=False):
"""Navigate to a url formed in an external editor.
The editor which should be launched can be configured via the
`editor.command` config option.
Args:
url: URL to edit; defaults to the current page url.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
cmdutils.check_exclusive((tab, bg, window), 'tbw')
old_url = self._current_url().toString()
ed = editor.ExternalEditor(self._tabbed_browser)
# Passthrough for openurl args (e.g. -t, -b, -w)
ed.file_updated.connect(functools.partial(
self._open_if_changed, old_url=old_url, bg=bg, tab=tab,
window=window, private=private, related=related))
ed.edit(url or old_url)
@cmdutils.register(instance='command-dispatcher', scope='window')
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.set_mark(key)
@cmdutils.register(instance='command-dispatcher', scope='window')
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
def _open_if_changed(self, url=None, old_url=None, bg=False, tab=False,
window=False, private=False, related=False):
"""Open a URL unless it's already open in the tab.
Args:
old_url: The original URL to compare against.
url: The URL to open.
bg: Open in a new background tab.
tab: Open in a new tab.
window: Open in a new window.
private: Open a new window in private browsing mode.
related: If opening a new tab, position the tab as related to the
current one (like clicking on a link).
"""
if bg or tab or window or private or related or url != old_url:
self.openurl(url=url, bg=bg, tab=tab, window=window,
private=private, related=related)
@cmdutils.register(instance='command-dispatcher', scope='window')
def fullscreen(self, leave=False):
"""Toggle fullscreen mode.
Args:
leave: Only leave fullscreen if it was entered by the page.
"""
if leave:
tab = self._current_widget()
try:
tab.action.exit_fullscreen()
except browsertab.UnsupportedOperationError:
pass
return
window = self._tabbed_browser.widget.window()
window.setWindowState(window.windowState() ^ Qt.WindowFullScreen)
@cmdutils.register(instance='command-dispatcher', scope='window',
name='tab-mute')
@cmdutils.argument('count', count=True)
def tab_mute(self, count=None):
"""Mute/Unmute the current/[count]th tab.
Args:
count: The tab index to mute or unmute, or None
"""
tab = self._cntwidget(count)
if tab is None:
return
try:
tab.audio.toggle_muted()
except browsertab.WebTabError as e:
raise cmdexc.CommandError(e)
| 1 | 21,901 | This should be in qtutils and imported to here and the other place instead of declaring it twice. | qutebrowser-qutebrowser | py |
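Editor's sketch for the review above: the patch itself is not reproduced here, so the duplicated helper is unknown. The names below are purely hypothetical; the point is only the shape of the suggested refactor, declaring the helper once in qutebrowser.utils.qtutils and importing it at both call sites instead of defining it twice.

# qutebrowser/utils/qtutils.py -- hypothetical home for the shared helper
def clamp(value, minval, maxval):
    """Illustrative helper; the code the patch actually duplicated may differ."""
    return max(minval, min(maxval, value))

# Both call sites would then import it instead of re-declaring it:
# from qutebrowser.utils import qtutils
# zoom_level = qtutils.clamp(zoom_level, 0, 100)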
@@ -73,6 +73,15 @@ func (l *Logger) FetchCert(ctx context.Context, url string, bundle bool) ([][]by
return l.baseCl.FetchCert(ctx, url, bundle)
}
+func (l *Logger) FetchCertAlternatives(ctx context.Context, url string, bundle bool) ([][][]byte, error) {
+ l.log.V(logf.TraceLevel).Info("Calling FetchCertAlternatives")
+
+ ctx, cancel := context.WithTimeout(ctx, timeout)
+ defer cancel()
+
+ return l.baseCl.FetchCertAlternatives(ctx, url, bundle)
+}
+
func (l *Logger) WaitOrder(ctx context.Context, url string) (*acme.Order, error) {
l.log.V(logf.TraceLevel).Info("Calling WaitOrder")
| 1 |
/*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package middleware
import (
"context"
"time"
"github.com/go-logr/logr"
"golang.org/x/crypto/acme"
"github.com/jetstack/cert-manager/pkg/acme/client"
logf "github.com/jetstack/cert-manager/pkg/logs"
)
const (
timeout = time.Second * 10
)
func NewLogger(baseCl client.Interface) client.Interface {
return &Logger{
baseCl: baseCl,
log: logf.Log.WithName("acme-middleware"),
}
}
// Logger is a logging middleware for an ACME client
type Logger struct {
baseCl client.Interface
log logr.Logger
}
var _ client.Interface = &Logger{}
func (l *Logger) AuthorizeOrder(ctx context.Context, id []acme.AuthzID, opt ...acme.OrderOption) (*acme.Order, error) {
l.log.V(logf.TraceLevel).Info("Calling AuthorizeOrder")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.AuthorizeOrder(ctx, id, opt...)
}
func (l *Logger) GetOrder(ctx context.Context, url string) (*acme.Order, error) {
l.log.V(logf.TraceLevel).Info("Calling GetOrder")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.GetOrder(ctx, url)
}
func (l *Logger) FetchCert(ctx context.Context, url string, bundle bool) ([][]byte, error) {
l.log.V(logf.TraceLevel).Info("Calling FetchCert")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.FetchCert(ctx, url, bundle)
}
func (l *Logger) WaitOrder(ctx context.Context, url string) (*acme.Order, error) {
l.log.V(logf.TraceLevel).Info("Calling WaitOrder")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.WaitOrder(ctx, url)
}
func (l *Logger) CreateOrderCert(ctx context.Context, finalizeURL string, csr []byte, bundle bool) (der [][]byte, certURL string, err error) {
l.log.V(logf.TraceLevel).Info("Calling CreateOrderCert")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.CreateOrderCert(ctx, finalizeURL, csr, bundle)
}
func (l *Logger) Accept(ctx context.Context, chal *acme.Challenge) (*acme.Challenge, error) {
l.log.V(logf.TraceLevel).Info("Calling Accept")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.Accept(ctx, chal)
}
func (l *Logger) GetChallenge(ctx context.Context, url string) (*acme.Challenge, error) {
l.log.V(logf.TraceLevel).Info("Calling GetChallenge")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.GetChallenge(ctx, url)
}
func (l *Logger) GetAuthorization(ctx context.Context, url string) (*acme.Authorization, error) {
l.log.V(logf.TraceLevel).Info("Calling GetAuthorization")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.GetAuthorization(ctx, url)
}
func (l *Logger) WaitAuthorization(ctx context.Context, url string) (*acme.Authorization, error) {
l.log.V(logf.TraceLevel).Info("Calling WaitAuthorization")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.WaitAuthorization(ctx, url)
}
func (l *Logger) Register(ctx context.Context, a *acme.Account, prompt func(tosURL string) bool) (*acme.Account, error) {
l.log.V(logf.TraceLevel).Info("Calling Register")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.Register(ctx, a, prompt)
}
func (l *Logger) GetReg(ctx context.Context, url string) (*acme.Account, error) {
l.log.V(logf.TraceLevel).Info("Calling GetReg")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.GetReg(ctx, url)
}
func (l *Logger) HTTP01ChallengeResponse(token string) (string, error) {
l.log.V(logf.TraceLevel).Info("Calling HTTP01ChallengeResponse")
return l.baseCl.HTTP01ChallengeResponse(token)
}
func (l *Logger) DNS01ChallengeRecord(token string) (string, error) {
l.log.V(logf.TraceLevel).Info("Calling DNS01ChallengeRecord")
return l.baseCl.DNS01ChallengeRecord(token)
}
func (l *Logger) Discover(ctx context.Context) (acme.Directory, error) {
l.log.V(logf.TraceLevel).Info("Calling Discover")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.Discover(ctx)
}
func (l *Logger) UpdateReg(ctx context.Context, a *acme.Account) (*acme.Account, error) {
l.log.V(logf.TraceLevel).Info("Calling UpdateReg")
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
return l.baseCl.UpdateReg(ctx, a)
}
| 1 | 23,197 | Seems a bit weird to add a timeout in "logging" middleware, but I see that that is done elsewhere, so fine. | jetstack-cert-manager | go |
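Editor's sketch for the review above: one way to keep the Logger purely about logging would be to move the per-call timeout into its own middleware wrapping client.Interface. This is illustrative only; cert-manager does not ship such a middleware here, the Timeouter type is an assumption, and only one method is shown, with the remaining client.Interface methods following the same pattern.

package middleware

import (
	"context"
	"time"

	"github.com/jetstack/cert-manager/pkg/acme/client"
)

// Timeouter is a hypothetical middleware that only applies per-call timeouts,
// leaving logging to the Logger middleware.
type Timeouter struct {
	baseCl  client.Interface
	timeout time.Duration
}

func NewTimeouter(baseCl client.Interface, timeout time.Duration) *Timeouter {
	return &Timeouter{baseCl: baseCl, timeout: timeout}
}

// FetchCertAlternatives shows the pattern; the other methods would wrap their
// base calls the same way.
func (t *Timeouter) FetchCertAlternatives(ctx context.Context, url string, bundle bool) ([][][]byte, error) {
	ctx, cancel := context.WithTimeout(ctx, t.timeout)
	defer cancel()
	return t.baseCl.FetchCertAlternatives(ctx, url, bundle)
}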
@@ -25,7 +25,7 @@ export default AbstractIndexRoute.extend(ModalHelper, UserSession, {
actions: {
deleteItem(item) {
let i18n = this.get('i18n');
- let message = i18n.t('admin.customForms.messages.deleteForm');
+ let message = i18n.t('messages.delete_singular', { name: 'form' });
let model = Ember.Object.create({
itemToDelete: item
});
| 1 |
import AbstractIndexRoute from 'hospitalrun/routes/abstract-index-route';
import Ember from 'ember';
import ModalHelper from 'hospitalrun/mixins/modal-helper';
import UserSession from 'hospitalrun/mixins/user-session';
import { translationMacro as t } from 'ember-i18n';
const { computed } = Ember;
export default AbstractIndexRoute.extend(ModalHelper, UserSession, {
newButtonAction: computed(function() {
if (this.currentUserCan('update_config')) {
return 'newItem';
} else {
return null;
}
}),
newButtonText: t('admin.customForms.buttons.newForm'),
pageTitle: t('admin.customForms.titles.customForms'),
model() {
let store = this.get('store');
return store.findAll('custom-form');
},
actions: {
deleteItem(item) {
let i18n = this.get('i18n');
let message = i18n.t('admin.customForms.messages.deleteForm');
let model = Ember.Object.create({
itemToDelete: item
});
let title = i18n.t('admin.customForms.titles.deleteForm');
this.displayConfirm(title, message, 'deleteCustomForm', model);
},
deleteCustomForm(model) {
model.itemToDelete.set('archived', true);
model.itemToDelete.save().then(()=> {
model.itemToDelete.unloadRecord();
});
},
editItem(item) {
this.transitionTo('admin.custom-forms.edit', item);
},
newItem() {
this.transitionTo('admin.custom-forms.edit', 'new');
}
}
});
| 1 | 13,474 | This code is passing a non localized string when it should be passing in a localized string or it should use the name of the item being deleted. | HospitalRun-hospitalrun-frontend | js |
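Editor's sketch for the review above: two possible ways to address it are to localize the noun instead of hard-coding the English word 'form', or to use the name of the form being deleted. The translation key admin.customForms.labels.formName and the name attribute on the custom-form model are assumptions, not existing code.

deleteItem(item) {
  let i18n = this.get('i18n');
  // Option 1: pass a localized noun (key is hypothetical).
  let message = i18n.t('messages.delete_singular', {
    name: i18n.t('admin.customForms.labels.formName')
  });
  // Option 2: pass the name of the item being deleted
  // (assumes the custom-form model exposes a name attribute).
  // let message = i18n.t('messages.delete_singular', { name: item.get('name') });
  let model = Ember.Object.create({ itemToDelete: item });
  let title = i18n.t('admin.customForms.titles.deleteForm');
  this.displayConfirm(title, message, 'deleteCustomForm', model);
}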
@@ -556,6 +556,13 @@ class Uppy {
files: updatedFiles
})
+ // If nothing is uploading anymore, and if nothing has uploaded yet - allow new uploads!
+ if (Object.keys(updatedFiles).length === 0) {
+ this.setState({
+ allowNewUpload: true
+ })
+ }
+
removeUploads.forEach((uploadID) => {
this._removeUpload(uploadID)
})
| 1 |
const Translator = require('@uppy/utils/lib/Translator')
const ee = require('namespace-emitter')
const cuid = require('cuid')
const throttle = require('lodash.throttle')
const prettyBytes = require('@uppy/utils/lib/prettyBytes')
const match = require('mime-match')
const DefaultStore = require('@uppy/store-default')
const getFileType = require('@uppy/utils/lib/getFileType')
const getFileNameAndExtension = require('@uppy/utils/lib/getFileNameAndExtension')
const generateFileID = require('@uppy/utils/lib/generateFileID')
const supportsUploadProgress = require('./supportsUploadProgress')
const { nullLogger, debugLogger } = require('./loggers')
const Plugin = require('./Plugin') // Exported from here.
class RestrictionError extends Error {
constructor (...args) {
super(...args)
this.isRestriction = true
}
}
/**
* Uppy Core module.
* Manages plugins, state updates, acts as an event bus,
* adds/removes files and metadata.
*/
class Uppy {
static VERSION = require('../package.json').version
/**
* Instantiate Uppy
*
* @param {Object} opts — Uppy options
*/
constructor (opts) {
this.defaultLocale = {
strings: {
youCanOnlyUploadX: {
0: 'You can only upload %{smart_count} file',
1: 'You can only upload %{smart_count} files',
2: 'You can only upload %{smart_count} files'
},
youHaveToAtLeastSelectX: {
0: 'You have to select at least %{smart_count} file',
1: 'You have to select at least %{smart_count} files',
2: 'You have to select at least %{smart_count} files'
},
exceedsSize: 'This file exceeds maximum allowed size of',
youCanOnlyUploadFileTypes: 'You can only upload: %{types}',
companionError: 'Connection with Companion failed',
companionAuthError: 'Authorization required',
failedToUpload: 'Failed to upload %{file}',
noInternetConnection: 'No Internet connection',
connectedToInternet: 'Connected to the Internet',
// Strings for remote providers
noFilesFound: 'You have no files or folders here',
selectX: {
0: 'Select %{smart_count}',
1: 'Select %{smart_count}',
2: 'Select %{smart_count}'
},
selectAllFilesFromFolderNamed: 'Select all files from folder %{name}',
unselectAllFilesFromFolderNamed: 'Unselect all files from folder %{name}',
selectFileNamed: 'Select file %{name}',
unselectFileNamed: 'Unselect file %{name}',
openFolderNamed: 'Open folder %{name}',
cancel: 'Cancel',
logOut: 'Log out',
filter: 'Filter',
resetFilter: 'Reset filter',
loading: 'Loading...',
authenticateWithTitle: 'Please authenticate with %{pluginName} to select files',
authenticateWith: 'Connect to %{pluginName}',
emptyFolderAdded: 'No files were added from empty folder',
folderAdded: {
0: 'Added %{smart_count} file from %{folder}',
1: 'Added %{smart_count} files from %{folder}',
2: 'Added %{smart_count} files from %{folder}'
}
}
}
// set default options
const defaultOptions = {
id: 'uppy',
autoProceed: false,
allowMultipleUploads: true,
debug: false,
restrictions: {
maxFileSize: null,
maxNumberOfFiles: null,
minNumberOfFiles: null,
allowedFileTypes: null
},
meta: {},
onBeforeFileAdded: (currentFile, files) => currentFile,
onBeforeUpload: (files) => files,
store: DefaultStore(),
logger: nullLogger
}
// Merge default options with the ones set by user
this.opts = Object.assign({}, defaultOptions, opts)
this.opts.restrictions = Object.assign({}, defaultOptions.restrictions, this.opts.restrictions)
// Support debug: true for backwards-compatibility, unless logger is set in opts
// opts instead of this.opts to avoid comparing objects — we set logger: nullLogger in defaultOptions
if (opts && opts.logger && opts.debug) {
this.log('You are using a custom `logger`, but also set `debug: true`, which uses built-in logger to output logs to console. Ignoring `debug: true` and using your custom `logger`.', 'warning')
} else if (opts && opts.debug) {
this.opts.logger = debugLogger
}
this.log(`Using Core v${this.constructor.VERSION}`)
if (this.opts.restrictions.allowedFileTypes &&
this.opts.restrictions.allowedFileTypes !== null &&
!Array.isArray(this.opts.restrictions.allowedFileTypes)) {
throw new Error(`'restrictions.allowedFileTypes' must be an array`)
}
// i18n
this.translator = new Translator([ this.defaultLocale, this.opts.locale ])
this.locale = this.translator.locale
this.i18n = this.translator.translate.bind(this.translator)
this.i18nArray = this.translator.translateArray.bind(this.translator)
// Container for different types of plugins
this.plugins = {}
this.getState = this.getState.bind(this)
this.getPlugin = this.getPlugin.bind(this)
this.setFileMeta = this.setFileMeta.bind(this)
this.setFileState = this.setFileState.bind(this)
this.log = this.log.bind(this)
this.info = this.info.bind(this)
this.hideInfo = this.hideInfo.bind(this)
this.addFile = this.addFile.bind(this)
this.removeFile = this.removeFile.bind(this)
this.pauseResume = this.pauseResume.bind(this)
// ___Why throttle at 500ms?
// - We must throttle at >250ms for superfocus in Dashboard to work well (because animation takes 0.25s, and we want to wait for all animations to be over before refocusing).
// [Practical Check]: if the throttle is at 100ms, then if you are uploading a file and click 'ADD MORE FILES', focus won't activate in Firefox.
// - We must throttle at around >500ms to avoid performance lags.
// [Practical Check] Firefox, try to upload a big file for a prolonged period of time. Laptop will start to heat up.
this._calculateProgress = throttle(this._calculateProgress.bind(this), 500, { leading: true, trailing: true })
this.updateOnlineStatus = this.updateOnlineStatus.bind(this)
this.resetProgress = this.resetProgress.bind(this)
this.pauseAll = this.pauseAll.bind(this)
this.resumeAll = this.resumeAll.bind(this)
this.retryAll = this.retryAll.bind(this)
this.cancelAll = this.cancelAll.bind(this)
this.retryUpload = this.retryUpload.bind(this)
this.upload = this.upload.bind(this)
this.emitter = ee()
this.on = this.on.bind(this)
this.off = this.off.bind(this)
this.once = this.emitter.once.bind(this.emitter)
this.emit = this.emitter.emit.bind(this.emitter)
this.preProcessors = []
this.uploaders = []
this.postProcessors = []
this.store = this.opts.store
this.setState({
plugins: {},
files: {},
currentUploads: {},
allowNewUpload: true,
capabilities: {
uploadProgress: supportsUploadProgress(),
individualCancellation: true,
resumableUploads: false
},
totalProgress: 0,
meta: { ...this.opts.meta },
info: {
isHidden: true,
type: 'info',
message: ''
}
})
this._storeUnsubscribe = this.store.subscribe((prevState, nextState, patch) => {
this.emit('state-update', prevState, nextState, patch)
this.updateAll(nextState)
})
// Exposing uppy object on window for debugging and testing
if (this.opts.debug && typeof window !== 'undefined') {
window[this.opts.id] = this
}
this._addListeners()
}
on (event, callback) {
this.emitter.on(event, callback)
return this
}
off (event, callback) {
this.emitter.off(event, callback)
return this
}
/**
* Iterate on all plugins and run `update` on them.
* Called each time state changes.
*
*/
updateAll (state) {
this.iteratePlugins(plugin => {
plugin.update(state)
})
}
/**
* Updates state with a patch
*
* @param {Object} patch {foo: 'bar'}
*/
setState (patch) {
this.store.setState(patch)
}
/**
* Returns current state.
*
* @returns {Object}
*/
getState () {
return this.store.getState()
}
/**
* Back compat for when uppy.state is used instead of uppy.getState().
*/
get state () {
return this.getState()
}
/**
* Shorthand to set state for a specific file.
*/
setFileState (fileID, state) {
if (!this.getState().files[fileID]) {
throw new Error(`Can’t set state for ${fileID} (the file could have been removed)`)
}
this.setState({
files: Object.assign({}, this.getState().files, {
[fileID]: Object.assign({}, this.getState().files[fileID], state)
})
})
}
resetProgress () {
const defaultProgress = {
percentage: 0,
bytesUploaded: 0,
uploadComplete: false,
uploadStarted: null
}
const files = Object.assign({}, this.getState().files)
const updatedFiles = {}
Object.keys(files).forEach(fileID => {
const updatedFile = Object.assign({}, files[fileID])
updatedFile.progress = Object.assign({}, updatedFile.progress, defaultProgress)
updatedFiles[fileID] = updatedFile
})
this.setState({
files: updatedFiles,
totalProgress: 0
})
// TODO Document on the website
this.emit('reset-progress')
}
addPreProcessor (fn) {
this.preProcessors.push(fn)
}
removePreProcessor (fn) {
const i = this.preProcessors.indexOf(fn)
if (i !== -1) {
this.preProcessors.splice(i, 1)
}
}
addPostProcessor (fn) {
this.postProcessors.push(fn)
}
removePostProcessor (fn) {
const i = this.postProcessors.indexOf(fn)
if (i !== -1) {
this.postProcessors.splice(i, 1)
}
}
addUploader (fn) {
this.uploaders.push(fn)
}
removeUploader (fn) {
const i = this.uploaders.indexOf(fn)
if (i !== -1) {
this.uploaders.splice(i, 1)
}
}
setMeta (data) {
const updatedMeta = Object.assign({}, this.getState().meta, data)
const updatedFiles = Object.assign({}, this.getState().files)
Object.keys(updatedFiles).forEach((fileID) => {
updatedFiles[fileID] = Object.assign({}, updatedFiles[fileID], {
meta: Object.assign({}, updatedFiles[fileID].meta, data)
})
})
this.log('Adding metadata:')
this.log(data)
this.setState({
meta: updatedMeta,
files: updatedFiles
})
}
setFileMeta (fileID, data) {
const updatedFiles = Object.assign({}, this.getState().files)
if (!updatedFiles[fileID]) {
this.log('Was trying to set metadata for a file that has been removed: ', fileID)
return
}
const newMeta = Object.assign({}, updatedFiles[fileID].meta, data)
updatedFiles[fileID] = Object.assign({}, updatedFiles[fileID], {
meta: newMeta
})
this.setState({ files: updatedFiles })
}
/**
* Get a file object.
*
* @param {string} fileID The ID of the file object to return.
*/
getFile (fileID) {
return this.getState().files[fileID]
}
/**
* Get all files in an array.
*/
getFiles () {
const { files } = this.getState()
return Object.keys(files).map((fileID) => files[fileID])
}
/**
* Check if minNumberOfFiles restriction is reached before uploading.
*
* @private
*/
_checkMinNumberOfFiles (files) {
const { minNumberOfFiles } = this.opts.restrictions
if (Object.keys(files).length < minNumberOfFiles) {
throw new RestrictionError(`${this.i18n('youHaveToAtLeastSelectX', { smart_count: minNumberOfFiles })}`)
}
}
/**
* Check if file passes a set of restrictions set in options: maxFileSize,
* maxNumberOfFiles and allowedFileTypes.
*
* @param {Object} file object to check
* @private
*/
_checkRestrictions (file) {
const { maxFileSize, maxNumberOfFiles, allowedFileTypes } = this.opts.restrictions
if (maxNumberOfFiles) {
if (Object.keys(this.getState().files).length + 1 > maxNumberOfFiles) {
throw new RestrictionError(`${this.i18n('youCanOnlyUploadX', { smart_count: maxNumberOfFiles })}`)
}
}
if (allowedFileTypes) {
const isCorrectFileType = allowedFileTypes.some((type) => {
// check if this is a mime-type
if (type.indexOf('/') > -1) {
if (!file.type) return false
return match(file.type, type)
}
// otherwise this is likely an extension
if (type[0] === '.') {
return file.extension.toLowerCase() === type.substr(1).toLowerCase()
}
return false
})
if (!isCorrectFileType) {
const allowedFileTypesString = allowedFileTypes.join(', ')
throw new RestrictionError(this.i18n('youCanOnlyUploadFileTypes', { types: allowedFileTypesString }))
}
}
// We can't check maxFileSize if the size is unknown.
if (maxFileSize && file.data.size != null) {
if (file.data.size > maxFileSize) {
throw new RestrictionError(`${this.i18n('exceedsSize')} ${prettyBytes(maxFileSize)}`)
}
}
}
/**
* Add a new file to `state.files`. This will run `onBeforeFileAdded`,
* try to guess file type in a clever way, check file against restrictions,
* and start an upload if `autoProceed === true`.
*
* @param {Object} file object to add
* @returns {string} id for the added file
*/
addFile (file) {
const { files, allowNewUpload } = this.getState()
const onError = (msg) => {
const err = typeof msg === 'object' ? msg : new Error(msg)
this.log(err.message)
this.info(err.message, 'error', 5000)
throw err
}
if (allowNewUpload === false) {
onError(new Error('Cannot add new files: already uploading.'))
}
const fileType = getFileType(file)
file.type = fileType
const onBeforeFileAddedResult = this.opts.onBeforeFileAdded(file, files)
if (onBeforeFileAddedResult === false) {
this.log('Not adding file because onBeforeFileAdded returned false')
return
}
if (typeof onBeforeFileAddedResult === 'object' && onBeforeFileAddedResult) {
file = onBeforeFileAddedResult
}
let fileName
if (file.name) {
fileName = file.name
} else if (fileType.split('/')[0] === 'image') {
fileName = fileType.split('/')[0] + '.' + fileType.split('/')[1]
} else {
fileName = 'noname'
}
const fileExtension = getFileNameAndExtension(fileName).extension
const isRemote = file.isRemote || false
const fileID = generateFileID(file)
const meta = file.meta || {}
meta.name = fileName
meta.type = fileType
// `null` means the size is unknown.
const size = isFinite(file.data.size) ? file.data.size : null
const newFile = {
source: file.source || '',
id: fileID,
name: fileName,
extension: fileExtension || '',
meta: Object.assign({}, this.getState().meta, meta),
type: fileType,
data: file.data,
progress: {
percentage: 0,
bytesUploaded: 0,
bytesTotal: size,
uploadComplete: false,
uploadStarted: null
},
size: size,
isRemote: isRemote,
remote: file.remote || '',
preview: file.preview
}
try {
this._checkRestrictions(newFile)
} catch (err) {
this.emit('restriction-failed', newFile, err)
onError(err)
}
this.setState({
files: Object.assign({}, files, {
[fileID]: newFile
})
})
this.emit('file-added', newFile)
this.log(`Added file: ${fileName}, ${fileID}, mime type: ${fileType}`)
if (this.opts.autoProceed && !this.scheduledAutoProceed) {
this.scheduledAutoProceed = setTimeout(() => {
this.scheduledAutoProceed = null
this.upload().catch((err) => {
if (!err.isRestriction) {
            this.log(err.stack || err.message || err)
}
})
}, 4)
}
return fileID
}
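  // Usage sketch for addFile() (the Blob and meta values are illustrative):
  //
  //   const fileID = uppy.addFile({
  //     name: 'notes.txt',
  //     type: 'text/plain',
  //     data: new Blob(['hello'], { type: 'text/plain' }),
  //     source: 'Local',
  //     meta: { author: 'demo' }
  //   })
  //
  // It throws a RestrictionError when the file violates a restriction and
  // returns the generated file ID otherwise.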
removeFile (fileID) {
const { files, currentUploads } = this.getState()
const updatedFiles = Object.assign({}, files)
const removedFile = updatedFiles[fileID]
delete updatedFiles[fileID]
// Remove this file from its `currentUpload`.
const updatedUploads = Object.assign({}, currentUploads)
const removeUploads = []
Object.keys(updatedUploads).forEach((uploadID) => {
const newFileIDs = currentUploads[uploadID].fileIDs.filter((uploadFileID) => uploadFileID !== fileID)
// Remove the upload if no files are associated with it anymore.
if (newFileIDs.length === 0) {
removeUploads.push(uploadID)
return
}
updatedUploads[uploadID] = Object.assign({}, currentUploads[uploadID], {
fileIDs: newFileIDs
})
})
this.setState({
currentUploads: updatedUploads,
files: updatedFiles
})
removeUploads.forEach((uploadID) => {
this._removeUpload(uploadID)
})
this._calculateTotalProgress()
this.emit('file-removed', removedFile)
this.log(`File removed: ${removedFile.id}`)
}
pauseResume (fileID) {
if (!this.getState().capabilities.resumableUploads ||
this.getFile(fileID).uploadComplete) {
return
}
const wasPaused = this.getFile(fileID).isPaused || false
const isPaused = !wasPaused
this.setFileState(fileID, {
isPaused: isPaused
})
this.emit('upload-pause', fileID, isPaused)
return isPaused
}
pauseAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => {
return !updatedFiles[file].progress.uploadComplete &&
updatedFiles[file].progress.uploadStarted
})
inProgressUpdatedFiles.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: true
})
updatedFiles[file] = updatedFile
})
this.setState({ files: updatedFiles })
this.emit('pause-all')
}
resumeAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => {
return !updatedFiles[file].progress.uploadComplete &&
updatedFiles[file].progress.uploadStarted
})
inProgressUpdatedFiles.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: false,
error: null
})
updatedFiles[file] = updatedFile
})
this.setState({ files: updatedFiles })
this.emit('resume-all')
}
retryAll () {
const updatedFiles = Object.assign({}, this.getState().files)
const filesToRetry = Object.keys(updatedFiles).filter(file => {
return updatedFiles[file].error
})
filesToRetry.forEach((file) => {
const updatedFile = Object.assign({}, updatedFiles[file], {
isPaused: false,
error: null
})
updatedFiles[file] = updatedFile
})
this.setState({
files: updatedFiles,
error: null
})
this.emit('retry-all', filesToRetry)
const uploadID = this._createUpload(filesToRetry)
return this._runUpload(uploadID)
}
cancelAll () {
this.emit('cancel-all')
const files = Object.keys(this.getState().files)
files.forEach((fileID) => {
this.removeFile(fileID)
})
this.setState({
allowNewUpload: true,
totalProgress: 0,
error: null
})
}
retryUpload (fileID) {
this.setFileState(fileID, {
error: null,
isPaused: false
})
this.emit('upload-retry', fileID)
const uploadID = this._createUpload([ fileID ])
return this._runUpload(uploadID)
}
reset () {
this.cancelAll()
}
_calculateProgress (file, data) {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
// bytesTotal may be null or zero; in that case we can't divide by it
const canHavePercentage = isFinite(data.bytesTotal) && data.bytesTotal > 0
this.setFileState(file.id, {
progress: Object.assign({}, this.getFile(file.id).progress, {
bytesUploaded: data.bytesUploaded,
bytesTotal: data.bytesTotal,
percentage: canHavePercentage
// TODO(goto-bus-stop) flooring this should probably be the choice of the UI?
// we get more accurate calculations if we don't round this at all.
? Math.round(data.bytesUploaded / data.bytesTotal * 100)
: 0
})
})
this._calculateTotalProgress()
}
_calculateTotalProgress () {
    // calculate total progress, using the number of files currently uploading,
    // multiplied by 100 and the sum of individual progress of each file
const files = this.getFiles()
const inProgress = files.filter((file) => {
return file.progress.uploadStarted
})
if (inProgress.length === 0) {
this.emit('progress', 0)
this.setState({ totalProgress: 0 })
return
}
const sizedFiles = inProgress.filter((file) => file.progress.bytesTotal != null)
const unsizedFiles = inProgress.filter((file) => file.progress.bytesTotal == null)
if (sizedFiles.length === 0) {
const progressMax = inProgress.length * 100
const currentProgress = unsizedFiles.reduce((acc, file) => {
return acc + file.progress.percentage
}, 0)
const totalProgress = Math.round(currentProgress / progressMax * 100)
this.setState({ totalProgress })
return
}
let totalSize = sizedFiles.reduce((acc, file) => {
return acc + file.progress.bytesTotal
}, 0)
const averageSize = totalSize / sizedFiles.length
totalSize += averageSize * unsizedFiles.length
let uploadedSize = 0
sizedFiles.forEach((file) => {
uploadedSize += file.progress.bytesUploaded
})
unsizedFiles.forEach((file) => {
uploadedSize += averageSize * (file.progress.percentage || 0) / 100
})
let totalProgress = totalSize === 0
? 0
: Math.round(uploadedSize / totalSize * 100)
// hot fix, because:
// uploadedSize ended up larger than totalSize, resulting in 1325% total
if (totalProgress > 100) {
totalProgress = 100
}
this.setState({ totalProgress })
this.emit('progress', totalProgress)
}
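  // Worked example of the averaging above (numbers are illustrative): with two
  // sized files of 100 and 300 bytes and one unsized file, averageSize is 200 and
  // totalSize becomes 400 + 200 = 600. If the sized files have uploaded 50 + 150
  // bytes and the unsized file reports 50%, uploadedSize is 200 + 200 * 0.5 = 300,
  // so totalProgress is Math.round(300 / 600 * 100) = 50.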
/**
* Registers listeners for all global actions, like:
* `error`, `file-removed`, `upload-progress`
*/
_addListeners () {
this.on('error', (error) => {
this.setState({ error: error.message })
})
this.on('upload-error', (file, error, response) => {
this.setFileState(file.id, {
error: error.message,
response
})
this.setState({ error: error.message })
let message = this.i18n('failedToUpload', { file: file.name })
if (typeof error === 'object' && error.message) {
message = { message: message, details: error.message }
}
this.info(message, 'error', 5000)
})
this.on('upload', () => {
this.setState({ error: null })
})
this.on('upload-started', (file, upload) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
this.setFileState(file.id, {
progress: {
uploadStarted: Date.now(),
uploadComplete: false,
percentage: 0,
bytesUploaded: 0,
bytesTotal: file.size
}
})
})
this.on('upload-progress', this._calculateProgress)
this.on('upload-success', (file, uploadResp) => {
const currentProgress = this.getFile(file.id).progress
this.setFileState(file.id, {
progress: Object.assign({}, currentProgress, {
uploadComplete: true,
percentage: 100,
bytesUploaded: currentProgress.bytesTotal
}),
response: uploadResp,
uploadURL: uploadResp.uploadURL,
isPaused: false
})
this._calculateTotalProgress()
})
this.on('preprocess-progress', (file, progress) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
this.setFileState(file.id, {
progress: Object.assign({}, this.getFile(file.id).progress, {
preprocess: progress
})
})
})
this.on('preprocess-complete', (file) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
const files = Object.assign({}, this.getState().files)
files[file.id] = Object.assign({}, files[file.id], {
progress: Object.assign({}, files[file.id].progress)
})
delete files[file.id].progress.preprocess
this.setState({ files: files })
})
this.on('postprocess-progress', (file, progress) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
this.setFileState(file.id, {
progress: Object.assign({}, this.getState().files[file.id].progress, {
postprocess: progress
})
})
})
this.on('postprocess-complete', (file) => {
if (!this.getFile(file.id)) {
this.log(`Not setting progress for a file that has been removed: ${file.id}`)
return
}
const files = Object.assign({}, this.getState().files)
files[file.id] = Object.assign({}, files[file.id], {
progress: Object.assign({}, files[file.id].progress)
})
delete files[file.id].progress.postprocess
// TODO should we set some kind of `fullyComplete` property on the file object
// so it's easier to see that the file is upload…fully complete…rather than
// what we have to do now (`uploadComplete && !postprocess`)
this.setState({ files: files })
})
this.on('restored', () => {
// Files may have changed--ensure progress is still accurate.
this._calculateTotalProgress()
})
// show informer if offline
if (typeof window !== 'undefined' && window.addEventListener) {
window.addEventListener('online', () => this.updateOnlineStatus())
window.addEventListener('offline', () => this.updateOnlineStatus())
setTimeout(() => this.updateOnlineStatus(), 3000)
}
}
updateOnlineStatus () {
const online =
typeof window.navigator.onLine !== 'undefined'
? window.navigator.onLine
: true
if (!online) {
this.emit('is-offline')
this.info(this.i18n('noInternetConnection'), 'error', 0)
this.wasOffline = true
} else {
this.emit('is-online')
if (this.wasOffline) {
this.emit('back-online')
this.info(this.i18n('connectedToInternet'), 'success', 3000)
this.wasOffline = false
}
}
}
getID () {
return this.opts.id
}
/**
* Registers a plugin with Core.
*
* @param {Object} Plugin object
* @param {Object} [opts] object with options to be passed to Plugin
* @returns {Object} self for chaining
*/
use (Plugin, opts) {
if (typeof Plugin !== 'function') {
let msg = `Expected a plugin class, but got ${Plugin === null ? 'null' : typeof Plugin}.` +
' Please verify that the plugin was imported and spelled correctly.'
throw new TypeError(msg)
}
// Instantiate
const plugin = new Plugin(this, opts)
const pluginId = plugin.id
this.plugins[plugin.type] = this.plugins[plugin.type] || []
if (!pluginId) {
throw new Error('Your plugin must have an id')
}
if (!plugin.type) {
throw new Error('Your plugin must have a type')
}
let existsPluginAlready = this.getPlugin(pluginId)
if (existsPluginAlready) {
let msg = `Already found a plugin named '${existsPluginAlready.id}'. ` +
`Tried to use: '${pluginId}'.\n` +
`Uppy plugins must have unique 'id' options. See https://uppy.io/docs/plugins/#id.`
throw new Error(msg)
}
if (Plugin.VERSION) {
this.log(`Using ${pluginId} v${Plugin.VERSION}`)
}
this.plugins[plugin.type].push(plugin)
plugin.install()
return this
}
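  // Registration sketch, assuming `MyPlugin` is a Plugin subclass with `id` and
  // `type` set (the option name below is illustrative):
  //
  //   uppy.use(MyPlugin, { target: 'body' })
  //       .use(AnotherPlugin)   // use() returns `this`, so calls can be chained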
/**
* Find one Plugin by name.
*
* @param {string} id plugin id
* @returns {Object|boolean}
*/
getPlugin (id) {
let foundPlugin = null
this.iteratePlugins((plugin) => {
if (plugin.id === id) {
foundPlugin = plugin
return false
}
})
return foundPlugin
}
/**
* Iterate through all `use`d plugins.
*
* @param {Function} method that will be run on each plugin
*/
iteratePlugins (method) {
Object.keys(this.plugins).forEach(pluginType => {
this.plugins[pluginType].forEach(method)
})
}
/**
* Uninstall and remove a plugin.
*
* @param {Object} instance The plugin instance to remove.
*/
removePlugin (instance) {
this.log(`Removing plugin ${instance.id}`)
this.emit('plugin-remove', instance)
if (instance.uninstall) {
instance.uninstall()
}
const list = this.plugins[instance.type].slice()
const index = list.indexOf(instance)
if (index !== -1) {
list.splice(index, 1)
this.plugins[instance.type] = list
}
const updatedState = this.getState()
delete updatedState.plugins[instance.id]
this.setState(updatedState)
}
/**
* Uninstall all plugins and close down this Uppy instance.
*/
close () {
this.log(`Closing Uppy instance ${this.opts.id}: removing all files and uninstalling plugins`)
this.reset()
this._storeUnsubscribe()
this.iteratePlugins((plugin) => {
this.removePlugin(plugin)
})
}
/**
* Set info message in `state.info`, so that UI plugins like `Informer`
* can display the message.
*
* @param {string | object} message Message to be displayed by the informer
* @param {string} [type]
* @param {number} [duration]
*/
info (message, type = 'info', duration = 3000) {
const isComplexMessage = typeof message === 'object'
this.setState({
info: {
isHidden: false,
type: type,
message: isComplexMessage ? message.message : message,
details: isComplexMessage ? message.details : null
}
})
this.emit('info-visible')
clearTimeout(this.infoTimeoutID)
if (duration === 0) {
this.infoTimeoutID = undefined
return
}
// hide the informer after `duration` milliseconds
this.infoTimeoutID = setTimeout(this.hideInfo, duration)
}
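  // Usage sketch (values illustrative): a plain string auto-hides after
  // `duration` ms, while `duration === 0` keeps the message visible until
  // hideInfo() is called.
  //
  //   uppy.info('Connecting...', 'info', 3000)
  //   uppy.info({ message: 'Upload failed', details: 'Server returned 500' }, 'error', 0)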
hideInfo () {
const newInfo = Object.assign({}, this.getState().info, {
isHidden: true
})
this.setState({
info: newInfo
})
this.emit('info-hidden')
}
/**
   * Passes messages to a function, provided in `opts.logger`.
   * If `opts.logger: Uppy.debugLogger` or `opts.debug: true`, logs to the browser console.
*
* @param {string|Object} message to log
* @param {string} [type] optional `error` or `warning`
*/
log (message, type) {
const { logger } = this.opts
switch (type) {
case 'error': logger.error(message); break
case 'warning': logger.warn(message); break
default: logger.debug(message); break
}
}
/**
* Obsolete, event listeners are now added in the constructor.
*/
run () {
this.log('Calling run() is no longer necessary.', 'warning')
return this
}
/**
* Restore an upload by its ID.
*/
restore (uploadID) {
this.log(`Core: attempting to restore upload "${uploadID}"`)
if (!this.getState().currentUploads[uploadID]) {
this._removeUpload(uploadID)
return Promise.reject(new Error('Nonexistent upload'))
}
return this._runUpload(uploadID)
}
/**
* Create an upload for a bunch of files.
*
* @param {Array<string>} fileIDs File IDs to include in this upload.
* @returns {string} ID of this upload.
*/
_createUpload (fileIDs) {
const { allowNewUpload, currentUploads } = this.getState()
if (!allowNewUpload) {
throw new Error('Cannot create a new upload: already uploading.')
}
const uploadID = cuid()
this.emit('upload', {
id: uploadID,
fileIDs: fileIDs
})
this.setState({
allowNewUpload: this.opts.allowMultipleUploads !== false,
currentUploads: {
...currentUploads,
[uploadID]: {
fileIDs: fileIDs,
step: 0,
result: {}
}
}
})
return uploadID
}
_getUpload (uploadID) {
const { currentUploads } = this.getState()
return currentUploads[uploadID]
}
/**
* Add data to an upload's result object.
*
* @param {string} uploadID The ID of the upload.
* @param {Object} data Data properties to add to the result object.
*/
addResultData (uploadID, data) {
if (!this._getUpload(uploadID)) {
this.log(`Not setting result for an upload that has been removed: ${uploadID}`)
return
}
const currentUploads = this.getState().currentUploads
const currentUpload = Object.assign({}, currentUploads[uploadID], {
result: Object.assign({}, currentUploads[uploadID].result, data)
})
this.setState({
currentUploads: Object.assign({}, currentUploads, {
[uploadID]: currentUpload
})
})
}
/**
* Remove an upload, eg. if it has been canceled or completed.
*
* @param {string} uploadID The ID of the upload.
*/
_removeUpload (uploadID) {
const currentUploads = Object.assign({}, this.getState().currentUploads)
delete currentUploads[uploadID]
this.setState({
currentUploads: currentUploads
})
}
/**
* Run an upload. This picks up where it left off in case the upload is being restored.
*
* @private
*/
_runUpload (uploadID) {
const uploadData = this.getState().currentUploads[uploadID]
const restoreStep = uploadData.step
const steps = [
...this.preProcessors,
...this.uploaders,
...this.postProcessors
]
let lastStep = Promise.resolve()
steps.forEach((fn, step) => {
// Skip this step if we are restoring and have already completed this step before.
if (step < restoreStep) {
return
}
lastStep = lastStep.then(() => {
const { currentUploads } = this.getState()
const currentUpload = currentUploads[uploadID]
if (!currentUpload) {
return
}
const updatedUpload = Object.assign({}, currentUpload, {
step: step
})
this.setState({
currentUploads: Object.assign({}, currentUploads, {
[uploadID]: updatedUpload
})
})
// TODO give this the `updatedUpload` object as its only parameter maybe?
// Otherwise when more metadata may be added to the upload this would keep getting more parameters
return fn(updatedUpload.fileIDs, uploadID)
}).then((result) => {
return null
})
})
// Not returning the `catch`ed promise, because we still want to return a rejected
// promise from this method if the upload failed.
lastStep.catch((err) => {
this.emit('error', err, uploadID)
this._removeUpload(uploadID)
})
return lastStep.then(() => {
// Set result data.
const { currentUploads } = this.getState()
const currentUpload = currentUploads[uploadID]
if (!currentUpload) {
return
}
const files = currentUpload.fileIDs
.map((fileID) => this.getFile(fileID))
const successful = files.filter((file) => !file.error)
const failed = files.filter((file) => file.error)
this.addResultData(uploadID, { successful, failed, uploadID })
}).then(() => {
// Emit completion events.
// This is in a separate function so that the `currentUploads` variable
// always refers to the latest state. In the handler right above it refers
// to an outdated object without the `.result` property.
const { currentUploads } = this.getState()
if (!currentUploads[uploadID]) {
return
}
const currentUpload = currentUploads[uploadID]
const result = currentUpload.result
this.emit('complete', result)
this._removeUpload(uploadID)
return result
}).then((result) => {
if (result == null) {
this.log(`Not setting result for an upload that has been removed: ${uploadID}`)
}
return result
})
}
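  // Note on the step pipeline above: steps run in the order preProcessors,
  // uploaders, postProcessors, and `step` indices are zero-based. A restored
  // upload with `step: 1` therefore skips the first registered function and
  // resumes from the second.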
/**
* Start an upload for all the files that are not currently being uploaded.
*
* @returns {Promise}
*/
upload () {
const onError = (err) => {
const message = typeof err === 'object' ? err.message : err
const details = (typeof err === 'object' && err.details) ? err.details : ''
this.log(`${message} ${details}`)
this.info({ message: message, details: details }, 'error', 5000)
throw (typeof err === 'object' ? err : new Error(err))
}
if (!this.plugins.uploader) {
this.log('No uploader type plugins are used', 'warning')
}
let files = this.getState().files
const onBeforeUploadResult = this.opts.onBeforeUpload(files)
if (onBeforeUploadResult === false) {
return Promise.reject(new Error('Not starting the upload because onBeforeUpload returned false'))
}
if (onBeforeUploadResult && typeof onBeforeUploadResult === 'object') {
files = onBeforeUploadResult
}
return Promise.resolve()
.then(() => this._checkMinNumberOfFiles(files))
.then(() => {
const { currentUploads } = this.getState()
// get a list of files that are currently assigned to uploads
const currentlyUploadingFiles = Object.keys(currentUploads).reduce((prev, curr) => prev.concat(currentUploads[curr].fileIDs), [])
const waitingFileIDs = []
Object.keys(files).forEach((fileID) => {
const file = this.getFile(fileID)
// if the file hasn't started uploading and hasn't already been assigned to an upload..
if ((!file.progress.uploadStarted) && (currentlyUploadingFiles.indexOf(fileID) === -1)) {
waitingFileIDs.push(file.id)
}
})
const uploadID = this._createUpload(waitingFileIDs)
return this._runUpload(uploadID)
})
.catch((err) => {
if (err.isRestriction) {
this.emit('restriction-failed', null, err)
}
onError(err)
})
}
}
module.exports = function (opts) {
return new Uppy(opts)
}
// Expose class constructor.
module.exports.Uppy = Uppy
module.exports.Plugin = Plugin
module.exports.debugLogger = debugLogger
| 1 | 12,334 | Could we combine this with the `setState` call above, so we don't have to call it twice? | transloadit-uppy | js |
@@ -31,10 +31,10 @@ class String
end
def fix_encoding_if_invalid!
- unless valid_encoding?
- encode!('utf-8', 'binary', invalid: :replace, undef: :replace)
- end
- force_encoding('utf-8')
+ # All new strings claim to be 8-bit ASCII in the DB, but are really saved as UTF-8...
+ force_encoding('utf-8') unless encoding == 'UTF-8'
+ # ...but old strings are a hodge-podge of encodings, so replace any unrecognizable characters with unknowns.
+ encode!('utf-8', invalid: :replace, undef: :replace) unless valid_encoding?
self
end
| 1 | class String
def strip_tags
gsub(/<.*?>/, '')
end
def strip_tags_preserve_line_breaks
html = CGI.unescapeHTML(self).gsub(/\r/, '')
# Preserve line-breaking tags by converting them to carriage returns
html.gsub!(/<br\s*\/?>\s*\n?/, "\n")
html.gsub!(/<\/p>\s*\n?/, "\n\n")
html.gsub!(/<p\s*\/>\s*\n?/, "\n\n")
text = html.strip_tags
# Restore line-breaking tags
text.gsub!(/\n(\s*\n)+/, '<br/><br/>')
text.gsub!(/\n/, '<br/>')
# Strip leading and trailing breaks
text.gsub!(/^(<br\/>)+/, '')
text.gsub!(/(<br\/>)+$/, '')
text
end
def valid_http_url?
URI.parse(self).is_a?(URI::HTTP)
rescue URI::InvalidURIError
false
end
def fix_encoding_if_invalid!
unless valid_encoding?
encode!('utf-8', 'binary', invalid: :replace, undef: :replace)
end
force_encoding('utf-8')
self
end
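  # Illustrative sketch of the method above (byte values assumed):
  #
  #   "caf\xE9".fix_encoding_if_invalid!   # invalid byte replaced, e.g. => "caf\uFFFD"
  #   "café".fix_encoding_if_invalid!      # already valid; only re-tagged as UTF-8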
def to_bool
(self =~ /^(true|t|yes|y|1)$/i) ? true : false
end
class << self
def clean_string(str)
return str if str.blank?
str.to_s.strip.strip_tags
end
def clean_url(url)
return url if url.blank?
url.strip!
(url =~ %r{^(http:/)|(https:/)|(ftp:/)}) ? url : "http://#{url}"
end
end
end
| 1 | 7,528 | That part that still leaves me mystified is how the String class, when populated with a value from the SQL_ASCII encoded database is set to "UTF-8" encoding. Since the database is SQL_ASCII, each byte in the string stored in the database is considered one character. Ruby, however, is using UTF-8, which is writing a series of one-byte code points that are only meaningful when considered in the correct encoding. When Ruby passes a string to Postgresql, the code points are essentially "flattened" into bytes as they are written to the DB. When Rails (through Ruby) asks for a string from the DB, where does the information about the encoding come from? | blackducksoftware-ohloh-ui | rb |
@@ -233,13 +233,14 @@ func PrecreatedDaoOption(dao *blockDAO) Option {
}
}
-// BoltDBDaoOption sets blockchain's dao with BoltDB from config.Chain.ChainDBPath
+// BoltDBDaoOption sets blockchain's dao with BoltDB from config.Chain.ChainDBPath and cfg.DB.IndexDBPath
func BoltDBDaoOption() Option {
return func(bc *blockchain, cfg config.Config) error {
cfg.DB.DbPath = cfg.Chain.ChainDBPath // TODO: remove this after moving TrieDBPath from cfg.Chain to cfg.DB
_, gateway := cfg.Plugins[config.GatewayPlugin]
bc.dao = newBlockDAO(
- db.NewBoltDB(cfg.DB),
+ db.NewBoltDB(cfg.DB, cfg.DB.DbPath),
+ db.NewBoltDB(cfg.DB, cfg.DB.IndexDBPath),
gateway && !cfg.Chain.EnableAsyncIndexWrite,
cfg.Chain.CompressBlock,
cfg.Chain.MaxCacheSize, | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package blockchain
import (
"context"
"math/big"
"os"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/facebookgo/clock"
"github.com/iotexproject/go-pkgs/bloom"
"github.com/iotexproject/go-pkgs/hash"
"github.com/iotexproject/iotex-address/address"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
accountutil "github.com/iotexproject/iotex-core/action/protocol/account/util"
"github.com/iotexproject/iotex-core/action/protocol/execution/evm"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/actpool/actioniterator"
"github.com/iotexproject/iotex-core/blockchain/block"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/db"
"github.com/iotexproject/iotex-core/pkg/lifecycle"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/prometheustimer"
"github.com/iotexproject/iotex-core/pkg/util/byteutil"
"github.com/iotexproject/iotex-core/pkg/util/fileutil"
"github.com/iotexproject/iotex-core/state"
"github.com/iotexproject/iotex-core/state/factory"
)
var (
blockMtc = prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Name: "iotex_block_metrics",
Help: "Block metrics.",
},
[]string{"type"},
)
errDelegatesNotExist = errors.New("delegates cannot be found")
)
func init() {
prometheus.MustRegister(blockMtc)
}
// Blockchain represents the blockchain data structure and hosts the APIs to access it
type Blockchain interface {
lifecycle.StartStopper
// Balance returns balance of an account
Balance(addr string) (*big.Int, error)
// Nonce returns the nonce if the account exists
Nonce(addr string) (uint64, error)
// CreateState adds a new account with initial balance to the factory
CreateState(addr string, init *big.Int) (*state.Account, error)
// CandidatesByHeight returns the candidate list by a given height
CandidatesByHeight(height uint64) ([]*state.Candidate, error)
// ProductivityByEpoch returns the number of produced blocks per delegate in an epoch
ProductivityByEpoch(epochNum uint64) (uint64, map[string]uint64, error)
// For exposing blockchain states
// GetHeightByHash returns Block's height by hash
GetHeightByHash(h hash.Hash256) (uint64, error)
// GetHashByHeight returns Block's hash by height
GetHashByHeight(height uint64) (hash.Hash256, error)
// GetBlockByHeight returns Block by height
GetBlockByHeight(height uint64) (*block.Block, error)
// GetBlockByHash returns Block by hash
GetBlockByHash(h hash.Hash256) (*block.Block, error)
// BlockHeaderByHeight return block header by height
BlockHeaderByHeight(height uint64) (*block.Header, error)
// BlockHeaderByHash return block header by hash
BlockHeaderByHash(h hash.Hash256) (*block.Header, error)
// BlockFooterByHeight return block footer by height
BlockFooterByHeight(height uint64) (*block.Footer, error)
// BlockFooterByHash return block footer by hash
BlockFooterByHash(h hash.Hash256) (*block.Footer, error)
// GetTotalActions returns the total number of actions
GetTotalActions() (uint64, error)
// GetNumActions returns the number of actions
GetNumActions(height uint64) (uint64, error)
// GetTranferAmount returns the transfer amount
GetTranferAmount(height uint64) (*big.Int, error)
// GetReceiptByActionHash returns the receipt by action hash
GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error)
// GetActionsFromAddress returns actions from address
GetActionsFromAddress(address string) ([]hash.Hash256, error)
// GetActionsToAddress returns actions to address
GetActionsToAddress(address string) ([]hash.Hash256, error)
// GetActionCountByAddress returns action count by address
GetActionCountByAddress(address string) (uint64, error)
// GetActionByActionHash returns action by action hash
GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error)
// GetBlockHashByActionHash returns Block hash by action hash
GetBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error)
// GetReceiptsByHeight returns action receipts by block height
GetReceiptsByHeight(height uint64) ([]*action.Receipt, error)
// GetFactory returns the state factory
GetFactory() factory.Factory
// GetChainID returns the chain ID
ChainID() uint32
	// ChainAddress returns the chain address on the parent chain; the root chain returns empty.
ChainAddress() string
// TipHash returns tip block's hash
TipHash() hash.Hash256
// TipHeight returns tip block's height
TipHeight() uint64
// StateByAddr returns account of a given address
StateByAddr(address string) (*state.Account, error)
	// RecoverChainAndState recovers the chain to the target height and refreshes the state db if necessary
RecoverChainAndState(targetHeight uint64) error
// GenesisTimestamp returns the timestamp of genesis
GenesisTimestamp() int64
// For block operations
// MintNewBlock creates a new block with given actions
// Note: the coinbase transfer will be added to the given transfers when minting a new block
MintNewBlock(
actionMap map[string][]action.SealedEnvelope,
timestamp time.Time,
) (*block.Block, error)
// CommitBlock validates and appends a block to the chain
CommitBlock(blk *block.Block) error
// ValidateBlock validates a new block before adding it to the blockchain
ValidateBlock(blk *block.Block) error
// For action operations
// Validator returns the current validator object
Validator() Validator
// SetValidator sets the current validator object
SetValidator(val Validator)
// For smart contract operations
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
ExecuteContractRead(caller address.Address, ex *action.Execution) ([]byte, *action.Receipt, error)
// AddSubscriber make you listen to every single produced block
AddSubscriber(BlockCreationSubscriber) error
// RemoveSubscriber make you listen to every single produced block
RemoveSubscriber(BlockCreationSubscriber) error
// GetActionHashFromIndex returns action hash from index
GetActionHashFromIndex(index uint64) (hash.Hash256, error)
}
// blockchain implements the Blockchain interface
type blockchain struct {
mu sync.RWMutex // mutex to protect utk, tipHeight and tipHash
dao *blockDAO
config config.Config
tipHeight uint64
tipHash hash.Hash256
validator Validator
lifecycle lifecycle.Lifecycle
clk clock.Clock
blocklistener []BlockCreationSubscriber
timerFactory *prometheustimer.TimerFactory
// used by account-based model
sf factory.Factory
registry *protocol.Registry
enableExperimentalActions bool
}
// Option sets blockchain construction parameter
type Option func(*blockchain, config.Config) error
// DefaultStateFactoryOption sets blockchain's sf from config
func DefaultStateFactoryOption() Option {
return func(bc *blockchain, cfg config.Config) (err error) {
if cfg.Chain.EnableTrielessStateDB {
bc.sf, err = factory.NewStateDB(cfg, factory.DefaultStateDBOption())
} else {
bc.sf, err = factory.NewFactory(cfg, factory.DefaultTrieOption())
}
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
return nil
}
}
// PrecreatedStateFactoryOption sets blockchain's state.Factory to sf
func PrecreatedStateFactoryOption(sf factory.Factory) Option {
return func(bc *blockchain, conf config.Config) error {
bc.sf = sf
return nil
}
}
// InMemStateFactoryOption sets blockchain's factory.Factory as in memory sf
func InMemStateFactoryOption() Option {
return func(bc *blockchain, cfg config.Config) error {
sf, err := factory.NewFactory(cfg, factory.InMemTrieOption())
if err != nil {
return errors.Wrapf(err, "Failed to create state factory")
}
bc.sf = sf
return nil
}
}
// PrecreatedDaoOption sets blockchain's dao
func PrecreatedDaoOption(dao *blockDAO) Option {
return func(bc *blockchain, conf config.Config) error {
bc.dao = dao
return nil
}
}
// BoltDBDaoOption sets blockchain's dao with BoltDB from config.Chain.ChainDBPath
func BoltDBDaoOption() Option {
return func(bc *blockchain, cfg config.Config) error {
cfg.DB.DbPath = cfg.Chain.ChainDBPath // TODO: remove this after moving TrieDBPath from cfg.Chain to cfg.DB
_, gateway := cfg.Plugins[config.GatewayPlugin]
bc.dao = newBlockDAO(
db.NewBoltDB(cfg.DB),
gateway && !cfg.Chain.EnableAsyncIndexWrite,
cfg.Chain.CompressBlock,
cfg.Chain.MaxCacheSize,
cfg.DB,
)
return nil
}
}
// InMemDaoOption sets blockchain's dao with MemKVStore
func InMemDaoOption() Option {
return func(bc *blockchain, cfg config.Config) error {
_, gateway := cfg.Plugins[config.GatewayPlugin]
bc.dao = newBlockDAO(
db.NewMemKVStore(),
gateway && !cfg.Chain.EnableAsyncIndexWrite,
cfg.Chain.CompressBlock,
cfg.Chain.MaxCacheSize,
cfg.DB,
)
return nil
}
}
// ClockOption overrides the default clock
func ClockOption(clk clock.Clock) Option {
return func(bc *blockchain, conf config.Config) error {
bc.clk = clk
return nil
}
}
// RegistryOption sets the blockchain with the protocol registry
func RegistryOption(registry *protocol.Registry) Option {
return func(bc *blockchain, conf config.Config) error {
bc.registry = registry
return nil
}
}
// EnableExperimentalActions enables the blockchain to process experimental actions
func EnableExperimentalActions() Option {
return func(bc *blockchain, conf config.Config) error {
bc.enableExperimentalActions = true
return nil
}
}
// NewBlockchain creates a new blockchain and DB instance
func NewBlockchain(cfg config.Config, opts ...Option) Blockchain {
// create the Blockchain
chain := &blockchain{
config: cfg,
clk: clock.New(),
}
for _, opt := range opts {
if err := opt(chain, cfg); err != nil {
log.S().Panicf("Failed to execute blockchain creation option %p: %v", opt, err)
}
}
timerFactory, err := prometheustimer.New(
"iotex_blockchain_perf",
"Performance of blockchain module",
[]string{"topic", "chainID"},
[]string{"default", strconv.FormatUint(uint64(cfg.Chain.ID), 10)},
)
if err != nil {
log.L().Panic("Failed to generate prometheus timer factory.", zap.Error(err))
}
chain.timerFactory = timerFactory
// Set block validator
if err != nil {
log.L().Panic("Failed to get block producer address.", zap.Error(err))
}
chain.validator = &validator{
sf: chain.sf,
validatorAddr: cfg.ProducerAddress().String(),
enableExperimentalActions: chain.enableExperimentalActions,
}
if chain.dao != nil {
chain.lifecycle.Add(chain.dao)
}
if chain.sf != nil {
chain.lifecycle.Add(chain.sf)
}
return chain
}
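// Construction sketch (the cfg and option combination shown are illustrative):
//
//	chain := NewBlockchain(
//		cfg,
//		DefaultStateFactoryOption(),
//		BoltDBDaoOption(),
//		RegistryOption(registry),
//	)
//
// NewBlockchain panics (via log.S().Panicf) if any option fails to apply.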
func (bc *blockchain) ChainID() uint32 {
return atomic.LoadUint32(&bc.config.Chain.ID)
}
func (bc *blockchain) ChainAddress() string {
return bc.config.Chain.Address
}
// Start starts the blockchain
func (bc *blockchain) Start(ctx context.Context) (err error) {
bc.mu.Lock()
defer bc.mu.Unlock()
if err = bc.lifecycle.OnStart(ctx); err != nil {
return err
}
// get blockchain tip height
if bc.tipHeight, err = bc.dao.getBlockchainHeight(); err != nil {
return err
}
if bc.tipHeight == 0 {
return bc.startEmptyBlockchain()
}
// get blockchain tip hash
if bc.tipHash, err = bc.dao.getBlockHash(bc.tipHeight); err != nil {
return err
}
return bc.startExistingBlockchain()
}
// Stop stops the blockchain.
func (bc *blockchain) Stop(ctx context.Context) error {
bc.mu.Lock()
defer bc.mu.Unlock()
return bc.lifecycle.OnStop(ctx)
}
// Balance returns balance of address
func (bc *blockchain) Balance(addr string) (*big.Int, error) {
return bc.sf.Balance(addr)
}
// Nonce returns the nonce if the account exists
func (bc *blockchain) Nonce(addr string) (uint64, error) {
return bc.sf.Nonce(addr)
}
// CandidatesByHeight returns the candidate list by a given height
func (bc *blockchain) CandidatesByHeight(height uint64) ([]*state.Candidate, error) {
return bc.candidatesByHeight(height)
}
// ProductivityByEpoch returns the map of the number of blocks produced per delegate in an epoch
func (bc *blockchain) ProductivityByEpoch(epochNum uint64) (uint64, map[string]uint64, error) {
p, ok := bc.registry.Find(rolldpos.ProtocolID)
if !ok {
return 0, nil, errors.New("rolldpos protocol is not registered")
}
rp, ok := p.(*rolldpos.Protocol)
if !ok {
return 0, nil, errors.New("fail to cast rolldpos protocol")
}
var isCurrentEpoch bool
currentEpochNum := rp.GetEpochNum(bc.tipHeight)
if epochNum > currentEpochNum {
return 0, nil, errors.New("epoch number is larger than current epoch number")
}
if epochNum == currentEpochNum {
isCurrentEpoch = true
}
epochStartHeight := rp.GetEpochHeight(epochNum)
var epochEndHeight uint64
if isCurrentEpoch {
epochEndHeight = bc.tipHeight
} else {
epochEndHeight = rp.GetEpochLastBlockHeight(epochNum)
}
numBlks := epochEndHeight - epochStartHeight + 1
p, ok = bc.registry.Find(poll.ProtocolID)
if !ok {
return 0, nil, errors.New("poll protocol is not registered")
}
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: bc.tipHeight,
Registry: bc.registry,
})
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return 0, nil, err
}
s, err := p.ReadState(ctx, ws, []byte("ActiveBlockProducersByEpoch"),
byteutil.Uint64ToBytes(epochNum))
if err != nil {
return 0, nil, status.Error(codes.NotFound, err.Error())
}
var activeConsensusBlockProducers state.CandidateList
if err := activeConsensusBlockProducers.Deserialize(s); err != nil {
return 0, nil, err
}
produce := make(map[string]uint64)
for _, bp := range activeConsensusBlockProducers {
produce[bp.Address] = 0
}
for i := uint64(0); i < numBlks; i++ {
blk, err := bc.blockHeaderByHeight(epochStartHeight + i)
if err != nil {
return 0, nil, err
}
produce[blk.ProducerAddress()]++
}
return numBlks, produce, nil
}
// GetHeightByHash returns block's height by hash
func (bc *blockchain) GetHeightByHash(h hash.Hash256) (uint64, error) {
return bc.dao.getBlockHeight(h)
}
// GetHashByHeight returns block's hash by height
func (bc *blockchain) GetHashByHeight(height uint64) (hash.Hash256, error) {
return bc.dao.getBlockHash(height)
}
// GetBlockByHeight returns the block at the given height
func (bc *blockchain) GetBlockByHeight(height uint64) (*block.Block, error) {
blk, err := bc.getBlockByHeight(height)
if blk == nil || err != nil {
return blk, err
}
blk.HeaderLogger(log.L()).Debug("Get block.")
return blk, err
}
// GetBlockByHash returns the block with the given hash
func (bc *blockchain) GetBlockByHash(h hash.Hash256) (*block.Block, error) {
return bc.dao.getBlock(h)
}
func (bc *blockchain) BlockHeaderByHeight(height uint64) (*block.Header, error) {
return bc.blockHeaderByHeight(height)
}
func (bc *blockchain) BlockHeaderByHash(h hash.Hash256) (*block.Header, error) {
return bc.dao.Header(h)
}
func (bc *blockchain) BlockFooterByHeight(height uint64) (*block.Footer, error) {
return bc.blockFooterByHeight(height)
}
func (bc *blockchain) BlockFooterByHash(h hash.Hash256) (*block.Footer, error) {
return bc.dao.Footer(h)
}
// GetTotalActions returns the total number of actions
func (bc *blockchain) GetTotalActions() (uint64, error) {
return bc.dao.getTotalActions()
}
// GetNumActions returns the number of actions
func (bc *blockchain) GetNumActions(height uint64) (uint64, error) {
return bc.dao.getNumActions(height)
}
// GetTranferAmount returns the transfer amount
func (bc *blockchain) GetTranferAmount(height uint64) (*big.Int, error) {
return bc.dao.getTranferAmount(height)
}
// GetReceiptByActionHash returns the receipt by action hash
func (bc *blockchain) GetReceiptByActionHash(h hash.Hash256) (*action.Receipt, error) {
return bc.dao.getReceiptByActionHash(h)
}
// GetActionsFromAddress returns actions from address
func (bc *blockchain) GetActionsFromAddress(addrStr string) ([]hash.Hash256, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return nil, err
}
return getActionsBySenderAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
}
// GetActionHashFromIndex returns the action hash at the given index
func (bc *blockchain) GetActionHashFromIndex(index uint64) (hash.Hash256, error) {
return bc.dao.getActionHashFromIndex(index)
}
// GetActionsToAddress returns actions sent to the given address
func (bc *blockchain) GetActionsToAddress(addrStr string) ([]hash.Hash256, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return nil, err
}
return getActionsByRecipientAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
}
// GetActionCountByAddress returns action count by address
func (bc *blockchain) GetActionCountByAddress(addrStr string) (uint64, error) {
addr, err := address.FromString(addrStr)
if err != nil {
return 0, err
}
fromCount, err := getActionCountBySenderAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
if err != nil {
return 0, err
}
toCount, err := getActionCountByRecipientAddress(bc.dao.kvstore, hash.BytesToHash160(addr.Bytes()))
if err != nil {
return 0, err
}
return fromCount + toCount, nil
}
func (bc *blockchain) getActionByActionHashHelper(h hash.Hash256) (hash.Hash256, error) {
return getBlockHashByActionHash(bc.dao.kvstore, h)
}
// GetActionByActionHash returns action by action hash
func (bc *blockchain) GetActionByActionHash(h hash.Hash256) (action.SealedEnvelope, error) {
blkHash, err := bc.getActionByActionHashHelper(h)
if err != nil {
return action.SealedEnvelope{}, err
}
blk, err := bc.dao.getBlock(blkHash)
if err != nil {
return action.SealedEnvelope{}, err
}
for _, act := range blk.Actions {
if act.Hash() == h {
return act, nil
}
}
return action.SealedEnvelope{}, errors.Errorf("block %x does not have transfer %x", blkHash, h)
}
// GetBlockHashByActionHash returns Block hash by action hash
func (bc *blockchain) GetBlockHashByActionHash(h hash.Hash256) (hash.Hash256, error) {
return getBlockHashByActionHash(bc.dao.kvstore, h)
}
// GetReceiptsByHeight returns action receipts by block height
func (bc *blockchain) GetReceiptsByHeight(height uint64) ([]*action.Receipt, error) {
return bc.dao.getReceipts(height)
}
// GetFactory returns the state factory
func (bc *blockchain) GetFactory() factory.Factory {
return bc.sf
}
// TipHash returns tip block's hash
func (bc *blockchain) TipHash() hash.Hash256 {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.tipHash
}
// TipHeight returns tip block's height
func (bc *blockchain) TipHeight() uint64 {
return atomic.LoadUint64(&bc.tipHeight)
}
// ValidateBlock validates a new block before adding it to the blockchain
func (bc *blockchain) ValidateBlock(blk *block.Block) error {
bc.mu.RLock()
defer bc.mu.RUnlock()
timer := bc.timerFactory.NewTimer("ValidateBlock")
defer timer.End()
return bc.validateBlock(blk)
}
func (bc *blockchain) MintNewBlock(
actionMap map[string][]action.SealedEnvelope,
timestamp time.Time,
) (*block.Block, error) {
bc.mu.RLock()
defer bc.mu.RUnlock()
mintNewBlockTimer := bc.timerFactory.NewTimer("MintNewBlock")
defer mintNewBlockTimer.End()
newblockHeight := bc.tipHeight + 1
// run execution and update state trie root hash
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrap(err, "Failed to obtain working set from state factory")
}
gasLimitForContext := bc.config.Genesis.BlockGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
BlockHeight: newblockHeight,
BlockTimeStamp: timestamp,
Producer: bc.config.ProducerAddress(),
GasLimit: gasLimitForContext,
Registry: bc.registry,
})
if newblockHeight == bc.config.Genesis.AleutianBlockHeight {
if err := bc.updateAleutianEpochRewardAmount(ctx, ws); err != nil {
return nil, err
}
}
_, rc, actions, err := bc.pickAndRunActions(ctx, actionMap, ws)
if err != nil {
return nil, errors.Wrapf(err, "Failed to update state changes in new block %d", newblockHeight)
}
blockMtc.WithLabelValues("numActions").Set(float64(len(actions)))
sk := bc.config.ProducerPrivateKey()
ra := block.NewRunnableActionsBuilder().
SetHeight(newblockHeight).
SetTimeStamp(timestamp).
AddActions(actions...).
Build(sk.PublicKey())
prevBlkHash := bc.tipHash
	// The first block's previous block hash points to the digest of the genesis config. This is to guarantee all nodes
	// can verify that they start from the same genesis
if newblockHeight == 1 {
prevBlkHash = bc.config.Genesis.Hash()
}
blk, err := block.NewBuilder(ra).
SetPrevBlockHash(prevBlkHash).
SetDeltaStateDigest(ws.Digest()).
SetReceipts(rc).
SetReceiptRoot(calculateReceiptRoot(rc)).
SetLogsBloom(calculateLogsBloom(bc.config, newblockHeight, rc)).
SignAndBuild(sk)
if err != nil {
return nil, errors.Wrapf(err, "failed to create block")
}
blk.WorkingSet = ws
return &blk, nil
}
// CommitBlock validates and appends a block to the chain
func (bc *blockchain) CommitBlock(blk *block.Block) error {
bc.mu.Lock()
defer bc.mu.Unlock()
timer := bc.timerFactory.NewTimer("CommitBlock")
defer timer.End()
return bc.commitBlock(blk)
}
// StateByAddr returns the account of an address
func (bc *blockchain) StateByAddr(address string) (*state.Account, error) {
if bc.sf != nil {
s, err := bc.sf.AccountState(address)
if err != nil {
log.L().Warn("Failed to get account.", zap.String("address", address), zap.Error(err))
return nil, err
}
return s, nil
}
return nil, errors.New("state factory is nil")
}
// SetValidator sets the current validator object
func (bc *blockchain) SetValidator(val Validator) {
bc.mu.Lock()
defer bc.mu.Unlock()
bc.validator = val
}
// Validator gets the current validator object
func (bc *blockchain) Validator() Validator {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.validator
}
func (bc *blockchain) AddSubscriber(s BlockCreationSubscriber) error {
bc.mu.Lock()
defer bc.mu.Unlock()
log.L().Info("Add a subscriber.")
if s == nil {
return errors.New("subscriber could not be nil")
}
bc.blocklistener = append(bc.blocklistener, s)
return nil
}
func (bc *blockchain) RemoveSubscriber(s BlockCreationSubscriber) error {
bc.mu.Lock()
defer bc.mu.Unlock()
for i, sub := range bc.blocklistener {
if sub == s {
bc.blocklistener = append(bc.blocklistener[:i], bc.blocklistener[i+1:]...)
log.L().Info("Successfully unsubscribe block creation.")
return nil
}
}
return errors.New("cannot find subscription")
}
//======================================
// internal functions
//=====================================
// ExecuteContractRead runs a read-only smart contract operation, this is done off the network since it does not
// cause any state change
func (bc *blockchain) ExecuteContractRead(caller address.Address, ex *action.Execution) ([]byte, *action.Receipt, error) {
// use latest block as carrier to run the offline execution
// the block itself is not used
h := bc.TipHeight()
header, err := bc.BlockHeaderByHeight(h)
if err != nil {
return nil, nil, errors.Wrap(err, "failed to get block in ExecuteContractRead")
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, nil, errors.Wrap(err, "failed to obtain working set from state factory")
}
producer, err := address.FromString(header.ProducerAddress())
if err != nil {
return nil, nil, err
}
gasLimit := bc.config.Genesis.BlockGasLimit
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: header.Height(),
BlockTimeStamp: header.Timestamp(),
Producer: producer,
Caller: caller,
GasLimit: gasLimit,
GasPrice: big.NewInt(0),
IntrinsicGas: 0,
})
return evm.ExecuteContract(
ctx,
ws,
ex,
bc,
config.NewHeightUpgrade(bc.config),
)
}
// CreateState adds a new account with initial balance to the factory
func (bc *blockchain) CreateState(addr string, init *big.Int) (*state.Account, error) {
if bc.sf == nil {
return nil, errors.New("empty state factory")
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return nil, errors.Wrapf(err, "failed to create clean working set")
}
account, err := accountutil.LoadOrCreateAccount(ws, addr, init)
if err != nil {
return nil, errors.Wrapf(err, "failed to create new account %s", addr)
}
gasLimit := bc.config.Genesis.BlockGasLimit
callerAddr, err := address.FromString(addr)
if err != nil {
return nil, err
}
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
GasLimit: gasLimit,
Caller: callerAddr,
ActionHash: hash.ZeroHash256,
Nonce: 0,
Registry: bc.registry,
})
if _, err = ws.RunActions(ctx, 0, nil); err != nil {
return nil, errors.Wrap(err, "failed to run the account creation")
}
if err = bc.sf.Commit(ws); err != nil {
return nil, errors.Wrap(err, "failed to commit the account creation")
}
return account, nil
}
// RecoverChainAndState recovers the chain to the target height and refreshes the state db if necessary
func (bc *blockchain) RecoverChainAndState(targetHeight uint64) error {
var buildStateFromScratch bool
stateHeight, err := bc.sf.Height()
if err != nil {
buildStateFromScratch = true
}
if targetHeight > 0 {
if err := bc.recoverToHeight(targetHeight); err != nil {
return errors.Wrapf(err, "failed to recover blockchain to target height %d", targetHeight)
}
if stateHeight > bc.tipHeight {
buildStateFromScratch = true
}
}
if buildStateFromScratch {
return bc.refreshStateDB()
}
return nil
}
func (bc *blockchain) GenesisTimestamp() int64 {
return bc.config.Genesis.Timestamp
}
//======================================
// private functions
//=====================================
func (bc *blockchain) protocol(id string) (protocol.Protocol, bool) {
if bc.registry == nil {
return nil, false
}
return bc.registry.Find(id)
}
func (bc *blockchain) mustGetRollDPoSProtocol() *rolldpos.Protocol {
p, ok := bc.protocol(rolldpos.ProtocolID)
if !ok {
log.L().Panic("protocol rolldpos has not been registered")
}
rp, ok := p.(*rolldpos.Protocol)
if !ok {
log.L().Panic("failed to cast to rolldpos protocol")
}
return rp
}
func (bc *blockchain) candidatesByHeight(height uint64) (state.CandidateList, error) {
if bc.config.Genesis.EnableGravityChainVoting {
rp := bc.mustGetRollDPoSProtocol()
return bc.sf.CandidatesByHeight(rp.GetEpochHeight(rp.GetEpochNum(height)))
}
for {
candidates, err := bc.sf.CandidatesByHeight(height)
if err == nil {
return candidates, nil
}
if height == 0 {
return nil, err
}
height--
}
}
func (bc *blockchain) getBlockByHeight(height uint64) (*block.Block, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.getBlock(hash)
}
func (bc *blockchain) blockHeaderByHeight(height uint64) (*block.Header, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.Header(hash)
}
func (bc *blockchain) blockFooterByHeight(height uint64) (*block.Footer, error) {
hash, err := bc.dao.getBlockHash(height)
if err != nil {
return nil, err
}
return bc.dao.Footer(hash)
}
func (bc *blockchain) startEmptyBlockchain() error {
var ws factory.WorkingSet
var err error
if ws, err = bc.sf.NewWorkingSet(); err != nil {
return errors.Wrap(err, "failed to obtain working set from state factory")
}
if !bc.config.Chain.EmptyGenesis {
// Initialize the states before any actions happen on the blockchain
if err := bc.createGenesisStates(ws); err != nil {
return err
}
_ = ws.UpdateBlockLevelInfo(0)
}
// add Genesis states
if err := bc.sf.Commit(ws); err != nil {
return errors.Wrap(err, "failed to commit Genesis states")
}
return nil
}
func (bc *blockchain) startExistingBlockchain() error {
if bc.sf == nil {
return errors.New("statefactory cannot be nil")
}
stateHeight, err := bc.sf.Height()
if err != nil {
return err
}
if stateHeight > bc.tipHeight {
return errors.New("factory is higher than blockchain")
}
for i := stateHeight + 1; i <= bc.tipHeight; i++ {
blk, err := bc.getBlockByHeight(i)
if err != nil {
return err
}
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "failed to obtain working set from state factory")
}
if _, err := bc.runActions(blk.RunnableActions(), ws); err != nil {
return err
}
if err := bc.sf.Commit(ws); err != nil {
return err
}
}
stateHeight, err = bc.sf.Height()
if err != nil {
return errors.Wrap(err, "failed to get factory's height")
}
log.L().Info("Restarting blockchain.",
zap.Uint64("chainHeight",
bc.tipHeight),
zap.Uint64("factoryHeight", stateHeight))
return nil
}
func (bc *blockchain) validateBlock(blk *block.Block) error {
validateTimer := bc.timerFactory.NewTimer("validate")
prevBlkHash := bc.tipHash
if blk.Height() == 1 {
prevBlkHash = bc.config.Genesis.Hash()
}
err := bc.validator.Validate(blk, bc.tipHeight, prevBlkHash)
validateTimer.End()
if err != nil {
return errors.Wrapf(err, "error when validating block %d", blk.Height())
}
// run actions and update state factory
ws, err := bc.sf.NewWorkingSet()
if err != nil {
return errors.Wrap(err, "Failed to obtain working set from state factory")
}
runTimer := bc.timerFactory.NewTimer("runActions")
receipts, err := bc.runActions(blk.RunnableActions(), ws)
runTimer.End()
if err != nil {
log.L().Panic("Failed to update state.", zap.Uint64("tipHeight", bc.tipHeight), zap.Error(err))
}
if err = blk.VerifyDeltaStateDigest(ws.Digest()); err != nil {
return err
}
if err = blk.VerifyReceiptRoot(calculateReceiptRoot(receipts)); err != nil {
return errors.Wrap(err, "Failed to verify receipt root")
}
blk.Receipts = receipts
// attach working set to be committed to state factory
blk.WorkingSet = ws
return nil
}
// commitBlock commits a block to the chain
func (bc *blockchain) commitBlock(blk *block.Block) error {
	// Check if it already exists, and return early
blkHash, err := bc.dao.getBlockHash(blk.Height())
if blkHash != hash.ZeroHash256 {
log.L().Debug("Block already exists.", zap.Uint64("height", blk.Height()))
return nil
}
	// If it's a real db io error, return early with the error
if errors.Cause(err) != db.ErrNotExist {
return err
}
// write block into DB
putTimer := bc.timerFactory.NewTimer("putBlock")
err = bc.dao.putBlock(blk)
putTimer.End()
if err != nil {
return err
}
// update tip hash and height
atomic.StoreUint64(&bc.tipHeight, blk.Height())
bc.tipHash = blk.HashBlock()
if bc.sf != nil {
sfTimer := bc.timerFactory.NewTimer("sf.Commit")
err := bc.sf.Commit(blk.WorkingSet)
sfTimer.End()
// detach working set so it can be freed by GC
blk.WorkingSet = nil
if err != nil {
log.L().Panic("Error when committing states.", zap.Error(err))
}
// write smart contract receipt into DB
receiptTimer := bc.timerFactory.NewTimer("putReceipt")
err = bc.dao.putReceipts(blk.Height(), blk.Receipts)
receiptTimer.End()
if err != nil {
return errors.Wrapf(err, "failed to put smart contract receipts into DB on height %d", blk.Height())
}
}
blk.HeaderLogger(log.L()).Info("Committed a block.", log.Hex("tipHash", bc.tipHash[:]))
// emit block to all block subscribers
bc.emitToSubscribers(blk)
return nil
}
func (bc *blockchain) runActions(
acts block.RunnableActions,
ws factory.WorkingSet,
) ([]*action.Receipt, error) {
if bc.sf == nil {
return nil, errors.New("statefactory cannot be nil")
}
gasLimit := bc.config.Genesis.BlockGasLimit
// update state factory
producer, err := address.FromBytes(acts.BlockProducerPubKey().Hash())
if err != nil {
return nil, err
}
ctx := protocol.WithRunActionsCtx(context.Background(),
protocol.RunActionsCtx{
BlockHeight: acts.BlockHeight(),
BlockTimeStamp: acts.BlockTimeStamp(),
Producer: producer,
GasLimit: gasLimit,
Registry: bc.registry,
})
if acts.BlockHeight() == bc.config.Genesis.AleutianBlockHeight {
if err := bc.updateAleutianEpochRewardAmount(ctx, ws); err != nil {
return nil, err
}
}
return ws.RunActions(ctx, acts.BlockHeight(), acts.Actions())
}
func (bc *blockchain) pickAndRunActions(ctx context.Context, actionMap map[string][]action.SealedEnvelope,
ws factory.WorkingSet) (hash.Hash256, []*action.Receipt, []action.SealedEnvelope, error) {
if bc.sf == nil {
return hash.ZeroHash256, nil, nil, errors.New("statefactory cannot be nil")
}
receipts := make([]*action.Receipt, 0)
executedActions := make([]action.SealedEnvelope, 0)
raCtx := protocol.MustGetRunActionsCtx(ctx)
// initial action iterator
actionIterator := actioniterator.NewActionIterator(actionMap)
for {
nextAction, ok := actionIterator.Next()
if !ok {
break
}
receipt, err := ws.RunAction(raCtx, nextAction)
if err != nil {
if errors.Cause(err) == action.ErrHitGasLimit {
				// hit block gas limit, we should not process actions belonging to this user anymore since we
				// need a monotonically increasing nonce. But we can continue processing other actions
				// that belong to other users
actionIterator.PopAccount()
continue
}
return hash.ZeroHash256, nil, nil, errors.Wrapf(err, "Failed to update state changes for selp %x", nextAction.Hash())
}
if receipt != nil {
raCtx.GasLimit -= receipt.GasConsumed
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, nextAction)
		// To avoid looping over all actions in act_pool, we stop processing actions when the remaining gas is
		// below a certain threshold
if raCtx.GasLimit < bc.config.Chain.AllowedBlockGasResidue {
break
}
}
var lastBlkHeight uint64
if bc.config.Consensus.Scheme == config.RollDPoSScheme {
rp := bc.mustGetRollDPoSProtocol()
epochNum := rp.GetEpochNum(raCtx.BlockHeight)
lastBlkHeight = rp.GetEpochLastBlockHeight(epochNum)
// generate delegates for next round
skip, putPollResult, err := bc.createPutPollResultAction(raCtx.BlockHeight)
switch errors.Cause(err) {
case nil:
if !skip {
receipt, err := ws.RunAction(raCtx, putPollResult)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, putPollResult)
}
case errDelegatesNotExist:
if raCtx.BlockHeight == lastBlkHeight {
// TODO (zhi): if some bp bypasses this condition, we need to reject the block in the validation step
return hash.ZeroHash256, nil, nil, errors.Wrapf(
err,
"failed to prepare delegates for next epoch %d",
epochNum+1,
)
}
default:
return hash.ZeroHash256, nil, nil, err
}
}
// Process grant block reward action
grant, err := bc.createGrantRewardAction(action.BlockReward, raCtx.BlockHeight)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
receipt, err := ws.RunAction(raCtx, grant)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, grant)
// Process grant epoch reward action if the block is the last one in an epoch
if raCtx.BlockHeight == lastBlkHeight {
grant, err := bc.createGrantRewardAction(action.EpochReward, raCtx.BlockHeight)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
receipt, err := ws.RunAction(raCtx, grant)
if err != nil {
return hash.ZeroHash256, nil, nil, err
}
if receipt != nil {
receipts = append(receipts, receipt)
}
executedActions = append(executedActions, grant)
}
blockMtc.WithLabelValues("gasConsumed").Set(float64(bc.config.Genesis.BlockGasLimit - raCtx.GasLimit))
return ws.UpdateBlockLevelInfo(raCtx.BlockHeight), receipts, executedActions, nil
}
func (bc *blockchain) createPutPollResultAction(height uint64) (skip bool, se action.SealedEnvelope, err error) {
skip = true
if !bc.config.Genesis.EnableGravityChainVoting {
return
}
pl, ok := bc.protocol(poll.ProtocolID)
if !ok {
log.L().Panic("protocol poll has not been registered")
}
pp, ok := pl.(poll.Protocol)
if !ok {
log.L().Panic("Failed to cast to poll.Protocol")
}
rp := bc.mustGetRollDPoSProtocol()
epochNum := rp.GetEpochNum(height)
epochHeight := rp.GetEpochHeight(epochNum)
nextEpochHeight := rp.GetEpochHeight(epochNum + 1)
if height < epochHeight+(nextEpochHeight-epochHeight)/2 {
return
}
log.L().Debug(
"createPutPollResultAction",
zap.Uint64("height", height),
zap.Uint64("epochNum", epochNum),
zap.Uint64("epochHeight", epochHeight),
zap.Uint64("nextEpochHeight", nextEpochHeight),
)
_, err = bc.candidatesByHeight(nextEpochHeight)
switch errors.Cause(err) {
case nil:
return
case state.ErrStateNotExist:
skip = false
default:
return
}
l, err := pp.DelegatesByHeight(epochHeight)
switch errors.Cause(err) {
case nil:
if len(l) == 0 {
err = errors.Wrapf(
errDelegatesNotExist,
"failed to fetch delegates by epoch height %d, empty list",
epochHeight,
)
return
}
case db.ErrNotExist:
err = errors.Wrapf(
errDelegatesNotExist,
"failed to fetch delegates by epoch height %d, original error %v",
epochHeight,
err,
)
return
default:
return
}
sk := bc.config.ProducerPrivateKey()
nonce := uint64(0)
pollAction := action.NewPutPollResult(nonce, nextEpochHeight, l)
builder := action.EnvelopeBuilder{}
se, err = action.Sign(builder.SetNonce(nonce).SetAction(pollAction).Build(), sk)
return skip, se, err
}
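// Illustrative note (not part of the original source): a poll result action is only
// proposed once the chain passes the midpoint of the current epoch
// (height >= epochHeight + (nextEpochHeight-epochHeight)/2) and the next epoch's
// candidate list is not yet stored in state.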
func (bc *blockchain) emitToSubscribers(blk *block.Block) {
if bc.blocklistener == nil {
return
}
for _, s := range bc.blocklistener {
go func(bcs BlockCreationSubscriber, b *block.Block) {
if err := bcs.HandleBlock(b); err != nil {
log.L().Error("Failed to handle new block.", zap.Error(err))
}
}(s, blk)
}
}
// recoverToHeight recovers the blockchain to the target height
func (bc *blockchain) recoverToHeight(targetHeight uint64) error {
for bc.tipHeight > targetHeight {
if err := bc.dao.deleteTipBlock(); err != nil {
return err
}
bc.tipHeight--
}
return nil
}
// refreshStateDB deletes the existing state DB and creates a new one with state changes from the genesis block
func (bc *blockchain) refreshStateDB() error {
// Delete existing state DB and reinitialize it
if fileutil.FileExists(bc.config.Chain.TrieDBPath) && os.Remove(bc.config.Chain.TrieDBPath) != nil {
return errors.New("failed to delete existing state DB")
}
if err := DefaultStateFactoryOption()(bc, bc.config); err != nil {
return errors.Wrap(err, "failed to reinitialize state DB")
}
for _, p := range bc.registry.All() {
bc.sf.AddActionHandlers(p)
}
if err := bc.sf.Start(context.Background()); err != nil {
return errors.Wrap(err, "failed to start state factory")
}
if err := bc.startEmptyBlockchain(); err != nil {
return err
}
if err := bc.sf.Stop(context.Background()); err != nil {
return errors.Wrap(err, "failed to stop state factory")
}
return nil
}
func (bc *blockchain) createGrantRewardAction(rewardType int, height uint64) (action.SealedEnvelope, error) {
gb := action.GrantRewardBuilder{}
grant := gb.SetRewardType(rewardType).SetHeight(height).Build()
eb := action.EnvelopeBuilder{}
envelope := eb.SetNonce(0).
SetGasPrice(big.NewInt(0)).
SetGasLimit(grant.GasLimit()).
SetAction(&grant).
Build()
sk := bc.config.ProducerPrivateKey()
return action.Sign(envelope, sk)
}
func (bc *blockchain) createGenesisStates(ws factory.WorkingSet) error {
if bc.registry == nil {
// TODO: return nil to avoid test cases failing due to a missing rewarding protocol
return nil
}
ctx := protocol.WithRunActionsCtx(context.Background(), protocol.RunActionsCtx{
BlockHeight: 0,
BlockTimeStamp: time.Unix(bc.config.Genesis.Timestamp, 0),
GasLimit: 0,
Producer: nil,
Caller: nil,
ActionHash: hash.ZeroHash256,
Nonce: 0,
Registry: bc.registry,
})
if err := bc.createAccountGenesisStates(ctx, ws); err != nil {
return err
}
if bc.config.Consensus.Scheme == config.RollDPoSScheme {
if err := bc.createPollGenesisStates(ctx, ws); err != nil {
return err
}
}
return bc.createRewardingGenesisStates(ctx, ws)
}
func (bc *blockchain) createAccountGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
p, ok := bc.registry.Find(account.ProtocolID)
if !ok {
return nil
}
ap, ok := p.(*account.Protocol)
if !ok {
return errors.Errorf("error when casting protocol")
}
addrs, balances := bc.config.Genesis.InitBalances()
return ap.Initialize(ctx, ws, addrs, balances)
}
func (bc *blockchain) createRewardingGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
p, ok := bc.registry.Find(rewarding.ProtocolID)
if !ok {
return nil
}
rp, ok := p.(*rewarding.Protocol)
if !ok {
return errors.Errorf("error when casting protocol")
}
return rp.Initialize(
ctx,
ws,
bc.config.Genesis.InitBalance(),
bc.config.Genesis.BlockReward(),
bc.config.Genesis.EpochReward(),
bc.config.Genesis.NumDelegatesForEpochReward,
bc.config.Genesis.ExemptAddrsFromEpochReward(),
bc.config.Genesis.FoundationBonus(),
bc.config.Genesis.NumDelegatesForFoundationBonus,
bc.config.Genesis.FoundationBonusLastEpoch,
bc.config.Genesis.ProductivityThreshold,
)
}
func (bc *blockchain) createPollGenesisStates(ctx context.Context, ws factory.WorkingSet) error {
if bc.config.Genesis.EnableGravityChainVoting {
p, ok := bc.protocol(poll.ProtocolID)
if !ok {
return errors.Errorf("protocol %s is not found", poll.ProtocolID)
}
pp, ok := p.(poll.Protocol)
if !ok {
return errors.Errorf("error when casting poll protocol")
}
return pp.Initialize(
ctx,
ws,
)
}
return nil
}
func (bc *blockchain) updateAleutianEpochRewardAmount(ctx context.Context, ws factory.WorkingSet) error {
p, ok := bc.registry.Find(rewarding.ProtocolID)
if !ok {
return nil
}
rp, ok := p.(*rewarding.Protocol)
if !ok {
return errors.Errorf("error when casting protocol")
}
return rp.SetReward(ctx, ws, bc.config.Genesis.AleutianEpochReward(), false)
}
func calculateReceiptRoot(receipts []*action.Receipt) hash.Hash256 {
if len(receipts) == 0 {
return hash.ZeroHash256
}
h := make([]hash.Hash256, 0, len(receipts))
for _, receipt := range receipts {
h = append(h, receipt.Hash())
}
res := crypto.NewMerkleTree(h).HashTree()
return res
}
func calculateLogsBloom(cfg config.Config, height uint64, receipts []*action.Receipt) bloom.BloomFilter {
if height < cfg.Genesis.AleutianBlockHeight {
return nil
}
bloom, _ := bloom.NewBloomFilter(2048, 3)
for _, receipt := range receipts {
for _, log := range receipt.Logs {
for _, topic := range log.Topics {
bloom.Add(topic[:])
}
}
}
return bloom
}
| 1 | 18,823 | Is it a doable way and does it make sense. | iotexproject-iotex-core | go |
@@ -95,7 +95,10 @@ def verify_limit_range(limit):
max_query_limit = constants.MAX_QUERY_SIZE
if limit > max_query_limit:
LOG.warning('Query limit %d was larger than max query limit %d, '
- 'setting limit to %d', limit, max_query_limit, max)
+ 'setting limit to %d',
+ limit,
+ max_query_limit,
+ max_query_limit)
limit = max_query_limit
return limit
| 1 | # -------------------------------------------------------------------------
# The CodeChecker Infrastructure
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
# -------------------------------------------------------------------------
"""
Handle Thrift requests.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import base64
import codecs
from collections import defaultdict
from datetime import datetime, timedelta
import io
import os
import re
import shlex
import tempfile
import zipfile
import zlib
import sqlalchemy
from sqlalchemy.sql.expression import or_, and_, not_, func, \
asc, desc, text, union_all, select, bindparam, literal_column, cast
import codechecker_api_shared
from codeCheckerDBAccess_v6 import constants, ttypes
from codeCheckerDBAccess_v6.ttypes import BugPathPos, CheckerCount, \
CommentData, DiffType, Encoding, RunHistoryData, Order, ReportData, \
ReportDetails, ReviewData, RunData, RunFilter, RunReportCount, \
RunSortType, RunTagCount, SourceComponentData, SourceFileData, SortMode, \
SortType
from codechecker_common import plist_parser, skiplist_handler
from codechecker_common.source_code_comment_handler import \
SourceCodeCommentHandler, SKIP_REVIEW_STATUSES
from codechecker_common import util
from codechecker_common.logger import get_logger
from codechecker_common.report import get_report_path_hash
from codechecker_web.shared import webserver_context
from codechecker_server.profiler import timeit
from .. import permissions
from ..database import db_cleanup
from ..database.config_db_model import Product
from ..database.database import conv
from ..database.run_db_model import \
AnalyzerStatistic, Report, ReviewStatus, File, Run, RunHistory, \
RunLock, Comment, BugPathEvent, BugReportPoint, \
FileContent, SourceComponent, ExtendedReportData
from ..tmp import TemporaryDirectory
from .db import DBSession, escape_like
from .thrift_enum_helper import detection_status_enum, \
detection_status_str, review_status_enum, review_status_str, \
report_extended_data_type_enum
from . import store_handler
LOG = get_logger('server')
class CommentKindValue(object):
USER = 0
SYSTEM = 1
def comment_kind_from_thrift_type(kind):
""" Convert the given comment kind from Thrift type to Python enum. """
if kind == ttypes.CommentKind.USER:
return CommentKindValue.USER
elif kind == ttypes.CommentKind.SYSTEM:
return CommentKindValue.SYSTEM
def comment_kind_to_thrift_type(kind):
""" Convert the given comment kind from Python enum to Thrift type. """
if kind == CommentKindValue.USER:
return ttypes.CommentKind.USER
elif kind == CommentKindValue.SYSTEM:
return ttypes.CommentKind.SYSTEM
def verify_limit_range(limit):
"""Verify limit value for the queries.
Query limit should not be larger than the max allowed value.
Max is returned if the value is larger than max.
"""
max_query_limit = constants.MAX_QUERY_SIZE
if limit > max_query_limit:
LOG.warning('Query limit %d was larger than max query limit %d, '
'setting limit to %d', limit, max_query_limit, max)
limit = max_query_limit
return limit
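# Illustrative example (not part of the original file): assuming
# constants.MAX_QUERY_SIZE == 500, verify_limit_range(50) returns 50, while
# verify_limit_range(10000) logs a warning and returns 500.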
def slugify(text):
"""
Removes and replaces special characters in a given text.
"""
# Removes non-alpha characters.
norm_text = re.sub(r'[^\w\s\-/]', '', text)
# Converts spaces and slashes to underscores.
norm_text = re.sub(r'([\s]+|[/]+)', '_', norm_text)
return norm_text
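# Illustrative example (not part of the original file):
# slugify("My run: 2019/03 results") drops the colon and converts the space
# and slash runs to underscores, yielding "My_run_2019_03_results".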
def exc_to_thrift_reqfail(func):
"""
Convert internal exceptions to RequestFailed exception
which can be sent back on the thrift connections.
"""
func_name = func.__name__
def wrapper(*args, **kwargs):
try:
res = func(*args, **kwargs)
return res
except sqlalchemy.exc.SQLAlchemyError as alchemy_ex:
# Convert SQLAlchemy exceptions.
msg = str(alchemy_ex)
LOG.warning("%s:\n%s", func_name, msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
except codechecker_api_shared.ttypes.RequestFailed as rf:
LOG.warning(rf.message)
raise
except Exception as ex:
msg = str(ex)
LOG.warning(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL, msg)
return wrapper
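# Usage sketch (not part of the original file): handler methods below are wrapped as
#   @exc_to_thrift_reqfail
#   def getRunCount(self, run_filter): ...
# so SQLAlchemy errors surface to Thrift clients as RequestFailed(DATABASE)
# and any other exception as RequestFailed(GENERAL).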
def get_component_values(session, component_name):
"""
Get component values by component name and return a tuple where the
first item contains a list of paths which should be skipped and the
second item contains a list of paths which should be included.
E.g.:
+/a/b/x.cpp
+/a/b/y.cpp
-/a/b
On the above component value this function will return the following:
(['/a/b'], ['/a/b/x.cpp', '/a/b/y.cpp'])
"""
components = session.query(SourceComponent) \
.filter(SourceComponent.name.like(component_name)) \
.all()
skip = []
include = []
for component in components:
values = component.value.split('\n')
for value in values:
v = value[1:].strip()
if value[0] == '+':
include.append(v)
elif value[0] == '-':
skip.append(v)
return skip, include
def process_report_filter(session, report_filter):
"""
Process the new report filter.
"""
if report_filter is None:
return text('')
AND = []
if report_filter.filepath:
OR = [File.filepath.ilike(conv(fp))
for fp in report_filter.filepath]
AND.append(or_(*OR))
if report_filter.checkerMsg:
OR = [Report.checker_message.ilike(conv(cm))
for cm in report_filter.checkerMsg]
AND.append(or_(*OR))
if report_filter.checkerName:
OR = [Report.checker_id.ilike(conv(cn))
for cn in report_filter.checkerName]
AND.append(or_(*OR))
if report_filter.runName:
OR = [Run.name.ilike(conv(rn))
for rn in report_filter.runName]
AND.append(or_(*OR))
if report_filter.reportHash:
OR = [Report.bug_id.ilike(conv(rh))
for rh in report_filter.reportHash]
AND.append(or_(*OR))
if report_filter.severity:
AND.append(Report.severity.in_(report_filter.severity))
if report_filter.detectionStatus:
dst = list(map(detection_status_str,
report_filter.detectionStatus))
AND.append(Report.detection_status.in_(dst))
if report_filter.reviewStatus:
OR = [ReviewStatus.status.in_(
list(map(review_status_str, report_filter.reviewStatus)))]
# No database entry for unreviewed reports
if (ttypes.ReviewStatus.UNREVIEWED in
report_filter.reviewStatus):
OR.append(ReviewStatus.status.is_(None))
AND.append(or_(*OR))
detection_status = report_filter.detectionStatus
if report_filter.firstDetectionDate is not None:
date = datetime.fromtimestamp(report_filter.firstDetectionDate)
OR = []
if detection_status is not None and len(detection_status) == 1 and \
ttypes.DetectionStatus.RESOLVED in detection_status:
OR.append(Report.fixed_at >= date)
else:
OR.append(Report.detected_at >= date)
AND.append(or_(*OR))
if report_filter.fixDate is not None:
date = datetime.fromtimestamp(report_filter.fixDate)
OR = []
if detection_status is not None and len(detection_status) == 1 and \
ttypes.DetectionStatus.RESOLVED in detection_status:
OR.append(Report.fixed_at < date)
else:
OR.append(Report.detected_at < date)
AND.append(or_(*OR))
if report_filter.runHistoryTag:
OR = []
for history_date in report_filter.runHistoryTag:
date = datetime.strptime(history_date,
'%Y-%m-%d %H:%M:%S.%f')
OR.append(and_(Report.detected_at <= date, or_(
Report.fixed_at.is_(None), Report.fixed_at >= date)))
AND.append(or_(*OR))
if report_filter.runTag:
OR = []
for tag_id in report_filter.runTag:
history = session.query(RunHistory).get(tag_id)
OR.append(and_(Report.run_id == history.run_id,
and_(Report.detected_at <= history.time,
or_(Report.fixed_at.is_(None),
Report.fixed_at >= history.time))))
AND.append(or_(*OR))
if report_filter.componentNames:
OR = []
for component_name in report_filter.componentNames:
skip, include = get_component_values(session, component_name)
skip_q, include_q = None, None
if include:
and_q = [File.filepath.like(conv(fp)) for fp in include]
include_q = select([File.id]).where(or_(*and_q))
if skip:
and_q = [(File.filepath.like(conv(fp))) for fp in skip]
skip_q = select([File.id]).where(or_(*and_q))
file_ids = []
if skip and include:
skip_q = include_q.except_(skip_q).alias('component')
file_ids = session.query(skip_q) \
.distinct() \
.all()
elif include:
file_ids = session.query(include_q.alias('include')).all()
elif skip:
and_q = [not_(File.filepath.like(conv(fp))) for fp in skip]
skip_q = select([File.id]).where(and_(*and_q))
file_ids = session.query(skip_q.alias('skip')).all()
if file_ids:
OR.append(or_(File.id.in_([f_id[0] for f_id in file_ids])))
else:
# File id list can be empty for example when the user skips
# everything.
OR.append(False)
AND.append(or_(*OR))
if report_filter.bugPathLength is not None:
min_path_length = report_filter.bugPathLength.min
if min_path_length is not None:
AND.append(Report.path_length >= min_path_length)
max_path_length = report_filter.bugPathLength.max
if max_path_length is not None:
AND.append(Report.path_length <= max_path_length)
filter_expr = and_(*AND)
return filter_expr
def process_run_history_filter(query, run_ids, run_history_filter):
"""
Process run history filter.
"""
if run_ids:
query = query.filter(RunHistory.run_id.in_(run_ids))
if run_history_filter and run_history_filter.tagNames:
query = query.filter(RunHistory.version_tag.in_(
run_history_filter.tagNames))
return query
def process_run_filter(session, query, run_filter):
"""
Process run filter.
"""
if run_filter is None:
return query
if run_filter.ids:
query = query.filter(Run.id.in_(run_filter.ids))
if run_filter.names:
if run_filter.exactMatch:
query = query.filter(Run.name.in_(run_filter.names))
else:
OR = [Run.name.ilike('{0}'.format(conv(
escape_like(name, '\\'))), escape='\\') for
name in run_filter.names]
query = query.filter(or_(*OR))
if run_filter.beforeTime:
date = datetime.fromtimestamp(run_filter.beforeTime)
query = query.filter(Run.date < date)
if run_filter.afterTime:
date = datetime.fromtimestamp(run_filter.afterTime)
query = query.filter(Run.date > date)
if run_filter.beforeRun:
run = session.query(Run.date) \
.filter(Run.name == run_filter.beforeRun) \
.one_or_none()
if run:
query = query.filter(Run.date < run.date)
if run_filter.afterRun:
run = session.query(Run.date) \
.filter(Run.name == run_filter.afterRun) \
.one_or_none()
if run:
query = query.filter(Run.date > run.date)
return query
def get_diff_hashes_for_query(base_run_ids, base_line_hashes, new_run_ids,
new_check_hashes, diff_type):
"""
Get the report hash list for the result comparison.
Returns the list of hashes (NEW, RESOLVED, UNRESOLVED) and
the run ids which should be queried for the reports.
"""
if diff_type == DiffType.NEW:
df = [] + list(new_check_hashes.difference(base_line_hashes))
return df, new_run_ids
elif diff_type == DiffType.RESOLVED:
df = [] + list(base_line_hashes.difference(new_check_hashes))
return df, base_run_ids
elif diff_type == DiffType.UNRESOLVED:
df = [] + list(base_line_hashes.intersection(new_check_hashes))
return df, new_run_ids
else:
msg = 'Unsupported diff type: ' + str(diff_type)
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
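# Worked example (not part of the original file): with base_line_hashes == {A, B}
# and new_check_hashes == {B, C}, DiffType.NEW yields {C} (queried against the
# new runs), RESOLVED yields {A} (queried against the base runs), and
# UNRESOLVED yields {B}.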
def get_report_details(session, report_ids):
"""
Returns report details for the given report ids.
"""
details = {}
# Get bug path events.
bug_path_events = session.query(BugPathEvent, File.filepath) \
.filter(BugPathEvent.report_id.in_(report_ids)) \
.outerjoin(File,
File.id == BugPathEvent.file_id) \
.order_by(BugPathEvent.report_id, BugPathEvent.order)
bug_events_list = defaultdict(list)
for event, file_path in bug_path_events:
report_id = event.report_id
event = bugpathevent_db_to_api(event)
event.filePath = file_path
bug_events_list[report_id].append(event)
# Get bug report points.
bug_report_points = session.query(BugReportPoint, File.filepath) \
.filter(BugReportPoint.report_id.in_(report_ids)) \
.outerjoin(File,
File.id == BugReportPoint.file_id) \
.order_by(BugReportPoint.report_id, BugReportPoint.order)
bug_point_list = defaultdict(list)
for bug_point, file_path in bug_report_points:
report_id = bug_point.report_id
bug_point = bugreportpoint_db_to_api(bug_point)
bug_point.filePath = file_path
bug_point_list[report_id].append(bug_point)
# Get extended report data.
extended_data_list = defaultdict(list)
q = session.query(ExtendedReportData, File.filepath) \
.filter(ExtendedReportData.report_id.in_(report_ids)) \
.outerjoin(File,
File.id == ExtendedReportData.file_id)
for data, file_path in q:
report_id = data.report_id
extended_data = extended_data_db_to_api(data)
extended_data.filePath = file_path
extended_data_list[report_id].append(extended_data)
for report_id in report_ids:
details[report_id] = \
ReportDetails(pathEvents=bug_events_list[report_id],
executionPath=bug_point_list[report_id],
extendedData=extended_data_list[report_id])
return details
def bugpathevent_db_to_api(bpe):
return ttypes.BugPathEvent(
startLine=bpe.line_begin,
startCol=bpe.col_begin,
endLine=bpe.line_end,
endCol=bpe.col_end,
msg=bpe.msg,
fileId=bpe.file_id)
def bugreportpoint_db_to_api(brp):
return BugPathPos(
startLine=brp.line_begin,
startCol=brp.col_begin,
endLine=brp.line_end,
endCol=brp.col_end,
fileId=brp.file_id)
def extended_data_db_to_api(erd):
return ttypes.ExtendedReportData(
type=report_extended_data_type_enum(erd.type),
startLine=erd.line_begin,
startCol=erd.col_begin,
endLine=erd.line_end,
endCol=erd.col_end,
message=erd.message,
fileId=erd.file_id)
def unzip(b64zip, output_dir):
"""
This function unzips the base64 encoded ZIP file. The archive is written
to a temporary file, extracted into the given output directory, and the
temporary ZIP file is deleted afterwards.
"""
with tempfile.NamedTemporaryFile(suffix='.zip') as zip_file:
LOG.debug("Unzipping mass storage ZIP '%s' to '%s'...",
zip_file.name, output_dir)
zip_file.write(zlib.decompress(base64.b64decode(b64zip)))
with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zipf:
try:
zipf.extractall(output_dir)
except Exception:
LOG.error("Failed to extract received ZIP.")
import traceback
traceback.print_exc()
raise
def create_review_data(review_status):
if review_status:
return ReviewData(status=review_status_enum(review_status.status),
comment=review_status.message,
author=review_status.author,
date=str(review_status.date))
else:
return ReviewData(status=ttypes.ReviewStatus.UNREVIEWED)
def create_count_expression(report_filter):
if report_filter is not None and report_filter.isUnique:
return func.count(Report.bug_id.distinct())
else:
return func.count(literal_column('*'))
def filter_report_filter(q, filter_expression, run_ids=None, cmp_data=None,
diff_hashes=None):
if run_ids:
q = q.filter(Report.run_id.in_(run_ids))
q = q.outerjoin(File,
Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.filter(filter_expression)
if cmp_data:
q = q.filter(Report.bug_id.in_(diff_hashes))
return q
def get_sort_map(sort_types, is_unique=False):
# Get a list of sort_types which will be a nested ORDER BY.
sort_type_map = {
SortType.FILENAME: [(File.filepath, 'filepath'),
(Report.line, 'line')],
SortType.BUG_PATH_LENGTH: [(Report.path_length, 'bug_path_length')],
SortType.CHECKER_NAME: [(Report.checker_id, 'checker_id')],
SortType.SEVERITY: [(Report.severity, 'severity')],
SortType.REVIEW_STATUS: [(ReviewStatus.status, 'rw_status')],
SortType.DETECTION_STATUS: [(Report.detection_status, 'dt_status')]}
if is_unique:
sort_type_map[SortType.FILENAME] = [(File.filename, 'filename')]
sort_type_map[SortType.DETECTION_STATUS] = []
# Mapping the SQLAlchemy functions.
order_type_map = {Order.ASC: asc, Order.DESC: desc}
if sort_types is None:
sort_types = [SortMode(SortType.SEVERITY, Order.DESC)]
return sort_types, sort_type_map, order_type_map
def sort_results_query(query, sort_types, sort_type_map, order_type_map,
order_by_label=False):
"""
Helper method for __queryDiffResults and queryResults to apply sorting.
"""
for sort in sort_types:
sorttypes = sort_type_map.get(sort.type)
for sorttype in sorttypes:
order_type = order_type_map.get(sort.ord)
sort_col = sorttype[1] if order_by_label else sorttype[0]
query = query.order_by(order_type(sort_col))
return query
def filter_unresolved_reports(q):
"""
Filter reports which are unresolved.
Note: the review status of these reports is not in the SKIP_REVIEW_STATUSES
list and their detection status is not in skip_detection_statuses.
"""
skip_review_status = SKIP_REVIEW_STATUSES
skip_detection_statuses = ['resolved', 'off', 'unavailable']
return q.filter(Report.detection_status.notin_(skip_detection_statuses)) \
.filter(or_(ReviewStatus.status.is_(None),
ReviewStatus.status.notin_(skip_review_status))) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id)
def get_report_hashes(session, run_ids, tag_ids):
"""
Get report hash list for the reports which can be found in the given runs
and the given tags.
"""
q = session.query(Report.bug_id)
if run_ids:
q = q.filter(Report.run_id.in_(run_ids))
if tag_ids:
q = q.outerjoin(RunHistory,
RunHistory.run_id == Report.run_id) \
.filter(RunHistory.id.in_(tag_ids)) \
.filter(Report.detected_at <= RunHistory.time) \
.filter(or_(Report.fixed_at.is_(None),
Report.fixed_at > RunHistory.time))
return set([t[0] for t in q])
def check_remove_runs_lock(session, run_ids):
"""
Check if there is an existing lock on the given runs, which has not
expired yet. If so, the run cannot be deleted, as someone is assumed to
be storing into it.
"""
locks_expired_at = datetime.now() - timedelta(
seconds=db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE)
run_locks = session.query(RunLock.name) \
.filter(RunLock.locked_at >= locks_expired_at)
if run_ids:
run_locks = run_locks.filter(Run.id.in_(run_ids))
run_locks = run_locks \
.outerjoin(Run,
Run.name == RunLock.name) \
.all()
if run_locks:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"Can not remove results because the following runs "
"are locked: {0}".format(
', '.join([r[0] for r in run_locks])))
def sort_run_data_query(query, sort_mode):
"""
Sort run data query by the given sort type.
"""
# Sort by run date by default.
if not sort_mode:
return query.order_by(desc(Run.date))
order_type_map = {Order.ASC: asc, Order.DESC: desc}
order_type = order_type_map.get(sort_mode.ord)
if sort_mode.type == RunSortType.NAME:
query = query.order_by(order_type(Run.name))
elif sort_mode.type == RunSortType.UNRESOLVED_REPORTS:
query = query.order_by(order_type('report_count'))
elif sort_mode.type == RunSortType.DATE:
query = query.order_by(order_type(Run.date))
elif sort_mode.type == RunSortType.DURATION:
query = query.order_by(order_type(Run.duration))
elif sort_mode.type == RunSortType.CC_VERSION:
query = query.order_by(order_type(RunHistory.cc_version))
return query
def escape_whitespaces(s, whitespaces=None):
if not whitespaces:
whitespaces = [' ', '\n', '\t', '\r']
escaped = s
for w in whitespaces:
escaped = escaped.replace(w, '\\{0}'.format(w))
return escaped
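# Illustrative example (not part of the original file):
# escape_whitespaces('False positive') returns 'False\ positive', which keeps
# the phrase as a single token when getComments() later applies shlex.split()
# to a system comment message.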
class ThriftRequestHandler(object):
"""
Connect to database and handle thrift client requests.
"""
def __init__(self,
manager,
Session,
product,
auth_session,
config_database,
checker_md_docs,
checker_md_docs_map,
package_version,
context):
if not product:
raise ValueError("Cannot initialize request handler without "
"a product to serve.")
self.__manager = manager
self.__product = product
self.__auth_session = auth_session
self.__config_database = config_database
self.__checker_md_docs = checker_md_docs
self.__checker_doc_map = checker_md_docs_map
self.__package_version = package_version
self.__Session = Session
self.__context = context
self.__permission_args = {
'productID': product.id
}
def __get_username(self):
"""
Return the name of the currently logged-in user.
"""
return self.__auth_session.user if self.__auth_session else "Anonymous"
def __require_permission(self, required):
"""
Helper method to raise an UNAUTHORIZED exception if the user does not
have any of the given permissions.
"""
with DBSession(self.__config_database) as session:
args = dict(self.__permission_args)
args['config_db_session'] = session
if not any([permissions.require_permission(
perm, args, self.__auth_session)
for perm in required]):
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,
"You are not authorized to execute this action.")
return True
def __require_admin(self):
self.__require_permission([permissions.PRODUCT_ADMIN])
def __require_access(self):
self.__require_permission([permissions.PRODUCT_ACCESS])
def __require_store(self):
self.__require_permission([permissions.PRODUCT_STORE])
@staticmethod
def __get_run_ids_to_query(session, cmp_data=None):
"""
Return run id list for the queries.
If compare data is set remove those run ids from the returned list.
The returned run id list can be used as a baseline for comparisons.
"""
res = session.query(Run.id).all()
run_ids = [r[0] for r in res]
if cmp_data:
all_rids = set(run_ids)
cmp_rids = set(cmp_data.runIds) if cmp_data.runIds else set()
run_ids = list(all_rids.difference(cmp_rids))
return run_ids
def __add_comment(self, bug_id, message, kind=CommentKindValue.USER):
""" Creates a new comment object. """
user = self.__get_username()
return Comment(bug_id,
user,
message,
kind,
datetime.now())
@timeit
def getRunData(self, run_filter, limit, offset, sort_mode):
self.__require_access()
limit = verify_limit_range(limit)
with DBSession(self.__Session) as session:
# Count the reports subquery.
stmt = session.query(Report.run_id,
func.count(Report.bug_id)
.label('report_count'))
stmt = filter_unresolved_reports(stmt) \
.group_by(Report.run_id).subquery()
tag_q = session.query(RunHistory.run_id,
func.max(RunHistory.id).label(
'run_history_id'),
func.max(RunHistory.time).label(
'run_history_time')) \
.group_by(RunHistory.run_id) \
.subquery()
q = session.query(Run.id,
Run.date,
Run.name,
Run.duration,
RunHistory.version_tag,
RunHistory.cc_version,
stmt.c.report_count)
q = process_run_filter(session, q, run_filter)
q = q.outerjoin(stmt, Run.id == stmt.c.run_id) \
.outerjoin(tag_q, Run.id == tag_q.c.run_id) \
.outerjoin(RunHistory,
RunHistory.id == tag_q.c.run_history_id) \
.group_by(Run.id,
RunHistory.version_tag,
RunHistory.cc_version,
stmt.c.report_count)
q = sort_run_data_query(q, sort_mode)
if limit:
q = q.limit(limit).offset(offset)
# Get the runs.
run_data = q.all()
# Set run ids filter by using the previous results.
if not run_filter:
run_filter = RunFilter()
run_filter.ids = [r[0] for r in run_data]
# Get report count for each detection statuses.
status_q = session.query(Report.run_id,
Report.detection_status,
func.count(Report.bug_id))
if run_filter and run_filter.ids is not None:
status_q = status_q.filter(Report.run_id.in_(run_filter.ids))
status_q = status_q.group_by(Report.run_id,
Report.detection_status)
status_sum = defaultdict(defaultdict)
for run_id, status, count in status_q:
status_sum[run_id][detection_status_enum(status)] = count
# Get analyzer statistics.
analyzer_statistics = defaultdict(lambda: defaultdict())
stat_q = session.query(AnalyzerStatistic,
Run.id)
if run_filter and run_filter.ids is not None:
stat_q = stat_q.filter(Run.id.in_(run_filter.ids))
stat_q = stat_q \
.outerjoin(RunHistory,
RunHistory.id == AnalyzerStatistic.run_history_id) \
.outerjoin(Run,
Run.id == RunHistory.run_id)
for stat, run_id in stat_q:
analyzer_statistics[run_id][stat.analyzer_type] = \
ttypes.AnalyzerStatistics(failed=stat.failed,
successful=stat.successful)
results = []
for run_id, run_date, run_name, duration, tag, cc_version, \
report_count \
in run_data:
if report_count is None:
report_count = 0
analyzer_stats = analyzer_statistics[run_id]
results.append(RunData(runId=run_id,
runDate=str(run_date),
name=run_name,
duration=duration,
resultCount=report_count,
detectionStatusCount=status_sum[run_id],
versionTag=tag,
codeCheckerVersion=cc_version,
analyzerStatistics=analyzer_stats))
return results
@exc_to_thrift_reqfail
@timeit
def getRunCount(self, run_filter):
self.__require_access()
with DBSession(self.__Session) as session:
query = session.query(Run.id)
query = process_run_filter(session, query, run_filter)
return query.count()
def getCheckCommand(self, run_history_id, run_id):
self.__require_access()
if not run_history_id and not run_id:
return ""
with DBSession(self.__Session) as session:
query = session.query(RunHistory.check_command)
if run_history_id:
query = query.filter(RunHistory.id == run_history_id)
elif run_id:
query = query.filter(RunHistory.run_id == run_id) \
.order_by(RunHistory.time.desc()) \
.limit(1)
history = query.first()
if not history or not history[0]:
return ""
return zlib.decompress(history[0])
@exc_to_thrift_reqfail
@timeit
def getRunHistory(self, run_ids, limit, offset, run_history_filter):
self.__require_access()
limit = verify_limit_range(limit)
with DBSession(self.__Session) as session:
res = session.query(RunHistory)
res = process_run_history_filter(res, run_ids, run_history_filter)
res = res.order_by(RunHistory.time.desc())
if limit:
res = res.limit(limit).offset(offset)
results = []
for history in res:
analyzer_statistics = {}
for stat in history.analyzer_statistics:
analyzer_statistics[stat.analyzer_type] = \
ttypes.AnalyzerStatistics(
failed=stat.failed,
successful=stat.successful)
results.append(RunHistoryData(
id=history.id,
runId=history.run.id,
runName=history.run.name,
versionTag=history.version_tag,
user=history.user,
time=str(history.time),
codeCheckerVersion=history.cc_version,
analyzerStatistics=analyzer_statistics))
return results
@exc_to_thrift_reqfail
@timeit
def getRunHistoryCount(self, run_ids, run_history_filter):
self.__require_access()
with DBSession(self.__Session) as session:
query = session.query(RunHistory.id)
query = process_run_history_filter(query,
run_ids,
run_history_filter)
return query.count()
@exc_to_thrift_reqfail
@timeit
def getReport(self, reportId):
self.__require_access()
with DBSession(self.__Session) as session:
result = session.query(Report,
File,
ReviewStatus) \
.filter(Report.id == reportId) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.limit(1).one_or_none()
if not result:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"Report " + str(reportId) + " not found!")
report, source_file, review_status = result
return ReportData(
runId=report.run_id,
bugHash=report.bug_id,
checkedFile=source_file.filepath,
checkerMsg=report.checker_message,
reportId=report.id,
fileId=source_file.id,
line=report.line,
column=report.column,
checkerId=report.checker_id,
severity=report.severity,
reviewData=create_review_data(review_status),
detectionStatus=detection_status_enum(report.detection_status),
detectedAt=str(report.detected_at),
fixedAt=str(report.fixed_at) if report.fixed_at else None)
@exc_to_thrift_reqfail
@timeit
def getDiffResultsHash(self, run_ids, report_hashes, diff_type,
skip_detection_statuses):
self.__require_access()
if not skip_detection_statuses:
skip_detection_statuses = [ttypes.DetectionStatus.RESOLVED,
ttypes.DetectionStatus.OFF,
ttypes.DetectionStatus.UNAVAILABLE]
# Convert statuses to string.
skip_statuses_str = [detection_status_str(status)
for status in skip_detection_statuses]
with DBSession(self.__Session) as session:
if diff_type == DiffType.NEW:
# In postgresql we can select multiple rows filled with
# constants by using `unnest` function. In sqlite we have to
# use multiple UNION ALL.
if not report_hashes:
return []
base_hashes = session.query(Report.bug_id.label('bug_id')) \
.outerjoin(File, Report.file_id == File.id) \
.filter(Report.detection_status.notin_(skip_statuses_str))
if run_ids:
base_hashes = \
base_hashes.filter(Report.run_id.in_(run_ids))
if self.__product.driver_name == 'postgresql':
new_hashes = select([func.unnest(report_hashes)
.label('bug_id')]) \
.except_(base_hashes).alias('new_bugs')
return [res[0] for res in session.query(new_hashes)]
else:
# The maximum number of compound select in sqlite is 500
# by default. We increased SQLITE_MAX_COMPOUND_SELECT
# limit but when the number of compound select was larger
# than 8435 sqlite threw a `Segmentation fault` error.
# For this reason we create queries with chunks.
new_hashes = []
chunk_size = 500
for chunk in [report_hashes[i:i + chunk_size] for
i in range(0, len(report_hashes),
chunk_size)]:
new_hashes_query = union_all(*[
select([bindparam('bug_id' + str(i), h)
.label('bug_id')])
for i, h in enumerate(chunk)])
q = select([new_hashes_query]).except_(base_hashes)
new_hashes.extend([res[0] for res in session.query(q)])
return new_hashes
elif diff_type == DiffType.RESOLVED:
results = session.query(Report.bug_id) \
.filter(Report.bug_id.notin_(report_hashes))
if run_ids:
results = results.filter(Report.run_id.in_(run_ids))
return [res[0] for res in results]
elif diff_type == DiffType.UNRESOLVED:
results = session.query(Report.bug_id) \
.filter(Report.bug_id.in_(report_hashes)) \
.filter(Report.detection_status.notin_(skip_statuses_str))
if run_ids:
results = results.filter(Report.run_id.in_(run_ids))
return [res[0] for res in results]
else:
return []
@exc_to_thrift_reqfail
@timeit
def getRunResults(self, run_ids, limit, offset, sort_types,
report_filter, cmp_data, get_details):
self.__require_access()
limit = verify_limit_range(limit)
with DBSession(self.__Session) as session:
results = []
diff_hashes = None
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return results
filter_expression = process_report_filter(session, report_filter)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
sort_types, sort_type_map, order_type_map = \
get_sort_map(sort_types, True)
selects = [func.max(Report.id).label('id')]
for sort in sort_types:
sorttypes = sort_type_map.get(sort.type)
for sorttype in sorttypes:
if sorttype[0] != 'bug_path_length':
selects.append(func.max(sorttype[0])
.label(sorttype[1]))
unique_reports = session.query(*selects)
unique_reports = filter_report_filter(unique_reports,
filter_expression,
run_ids,
cmp_data,
diff_hashes)
unique_reports = unique_reports \
.group_by(Report.bug_id) \
.subquery()
# Sort the results
sorted_reports = \
session.query(unique_reports.c.id)
sorted_reports = sort_results_query(sorted_reports,
sort_types,
sort_type_map,
order_type_map,
True)
sorted_reports = sorted_reports \
.limit(limit).offset(offset).subquery()
q = session.query(Report.id, Report.bug_id,
Report.checker_message, Report.checker_id,
Report.severity, Report.detected_at,
Report.fixed_at, ReviewStatus,
File.filename, File.filepath,
Report.path_length) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.outerjoin(sorted_reports,
sorted_reports.c.id == Report.id) \
.filter(sorted_reports.c.id.isnot(None))
# We have to sort the results again because an ORDER BY in a
# subtable is broken by the JOIN.
q = sort_results_query(q,
sort_types,
sort_type_map,
order_type_map)
query_result = q.all()
# Get report details if it is required.
report_details = {}
if get_details:
report_ids = [r[0] for r in query_result]
report_details = get_report_details(session, report_ids)
for report_id, bug_id, checker_msg, checker, severity, \
detected_at, fixed_at, status, filename, path, \
bug_path_len in query_result:
review_data = create_review_data(status)
results.append(
ReportData(bugHash=bug_id,
checkedFile=filename,
checkerMsg=checker_msg,
checkerId=checker,
severity=severity,
reviewData=review_data,
detectedAt=str(detected_at),
fixedAt=str(fixed_at),
bugPathLength=bug_path_len,
details=report_details.get(report_id)))
else:
q = session.query(Report.run_id, Report.id, Report.file_id,
Report.line, Report.column,
Report.detection_status, Report.bug_id,
Report.checker_message, Report.checker_id,
Report.severity, Report.detected_at,
Report.fixed_at, ReviewStatus,
File.filepath,
Report.path_length) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.filter(filter_expression)
if run_ids:
q = q.filter(Report.run_id.in_(run_ids))
if cmp_data:
q = q.filter(Report.bug_id.in_(diff_hashes))
sort_types, sort_type_map, order_type_map = \
get_sort_map(sort_types)
q = sort_results_query(q, sort_types, sort_type_map,
order_type_map)
q = q.limit(limit).offset(offset)
query_result = q.all()
# Get report details if it is required.
report_details = {}
if get_details:
report_ids = [r[1] for r in query_result]
report_details = get_report_details(session, report_ids)
for run_id, report_id, file_id, line, column, d_status, \
bug_id, checker_msg, checker, severity, detected_at,\
fixed_at, r_status, path, bug_path_len \
in query_result:
review_data = create_review_data(r_status)
results.append(
ReportData(runId=run_id,
bugHash=bug_id,
checkedFile=path,
checkerMsg=checker_msg,
reportId=report_id,
fileId=file_id,
line=line,
column=column,
checkerId=checker,
severity=severity,
reviewData=review_data,
detectionStatus=detection_status_enum(
d_status),
detectedAt=str(detected_at),
fixedAt=str(fixed_at) if fixed_at else None,
bugPathLength=bug_path_len,
details=report_details.get(report_id)))
return results
@timeit
def getRunReportCounts(self, run_ids, report_filter, limit, offset):
"""
Count the results separately for multiple runs.
If an empty run id list is provided the report
counts will be calculated for all of the available runs.
"""
self.__require_access()
results = []
with DBSession(self.__Session) as session:
filter_expression = process_report_filter(session, report_filter)
count_expr = create_count_expression(report_filter)
q = session.query(Run.id,
Run.name,
count_expr) \
.select_from(Report)
if run_ids:
q = q.filter(Report.run_id.in_(run_ids))
q = q.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.outerjoin(Run,
Report.run_id == Run.id) \
.filter(filter_expression) \
.order_by(Run.name) \
.group_by(Run.id)
if limit:
q = q.limit(limit).offset(offset)
for run_id, run_name, count in q:
report_count = RunReportCount(runId=run_id,
name=run_name,
reportCount=count)
results.append(report_count)
return results
@exc_to_thrift_reqfail
@timeit
def getRunResultCount(self, run_ids, report_filter, cmp_data):
self.__require_access()
with DBSession(self.__Session) as session:
diff_hashes = None
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return 0
filter_expression = process_report_filter(session, report_filter)
q = session.query(Report.bug_id)
q = filter_report_filter(q, filter_expression, run_ids, cmp_data,
diff_hashes)
if report_filter is not None and report_filter.isUnique:
q = q.group_by(Report.bug_id)
report_count = q.count()
if report_count is None:
report_count = 0
return report_count
@staticmethod
@timeit
def __construct_bug_item_list(session, report_id, item_type):
q = session.query(item_type) \
.filter(item_type.report_id == report_id) \
.order_by(item_type.order)
bug_items = []
for event in q:
f = session.query(File).get(event.file_id)
bug_items.append((event, f.filepath))
return bug_items
@exc_to_thrift_reqfail
@timeit
def getReportDetails(self, reportId):
"""
Parameters:
- reportId
"""
self.__require_access()
with DBSession(self.__Session) as session:
return get_report_details(session, [reportId])[reportId]
def _setReviewStatus(self, report_id, status, message, session):
"""
This function sets the review status of the given report. This is the
implementation of changeReviewStatus(), but it is also extended with
a session parameter which represents a database transaction. This is
needed because during storage a specific session object has to be used.
"""
report = session.query(Report).get(report_id)
if report:
review_status = session.query(ReviewStatus).get(report.bug_id)
if review_status is None:
review_status = ReviewStatus()
review_status.bug_hash = report.bug_id
user = self.__get_username()
old_status = review_status.status if review_status.status \
else review_status_str(ttypes.ReviewStatus.UNREVIEWED)
review_status.status = review_status_str(status)
review_status.author = user
review_status.message = message.encode('utf8')
review_status.date = datetime.now()
session.add(review_status)
if message:
system_comment_msg = 'rev_st_changed_msg {0} {1} {2}'.format(
escape_whitespaces(old_status.capitalize()),
escape_whitespaces(review_status.status.capitalize()),
escape_whitespaces(message))
else:
system_comment_msg = 'rev_st_changed {0} {1}'.format(
escape_whitespaces(old_status.capitalize()),
escape_whitespaces(review_status.status.capitalize()))
system_comment = self.__add_comment(review_status.bug_hash,
system_comment_msg,
CommentKindValue.SYSTEM)
session.add(system_comment)
session.flush()
return True
else:
msg = "No report found in the database."
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def isReviewStatusChangeDisabled(self):
"""
Return True if review status change is disabled.
"""
with DBSession(self.__config_database) as session:
product = session.query(Product).get(self.__product.id)
return product.is_review_status_change_disabled
@exc_to_thrift_reqfail
@timeit
def changeReviewStatus(self, report_id, status, message):
"""
Change review status of the bug by report id.
"""
self.__require_permission([permissions.PRODUCT_ACCESS,
permissions.PRODUCT_STORE])
if self.isReviewStatusChangeDisabled():
msg = "Review status change is disabled!"
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL, msg)
with DBSession(self.__Session) as session:
res = self._setReviewStatus(report_id, status, message, session)
session.commit()
LOG.info("Review status of report '%s' was changed to '%s' by %s.",
report_id, review_status_str(status),
self.__get_username())
return res
@exc_to_thrift_reqfail
@timeit
def getComments(self, report_id):
"""
Return the list of comments for the given bug.
"""
self.__require_access()
with DBSession(self.__Session) as session:
report = session.query(Report).get(report_id)
if report:
result = []
comments = session.query(Comment) \
.filter(Comment.bug_hash == report.bug_id) \
.order_by(Comment.created_at.desc()) \
.all()
context = webserver_context.get_context()
for comment in comments:
message = comment.message
sys_comment = comment_kind_from_thrift_type(
ttypes.CommentKind.SYSTEM)
if comment.kind == sys_comment:
elements = shlex.split(message)
system_comment = context.system_comment_map.get(
elements[0])
if system_comment:
for idx, value in enumerate(elements[1:]):
system_comment = system_comment.replace(
'{' + str(idx) + '}', value)
message = system_comment
result.append(CommentData(
comment.id,
comment.author,
message,
str(comment.created_at),
comment_kind_to_thrift_type(comment.kind)))
return result
else:
msg = 'Report id ' + str(report_id) + \
' was not found in the database.'
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def getCommentCount(self, report_id):
"""
Return the number of comments for the given bug.
"""
self.__require_access()
with DBSession(self.__Session) as session:
report = session.query(Report).get(report_id)
if report:
commentCount = session.query(Comment) \
.filter(Comment.bug_hash == report.bug_id) \
.count()
if commentCount is None:
commentCount = 0
return commentCount
@exc_to_thrift_reqfail
@timeit
def addComment(self, report_id, comment_data):
""" Add new comment for the given bug. """
self.__require_access()
if not comment_data.message.strip():
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL,
'The comment message can not be empty!')
with DBSession(self.__Session) as session:
report = session.query(Report).get(report_id)
if report:
comment = self.__add_comment(report.bug_id,
comment_data.message)
session.add(comment)
session.commit()
return True
else:
msg = 'Report id ' + str(report_id) + \
' was not found in the database.'
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def updateComment(self, comment_id, content):
"""
Update the given comment message with new content. We allow
comments to be updated by their original author only, except for
Anonymous comments, which can be updated by anybody.
"""
self.__require_access()
if not content.strip():
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL,
'The comment message can not be empty!')
with DBSession(self.__Session) as session:
user = self.__get_username()
comment = session.query(Comment).get(comment_id)
if comment:
if comment.author != 'Anonymous' and comment.author != user:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,
'Unauthorized comment modification!')
# Create system comment if the message is changed.
if comment.message != content:
system_comment_msg = 'comment_changed {0} {1}'.format(
escape_whitespaces(comment.message),
escape_whitespaces(content))
system_comment = \
self.__add_comment(comment.bug_hash,
system_comment_msg,
CommentKindValue.SYSTEM)
session.add(system_comment)
comment.message = content
session.add(comment)
session.commit()
return True
else:
msg = 'Comment id ' + str(comment_id) + \
' was not found in the database.'
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def removeComment(self, comment_id):
"""
Remove the comment. We allow comments to be removed by their
original author only, except for Anonymous comments, which can be
removed by anybody.
"""
self.__require_access()
user = self.__get_username()
with DBSession(self.__Session) as session:
comment = session.query(Comment).get(comment_id)
if comment:
if comment.author != 'Anonymous' and comment.author != user:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,
'Unauthorized comment modification!')
session.delete(comment)
session.commit()
LOG.info("Comment '%s...' was removed from bug hash '%s' by "
"'%s'.", comment.message[:10], comment.bug_hash,
self.__get_username())
return True
else:
msg = 'Comment id ' + str(comment_id) + \
' was not found in the database.'
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def getCheckerDoc(self, checkerId):
"""
Parameters:
- checkerId
"""
missing_doc = "No documentation found for checker: " + checkerId + \
"\n\nPlease refer to the documentation at the "
if "." in checkerId:
sa_link = "http://clang-analyzer.llvm.org/available_checks.html"
missing_doc += "[ClangSA](" + sa_link + ")"
elif "-" in checkerId:
tidy_link = "http://clang.llvm.org/extra/clang-tidy/checks/" + \
checkerId + ".html"
missing_doc += "[ClangTidy](" + tidy_link + ")"
missing_doc += " homepage."
try:
md_file = self.__checker_doc_map.get(checkerId)
if md_file:
md_file = os.path.join(self.__checker_md_docs, md_file)
try:
with io.open(md_file, 'r') as md_content:
missing_doc = md_content.read()
except (IOError, OSError):
LOG.warning("Failed to read checker documentation: %s",
md_file)
return missing_doc
except Exception as ex:
msg = str(ex)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.IOERROR, msg)
@exc_to_thrift_reqfail
@timeit
def getSourceFileData(self, fileId, fileContent, encoding):
"""
Parameters:
- fileId
- fileContent
- enum Encoding
"""
self.__require_access()
with DBSession(self.__Session) as session:
sourcefile = session.query(File).get(fileId)
if sourcefile is None:
return SourceFileData()
if fileContent:
cont = session.query(FileContent).get(sourcefile.content_hash)
source = zlib.decompress(cont.content)
if not encoding or encoding == Encoding.DEFAULT:
source = codecs.decode(source, 'utf-8', 'replace')
elif encoding == Encoding.BASE64:
source = base64.b64encode(source)
return SourceFileData(fileId=sourcefile.id,
filePath=sourcefile.filepath,
fileContent=source)
else:
return SourceFileData(fileId=sourcefile.id,
filePath=sourcefile.filepath)
@exc_to_thrift_reqfail
@timeit
def getLinesInSourceFileContents(self, lines_in_files_requested, encoding):
self.__require_access()
with DBSession(self.__Session) as session:
res = defaultdict(lambda: defaultdict(str))
for lines_in_file in lines_in_files_requested:
sourcefile = session.query(File).get(lines_in_file.fileId)
cont = session.query(FileContent).get(sourcefile.content_hash)
lines = zlib.decompress(cont.content).split('\n')
for line in lines_in_file.lines:
content = '' if len(lines) < line else lines[line - 1]
if not encoding or encoding == Encoding.DEFAULT:
content = codecs.decode(content, 'utf-8', 'replace')
elif encoding == Encoding.BASE64:
content = base64.b64encode(content)
res[lines_in_file.fileId][line] = content
return res
def _cmp_helper(self, session, run_ids, report_filter, cmp_data):
"""
Get the report hashes for all of the runs.
Return the hash list which should be queried
in the returned run id list.
"""
if not run_ids:
run_ids = ThriftRequestHandler.__get_run_ids_to_query(session,
cmp_data)
base_run_ids = run_ids
new_run_ids = cmp_data.runIds
diff_type = cmp_data.diffType
tag_ids = report_filter.runTag if report_filter else None
base_line_hashes = get_report_hashes(session,
base_run_ids,
tag_ids)
# If run tag is set in compare data, after base line hashes are
# calculated remove it from the report filter because we will filter
# results by these hashes and there is no need to filter results by
# these tags again.
if cmp_data.runTag:
report_filter.runTag = None
if not new_run_ids and not cmp_data.runTag:
return base_line_hashes, base_run_ids
new_check_hashes = get_report_hashes(session,
new_run_ids,
cmp_data.runTag)
report_hashes, run_ids = \
get_diff_hashes_for_query(base_run_ids,
base_line_hashes,
new_run_ids,
new_check_hashes,
diff_type)
return report_hashes, run_ids
@exc_to_thrift_reqfail
@timeit
def getCheckerCounts(self, run_ids, report_filter, cmp_data, limit,
offset):
"""
If the run id list is empty, the metrics will be counted
for all of the runs; in compare mode, all of the runs
will be used as a baseline, excluding the runs in the compare data.
"""
self.__require_access()
results = []
with DBSession(self.__Session) as session:
diff_hashes = None
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return results
filter_expression = process_report_filter(session, report_filter)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(func.max(Report.checker_id).label(
'checker_id'),
func.max(Report.severity).label(
'severity'),
Report.bug_id)
else:
q = session.query(Report.checker_id,
Report.severity,
func.count(Report.id))
q = filter_report_filter(q, filter_expression, run_ids, cmp_data,
diff_hashes)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
unique_checker_q = session.query(q.c.checker_id,
func.max(q.c.severity),
func.count(q.c.bug_id)) \
.group_by(q.c.checker_id) \
.order_by(q.c.checker_id)
else:
unique_checker_q = q.group_by(Report.checker_id,
Report.severity) \
.order_by(Report.checker_id)
if limit:
unique_checker_q = unique_checker_q.limit(limit).offset(offset)
for name, severity, count in unique_checker_q:
checker_count = CheckerCount(name=name,
severity=severity,
count=count)
results.append(checker_count)
return results
@exc_to_thrift_reqfail
@timeit
def getSeverityCounts(self, run_ids, report_filter, cmp_data):
"""
If the run id list is empty, the metrics will be counted
for all of the runs; in compare mode, all of the runs
will be used as a baseline, excluding the runs in the compare data.
"""
self.__require_access()
results = {}
with DBSession(self.__Session) as session:
diff_hashes = None
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return results
filter_expression = process_report_filter(session, report_filter)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(func.max(Report.severity).label('severity'),
Report.bug_id)
else:
q = session.query(Report.severity,
func.count(Report.id))
q = filter_report_filter(q, filter_expression, run_ids, cmp_data,
diff_hashes)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
severities = session.query(q.c.severity,
func.count(q.c.bug_id)) \
.group_by(q.c.severity)
else:
severities = q.group_by(Report.severity)
results = dict(severities)
return results
@exc_to_thrift_reqfail
@timeit
def getCheckerMsgCounts(self, run_ids, report_filter, cmp_data, limit,
offset):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
results = {}
with DBSession(self.__Session) as session:
diff_hashes = None
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return results
filter_expression = process_report_filter(session, report_filter)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(func.max(Report.checker_message).label(
'checker_message'),
Report.bug_id)
else:
q = session.query(Report.checker_message,
func.count(Report.id))
q = filter_report_filter(q, filter_expression, run_ids, cmp_data,
diff_hashes)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
checker_messages = session.query(q.c.checker_message,
func.count(q.c.bug_id)) \
.group_by(q.c.checker_message) \
.order_by(q.c.checker_message)
else:
checker_messages = q.group_by(Report.checker_message) \
.order_by(Report.checker_message)
if limit:
checker_messages = checker_messages.limit(limit).offset(offset)
results = dict(checker_messages.all())
return results
@exc_to_thrift_reqfail
@timeit
def getReviewStatusCounts(self, run_ids, report_filter, cmp_data):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
results = defaultdict(int)
with DBSession(self.__Session) as session:
diff_hashes = None
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return results
filter_expression = process_report_filter(session, report_filter)
is_unique = report_filter is not None and report_filter.isUnique
if is_unique:
q = session.query(Report.bug_id,
func.max(ReviewStatus.status).label(
'status'))
else:
q = session.query(func.max(Report.bug_id),
ReviewStatus.status,
func.count(Report.id))
q = filter_report_filter(q, filter_expression, run_ids, cmp_data,
diff_hashes)
if is_unique:
q = q.group_by(Report.bug_id).subquery()
review_statuses = session.query(func.max(q.c.bug_id),
q.c.status,
func.count(q.c.bug_id)) \
.group_by(q.c.status)
else:
review_statuses = q.group_by(ReviewStatus.status)
for _, rev_status, count in review_statuses:
if rev_status is None:
# If no review status is set count it as unreviewed.
rev_status = ttypes.ReviewStatus.UNREVIEWED
results[rev_status] += count
else:
rev_status = review_status_enum(rev_status)
results[rev_status] += count
return results
@exc_to_thrift_reqfail
@timeit
def getFileCounts(self, run_ids, report_filter, cmp_data, limit, offset):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
results = {}
with DBSession(self.__Session) as session:
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return results
filter_expression = process_report_filter(session, report_filter)
stmt = session.query(Report.bug_id,
Report.file_id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.outerjoin(File,
File.id == Report.file_id) \
.filter(filter_expression)
if run_ids:
stmt = stmt.filter(Report.run_id.in_(run_ids))
if report_filter is not None and report_filter.isUnique:
stmt = stmt.group_by(Report.bug_id, Report.file_id)
stmt = stmt.subquery()
# When using pg8000, 1 cannot be passed as parameter to the count
# function. This is the reason why we have to convert it to
# Integer (see: https://github.com/mfenniak/pg8000/issues/110)
count_int = cast(1, sqlalchemy.Integer)
report_count = session.query(stmt.c.file_id,
func.count(count_int).label(
'report_count')) \
.group_by(stmt.c.file_id)
if limit:
report_count = report_count.limit(limit).offset(offset)
report_count = report_count.subquery()
file_paths = session.query(File.filepath,
report_count.c.report_count) \
.join(report_count,
report_count.c.file_id == File.id)
for fp, count in file_paths:
results[fp] = count
return results
@exc_to_thrift_reqfail
@timeit
def getRunHistoryTagCounts(self, run_ids, report_filter, cmp_data):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
results = []
with DBSession(self.__Session) as session:
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return results
filter_expression = process_report_filter(session, report_filter)
tag_run_ids = session.query(RunHistory.run_id.distinct()) \
.filter(RunHistory.version_tag.isnot(None)) \
.subquery()
report_cnt_q = session.query(Report.run_id,
Report.bug_id,
Report.detected_at,
Report.fixed_at) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.filter(filter_expression) \
.filter(Report.run_id.in_(tag_run_ids)) \
.subquery()
is_unique = report_filter is not None and report_filter.isUnique
count_expr = func.count(report_cnt_q.c.bug_id if not is_unique
else report_cnt_q.c.bug_id.distinct())
count_q = session.query(RunHistory.id.label('run_history_id'),
count_expr.label('report_count')) \
.outerjoin(report_cnt_q,
report_cnt_q.c.run_id == RunHistory.run_id) \
.filter(RunHistory.version_tag.isnot(None)) \
.filter(and_(report_cnt_q.c.detected_at <= RunHistory.time,
or_(report_cnt_q.c.fixed_at.is_(None),
report_cnt_q.c.fixed_at >=
RunHistory.time))) \
.group_by(RunHistory.id) \
.subquery()
tag_q = session.query(RunHistory.run_id.label('run_id'),
RunHistory.id.label('run_history_id')) \
.filter(RunHistory.version_tag.isnot(None))
if run_ids:
tag_q = tag_q.filter(RunHistory.run_id.in_(run_ids))
tag_q = tag_q.subquery()
q = session.query(tag_q.c.run_history_id,
func.max(Run.name).label('run_name'),
func.max(RunHistory.id),
func.max(RunHistory.time),
func.max(RunHistory.version_tag),
func.max(count_q.c.report_count)) \
.outerjoin(RunHistory,
RunHistory.id == tag_q.c.run_history_id) \
.outerjoin(Run, Run.id == tag_q.c.run_id) \
.outerjoin(count_q,
count_q.c.run_history_id == RunHistory.id) \
.filter(RunHistory.version_tag.isnot(None)) \
.group_by(tag_q.c.run_history_id, RunHistory.time) \
.order_by(RunHistory.time.desc())
for _, run_name, tag_id, version_time, tag, count in q:
if tag:
results.append(RunTagCount(id=tag_id,
time=str(version_time),
name=tag,
runName=run_name,
count=count if count else 0))
return results
@exc_to_thrift_reqfail
@timeit
def getDetectionStatusCounts(self, run_ids, report_filter, cmp_data):
"""
If the run id list is empty the metrics will be counted
for all of the runs and in compare mode all of the runs
will be used as a baseline excluding the runs in compare data.
"""
self.__require_access()
results = {}
with DBSession(self.__Session) as session:
diff_hashes = None
if cmp_data:
diff_hashes, run_ids = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return results
filter_expression = process_report_filter(session, report_filter)
count_expr = func.count(literal_column('*'))
q = session.query(Report.detection_status,
count_expr)
q = filter_report_filter(q, filter_expression, run_ids, cmp_data,
diff_hashes)
detection_stats = q.group_by(Report.detection_status).all()
results = dict(detection_stats)
results = {detection_status_enum(k): v for k, v in results.items()}
return results
# -----------------------------------------------------------------------
@timeit
def getPackageVersion(self):
return self.__package_version
# -----------------------------------------------------------------------
@exc_to_thrift_reqfail
@timeit
def removeRunResults(self, run_ids):
self.__require_store()
failed = False
for run_id in run_ids:
try:
self.removeRun(run_id, None)
except Exception as ex:
LOG.error("Failed to remove run: %s", run_id)
LOG.error(ex)
failed = True
return not failed
def __removeReports(self, session, report_ids, chunk_size=500):
"""
Removing reports in chunks.
"""
for r_ids in [report_ids[i:i + chunk_size] for
i in range(0, len(report_ids),
chunk_size)]:
session.query(Report) \
.filter(Report.id.in_(r_ids)) \
.delete(synchronize_session=False)
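    # Illustrative sketch (not part of the original handler): the slicing above
    # splits a long id list into fixed-size batches so each generated
    # "IN (...)" clause stays small. For example, with chunk_size=2:
    #   [1, 2, 3, 4, 5] -> [[1, 2], [3, 4], [5]]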
@exc_to_thrift_reqfail
@timeit
def removeRunReports(self, run_ids, report_filter, cmp_data):
self.__require_store()
if not run_ids:
run_ids = []
if cmp_data and cmp_data.runIds:
run_ids.extend(cmp_data.runIds)
with DBSession(self.__Session) as session:
check_remove_runs_lock(session, run_ids)
try:
diff_hashes = None
if cmp_data:
diff_hashes, _ = self._cmp_helper(session,
run_ids,
report_filter,
cmp_data)
if not diff_hashes:
# There is no difference.
return True
filter_expression = process_report_filter(session,
report_filter)
q = session.query(Report.id) \
.outerjoin(File, Report.file_id == File.id) \
.outerjoin(ReviewStatus,
ReviewStatus.bug_hash == Report.bug_id) \
.filter(filter_expression)
if run_ids:
q = q.filter(Report.run_id.in_(run_ids))
if cmp_data:
q = q.filter(Report.bug_id.in_(diff_hashes))
reports_to_delete = [r[0] for r in q]
if reports_to_delete:
self.__removeReports(session, reports_to_delete)
# Delete files and contents that are not present
# in any bug paths.
db_cleanup.remove_unused_files(session)
session.commit()
session.close()
return True
except Exception as ex:
session.rollback()
LOG.error("Database cleanup failed.")
LOG.error(ex)
return False
@exc_to_thrift_reqfail
@timeit
def removeRun(self, run_id, run_filter):
self.__require_store()
# Remove the whole run.
with DBSession(self.__Session) as session:
check_remove_runs_lock(session, [run_id])
if not run_filter:
run_filter = RunFilter(ids=[run_id])
q = session.query(Run)
q = process_run_filter(session, q, run_filter)
q.delete(synchronize_session=False)
# Delete files and contents that are not present
# in any bug paths.
db_cleanup.remove_unused_files(session)
session.commit()
session.close()
LOG.info("Run '%s' was removed by '%s'.", run_id,
self.__get_username())
return True
@exc_to_thrift_reqfail
@timeit
def updateRunData(self, run_id, new_run_name):
self.__require_store()
if not new_run_name:
msg = 'No new run name was given to update the run.'
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL, msg)
with DBSession(self.__Session) as session:
check_new_run_name = session.query(Run) \
.filter(Run.name == new_run_name) \
.all()
if check_new_run_name:
msg = "New run name '" + new_run_name + "' already exists."
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
run_data = session.query(Run).get(run_id)
if run_data:
old_run_name = run_data.name
run_data.name = new_run_name
session.add(run_data)
session.commit()
LOG.info("Run name '%s' (%d) was changed to %s by '%s'.",
old_run_name, run_id, new_run_name,
self.__get_username())
return True
else:
msg = 'Run id ' + str(run_id) + \
' was not found in the database.'
LOG.error(msg)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
return True
@exc_to_thrift_reqfail
def getSuppressFile(self):
"""
DEPRECATED the server is not started with a suppress file anymore.
Returning empty string.
"""
self.__require_access()
return ''
@exc_to_thrift_reqfail
@timeit
def addSourceComponent(self, name, value, description):
"""
Adds a new source if it does not exist or updates an old one.
"""
self.__require_admin()
with DBSession(self.__Session) as session:
component = session.query(SourceComponent).get(name)
user = self.__auth_session.user if self.__auth_session else None
if component:
component.value = value
component.description = description
component.user = user
else:
component = SourceComponent(name,
value,
description,
user)
session.add(component)
session.commit()
return True
@exc_to_thrift_reqfail
@timeit
def getSourceComponents(self, component_filter):
"""
Returns the available source components.
"""
self.__require_access()
with DBSession(self.__Session) as session:
q = session.query(SourceComponent)
            if component_filter:
sql_component_filter = [SourceComponent.name.ilike(conv(cf))
for cf in component_filter]
q = q.filter(*sql_component_filter)
q = q.order_by(SourceComponent.name)
return list(map(lambda c:
SourceComponentData(c.name,
c.value,
c.description), q))
@exc_to_thrift_reqfail
@timeit
def removeSourceComponent(self, name):
"""
Removes a source component.
"""
self.__require_admin()
with DBSession(self.__Session) as session:
component = session.query(SourceComponent).get(name)
if component:
session.delete(component)
session.commit()
LOG.info("Source component '%s' has been removed by '%s'",
name, self.__get_username())
return True
else:
msg = 'Source component ' + str(name) + \
' was not found in the database.'
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE, msg)
@exc_to_thrift_reqfail
@timeit
def getMissingContentHashes(self, file_hashes):
self.__require_store()
if not file_hashes:
return []
with DBSession(self.__Session) as session:
q = session.query(FileContent) \
.options(sqlalchemy.orm.load_only('content_hash')) \
.filter(FileContent.content_hash.in_(file_hashes))
return list(set(file_hashes) -
set(map(lambda fc: fc.content_hash, q)))
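    # Example of the set difference above (hypothetical hashes): if the client
    # offers {'a1', 'b2', 'c3'} and only 'a1' is already stored as a
    # FileContent row, the call returns ['b2', 'c3'] (in arbitrary order),
    # i.e. the contents the client still needs to upload.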
def __store_source_files(self, source_root, filename_to_hash,
trim_path_prefixes):
"""
Storing file contents from plist.
"""
file_path_to_id = {}
for file_name, file_hash in filename_to_hash.items():
source_file_name = os.path.join(source_root,
file_name.strip("/"))
source_file_name = os.path.realpath(source_file_name)
LOG.debug("Storing source file: %s", source_file_name)
trimmed_file_path = util.trim_path_prefixes(file_name,
trim_path_prefixes)
if not os.path.isfile(source_file_name):
# The file was not in the ZIP file, because we already
# have the content. Let's check if we already have a file
# record in the database or we need to add one.
LOG.debug(file_name + ' not found or already stored.')
with DBSession(self.__Session) as session:
fid = store_handler.addFileRecord(session,
trimmed_file_path,
file_hash)
if not fid:
LOG.error("File ID for %s is not found in the DB with "
"content hash %s. Missing from ZIP?",
source_file_name, file_hash)
file_path_to_id[file_name] = fid
LOG.debug("%d fileid found", fid)
continue
with DBSession(self.__Session) as session:
file_path_to_id[file_name] = \
store_handler.addFileContent(session,
trimmed_file_path,
source_file_name,
file_hash,
None)
return file_path_to_id
def __store_reports(self, session, report_dir, source_root, run_id,
file_path_to_id, run_history_time, severity_map,
wrong_src_code_comments, skip_handler,
checkers):
"""
        Parse and store the plist report files.
"""
all_reports = session.query(Report) \
.filter(Report.run_id == run_id) \
.all()
hash_map_reports = defaultdict(list)
for report in all_reports:
hash_map_reports[report.bug_id].append(report)
already_added = set()
new_bug_hashes = set()
        # Get checker names which were enabled during the analysis.
enabled_checkers = set()
disabled_checkers = set()
for analyzer_checkers in checkers.values():
if isinstance(analyzer_checkers, dict):
for checker_name, enabled in analyzer_checkers.iteritems():
if enabled:
enabled_checkers.add(checker_name)
else:
disabled_checkers.add(checker_name)
else:
enabled_checkers.update(analyzer_checkers)
def checker_is_unavailable(checker_name):
"""
Returns True if the given checker is unavailable.
We filter out checkers which start with 'clang-diagnostic-' because
these are warnings and the warning list is not available right now.
FIXME: using the 'diagtool' could be a solution later so the
client can send the warning list to the server.
"""
return not checker_name.startswith('clang-diagnostic-') and \
enabled_checkers and checker_name not in enabled_checkers
sc_handler = SourceCodeCommentHandler()
# Processing PList files.
_, _, report_files = next(os.walk(report_dir), ([], [], []))
for f in report_files:
if not f.endswith('.plist'):
continue
LOG.debug("Parsing input file '%s'", f)
try:
files, reports = plist_parser.parse_plist_file(
os.path.join(report_dir, f), source_root)
except Exception as ex:
LOG.error('Parsing the plist failed: %s', str(ex))
continue
file_ids = {}
for file_name in files:
file_ids[file_name] = file_path_to_id[file_name]
# Store report.
for report in reports:
checker_name = report.main['check_name']
source_file = files[report.main['location']['file']]
if skip_handler.should_skip(source_file):
continue
bug_paths, bug_events, bug_extended_data = \
store_handler.collect_paths_events(report, file_ids,
files)
report_path_hash = get_report_path_hash(report, files)
if report_path_hash in already_added:
LOG.debug('Not storing report. Already added')
LOG.debug(report)
continue
LOG.debug("Storing check results to the database.")
LOG.debug("Storing report")
bug_id = report.main[
'issue_hash_content_of_line_in_context']
detection_status = 'new'
detected_at = run_history_time
if bug_id in hash_map_reports:
old_report = hash_map_reports[bug_id][0]
old_status = old_report.detection_status
detection_status = 'reopened' \
if old_status == 'resolved' else 'unresolved'
detected_at = old_report.detected_at
if checker_name in disabled_checkers:
detection_status = 'off'
elif checker_is_unavailable(checker_name):
detection_status = 'unavailable'
report_id = store_handler.addReport(
session,
run_id,
file_ids[source_file],
report.main,
bug_paths,
bug_events,
bug_extended_data,
detection_status,
detected_at,
severity_map)
new_bug_hashes.add(bug_id)
already_added.add(report_path_hash)
last_report_event = report.bug_path[-1]
file_name = files[last_report_event['location']['file']]
source_file_name = os.path.realpath(
os.path.join(source_root, file_name.strip("/")))
if os.path.isfile(source_file_name):
report_line = last_report_event['location']['line']
source_file = os.path.basename(file_name)
src_comment_data = sc_handler.filter_source_line_comments(
source_file_name,
report_line,
checker_name)
if len(src_comment_data) == 1:
status = src_comment_data[0]['status']
rw_status = ttypes.ReviewStatus.FALSE_POSITIVE
if status == 'confirmed':
rw_status = ttypes.ReviewStatus.CONFIRMED
elif status == 'intentional':
rw_status = ttypes.ReviewStatus.INTENTIONAL
self._setReviewStatus(report_id,
rw_status,
src_comment_data[0]['message'],
session)
elif len(src_comment_data) > 1:
LOG.warning(
"Multiple source code comment can be found "
"for '%s' checker in '%s' at line %s. "
"This bug will not be suppressed!",
checker_name, source_file, report_line)
wrong_src_code = "{0}|{1}|{2}".format(source_file,
report_line,
checker_name)
wrong_src_code_comments.append(wrong_src_code)
LOG.debug("Storing done for report %d", report_id)
reports_to_delete = set()
for bug_hash, reports in hash_map_reports.items():
if bug_hash in new_bug_hashes:
reports_to_delete.update(map(lambda x: x.id, reports))
else:
for report in reports:
# We set the fix date of a report only if the report
# has not been fixed before.
if report.fixed_at:
continue
checker = report.checker_id
if checker in disabled_checkers:
report.detection_status = 'off'
elif checker_is_unavailable(checker):
report.detection_status = 'unavailable'
else:
report.detection_status = 'resolved'
report.fixed_at = run_history_time
if reports_to_delete:
self.__removeReports(session, list(reports_to_delete))
@staticmethod
@exc_to_thrift_reqfail
def __store_run_lock(session, name, username):
"""
Store a RunLock record for the given run name into the database.
"""
# If the run can be stored, we need to lock it first.
run_lock = session.query(RunLock) \
.filter(RunLock.name == name) \
.with_for_update(nowait=True).one_or_none()
if not run_lock:
# If there is no lock record for the given run name, the run
# is not locked -- create a new lock.
run_lock = RunLock(name, username)
session.add(run_lock)
elif run_lock.has_expired(
db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE):
# There can be a lock in the database, which has already
# expired. In this case, we assume that the previous operation
# has failed, and thus, we can re-use the already present lock.
run_lock.touch()
run_lock.username = username
else:
# In case the lock exists and it has not expired, we must
# consider the run a locked one.
when = run_lock.when_expires(
db_cleanup.RUN_LOCK_TIMEOUT_IN_DATABASE)
username = run_lock.username if run_lock.username is not None \
else "another user"
LOG.info("Refusing to store into run '%s' as it is locked by "
"%s. Lock will expire at '%s'.", name, username, when)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"The run named '{0}' is being stored into by {1}. If the "
"other store operation has failed, this lock will expire "
"at '{2}'.".format(name, username, when))
# At any rate, if the lock has been created or updated, commit it
# into the database.
try:
session.commit()
except (sqlalchemy.exc.IntegrityError,
sqlalchemy.orm.exc.StaleDataError):
# The commit of this lock can fail.
#
# In case two store ops attempt to lock the same run name at the
# same time, committing the lock in the transaction that commits
# later will result in an IntegrityError due to the primary key
# constraint.
#
# In case two store ops attempt to lock the same run name with
            # reuse and one of the operations hangs long enough before COMMIT
# so that the other operation commits and thus removes the lock
# record, StaleDataError is raised. In this case, also consider
# the run locked, as the data changed while the transaction was
            # waiting, because another store operation wholly completed.
LOG.info("Run '%s' got locked while current transaction "
"tried to acquire a lock. Considering run as locked.",
name)
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.DATABASE,
"The run named '{0}' is being stored into by another "
"user.".format(name))
@staticmethod
@exc_to_thrift_reqfail
def __free_run_lock(session, name):
"""
Remove the lock from the database for the given run name.
"""
# Using with_for_update() here so the database (in case it supports
# this operation) locks the lock record's row from any other access.
run_lock = session.query(RunLock) \
.filter(RunLock.name == name) \
.with_for_update(nowait=True).one()
session.delete(run_lock)
session.commit()
def __check_run_limit(self, run_name):
"""
        Checks the maximum allowed number of uploadable runs for the current product.
"""
max_run_count = self.__manager.get_max_run_count()
with DBSession(self.__config_database) as session:
product = session.query(Product).get(self.__product.id)
if product.run_limit:
max_run_count = product.run_limit
# Session that handles constraints on the run.
with DBSession(self.__Session) as session:
if max_run_count:
LOG.debug("Check the maximum number of allowed "
"runs which is %d", max_run_count)
run = session.query(Run) \
.filter(Run.name == run_name) \
.one_or_none()
# If max_run_count is not set in the config file, it will allow
# the user to upload unlimited runs.
run_count = session.query(Run.id).count()
            # If we are not updating an existing run and the run count has
            # reached the limit, an exception is thrown.
if not run and run_count >= max_run_count:
remove_run_count = run_count - max_run_count + 1
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.GENERAL,
'You reached the maximum number of allowed runs '
'({0}/{1})! Please remove at least {2} run(s) before '
'you try it again.'.format(run_count,
max_run_count,
remove_run_count))
@exc_to_thrift_reqfail
@timeit
def massStoreRun(self, name, tag, version, b64zip, force,
trim_path_prefixes):
self.__require_store()
user = self.__auth_session.user if self.__auth_session else None
# Check constraints of the run.
self.__check_run_limit(name)
with DBSession(self.__Session) as session:
ThriftRequestHandler.__store_run_lock(session, name, user)
wrong_src_code_comments = []
try:
with TemporaryDirectory() as zip_dir:
unzip(b64zip, zip_dir)
LOG.debug("Using unzipped folder '%s'", zip_dir)
source_root = os.path.join(zip_dir, 'root')
report_dir = os.path.join(zip_dir, 'reports')
metadata_file = os.path.join(report_dir, 'metadata.json')
skip_file = os.path.join(report_dir, 'skip_file')
content_hash_file = os.path.join(zip_dir,
'content_hashes.json')
skip_handler = skiplist_handler.SkipListHandler()
if os.path.exists(skip_file):
LOG.debug("Pocessing skip file %s", skip_file)
try:
with open(skip_file) as sf:
skip_handler = \
skiplist_handler.SkipListHandler(sf.read())
except (IOError, OSError) as err:
LOG.error("Failed to open skip file")
LOG.error(err)
filename_to_hash = util.load_json_or_empty(content_hash_file,
{})
file_path_to_id = self.__store_source_files(source_root,
filename_to_hash,
trim_path_prefixes)
run_history_time = datetime.now()
check_commands, check_durations, cc_version, statistics, \
checkers = store_handler.metadata_info(metadata_file)
command = ''
if len(check_commands) == 1:
command = ' '.join(check_commands[0])
elif len(check_commands) > 1:
command = "multiple analyze calls: " + \
'; '.join([' '.join(com)
for com in check_commands])
durations = 0
if check_durations:
# Round the duration to seconds.
durations = int(sum(check_durations))
# This session's transaction buffer stores the actual run data
# into the database.
with DBSession(self.__Session) as session:
# Load the lock record for "FOR UPDATE" so that the
# transaction that handles the run's store operations
# has a lock on the database row itself.
run_lock = session.query(RunLock) \
.filter(RunLock.name == name) \
.with_for_update(nowait=True).one()
# Do not remove this seemingly dummy print, we need to make
# sure that the execution of the SQL statement is not
# optimised away and the fetched row is not garbage
# collected.
LOG.debug("Storing into run '%s' locked at '%s'.",
name, run_lock.locked_at)
# Actual store operation begins here.
user_name = self.__get_username()
run_id = store_handler.addCheckerRun(session,
command,
name,
tag,
user_name,
run_history_time,
version,
force,
cc_version,
statistics)
self.__store_reports(session,
report_dir,
source_root,
run_id,
file_path_to_id,
run_history_time,
self.__context.severity_map,
wrong_src_code_comments,
skip_handler,
checkers)
store_handler.setRunDuration(session,
run_id,
durations)
store_handler.finishCheckerRun(session, run_id)
session.commit()
return run_id
finally:
# In any case if the "try" block's execution began, a run lock must
# exist, which can now be removed, as storage either completed
# successfully, or failed in a detectable manner.
# (If the failure is undetectable, the coded grace period expiry
# of the lock will allow further store operations to the given
# run name.)
with DBSession(self.__Session) as session:
ThriftRequestHandler.__free_run_lock(session, name)
if wrong_src_code_comments:
raise codechecker_api_shared.ttypes.RequestFailed(
codechecker_api_shared.ttypes.ErrorCode.SOURCE_FILE,
"Multiple source code comment can be found with the same "
"checker name for same bug!",
wrong_src_code_comments)
@exc_to_thrift_reqfail
@timeit
def allowsStoringAnalysisStatistics(self):
self.__require_store()
return True if self.__manager.get_analysis_statistics_dir() else False
@exc_to_thrift_reqfail
@timeit
def getAnalysisStatisticsLimits(self):
self.__require_store()
cfg = dict()
# Get the limit of failure zip size.
failure_zip_size = self.__manager.get_failure_zip_size()
if failure_zip_size:
cfg[ttypes.StoreLimitKind.FAILURE_ZIP_SIZE] = failure_zip_size
# Get the limit of compilation database size.
compilation_database_size = \
self.__manager.get_compilation_database_size()
if compilation_database_size:
cfg[ttypes.StoreLimitKind.COMPILATION_DATABASE_SIZE] = \
compilation_database_size
return cfg
@exc_to_thrift_reqfail
@timeit
def storeAnalysisStatistics(self, run_name, b64zip):
self.__require_store()
report_dir_store = self.__manager.get_analysis_statistics_dir()
if report_dir_store:
try:
product_dir = os.path.join(report_dir_store,
self.__product.endpoint)
# Create report store directory.
if not os.path.exists(product_dir):
os.makedirs(product_dir)
# Removes and replaces special characters in the run name.
run_name = slugify(run_name)
run_zip_file = os.path.join(product_dir, run_name + '.zip')
with open(run_zip_file, 'w') as run_zip:
run_zip.write(zlib.decompress(
base64.b64decode(b64zip)))
return True
except Exception as ex:
LOG.error(str(ex))
return False
return False
@exc_to_thrift_reqfail
@timeit
def getAnalysisStatistics(self, run_id, run_history_id):
self.__require_access()
analyzer_statistics = {}
with DBSession(self.__Session) as session:
query = session.query(AnalyzerStatistic,
Run.id)
if run_id:
query = query.filter(Run.id == run_id)
elif run_history_id:
query = query.filter(RunHistory.id == run_history_id)
query = query \
.outerjoin(RunHistory,
RunHistory.id == AnalyzerStatistic.run_history_id) \
.outerjoin(Run,
Run.id == RunHistory.run_id)
for stat, run_id in query:
failed_files = zlib.decompress(stat.failed_files).split('\n') \
if stat.failed_files else None
analyzer_version = zlib.decompress(stat.version) \
if stat.version else None
analyzer_statistics[stat.analyzer_type] = \
ttypes.AnalyzerStatistics(version=analyzer_version,
failed=stat.failed,
failedFilePaths=failed_files,
successful=stat.successful)
return analyzer_statistics
| 1 | 11,399 | max was wrong here. As its buitlt-in, not a number, and a TypeError was thrown. | Ericsson-codechecker | c |
@@ -1,3 +1,5 @@
+#! /usr/bin/env python
+
# MIT License
# Copyright (c) 2018 Jose Amores | 1 | # MIT License
# Copyright (c) 2018 Jose Amores
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Sebastian Baar <[email protected]>
# This program is published under a GPLv2 license
# scapy.contrib.description = Scalable service-Oriented MiddlewarE/IP (SOME/IP)
# scapy.contrib.status = loads
import struct
from scapy.layers.inet import TCP, UDP
from scapy.compat import raw
from scapy.config import conf
from scapy.modules.six.moves import range
from scapy.packet import Packet, bind_layers
from scapy.fields import ShortField, BitEnumField, ConditionalField, \
BitField, PacketField, IntField, ByteField, ByteEnumField
class _SOMEIP_MessageId(Packet):
""" MessageId subpacket."""
name = "MessageId"
fields_desc = [
ShortField("srv_id", 0),
BitEnumField("sub_id", 0, 1, {0: "METHOD_ID", 1: "EVENT_ID"}),
ConditionalField(BitField("method_id", 0, 15),
lambda pkt: pkt.sub_id == 0),
ConditionalField(BitField("event_id", 0, 15),
lambda pkt: pkt.sub_id == 1)
]
def extract_padding(self, s):
return "", s
class _SOMEIP_RequestId(Packet):
""" RequestId subpacket."""
name = "RequestId"
fields_desc = [
ShortField("client_id", 0),
ShortField("session_id", 0)
]
def extract_padding(self, s):
return "", s
class SOMEIP(Packet):
""" SOME/IP Packet."""
PROTOCOL_VERSION = 0x01
INTERFACE_VERSION = 0x01
LEN_OFFSET = 0x08
LEN_OFFSET_TP = 0x0c
TYPE_REQUEST = 0x00
TYPE_REQUEST_NO_RET = 0x01
TYPE_NOTIFICATION = 0x02
TYPE_REQUEST_ACK = 0x40
TYPE_REQUEST_NORET_ACK = 0x41
TYPE_NOTIFICATION_ACK = 0x42
TYPE_RESPONSE = 0x80
TYPE_ERROR = 0x81
TYPE_RESPONSE_ACK = 0xc0
TYPE_ERROR_ACK = 0xc1
TYPE_TP_REQUEST = 0x20
TYPE_TP_REQUEST_NO_RET = 0x21
TYPE_TP_NOTIFICATION = 0x22
TYPE_TP_RESPONSE = 0x23
TYPE_TP_ERROR = 0x24
RET_E_OK = 0x00
RET_E_NOT_OK = 0x01
RET_E_UNKNOWN_SERVICE = 0x02
RET_E_UNKNOWN_METHOD = 0x03
RET_E_NOT_READY = 0x04
RET_E_NOT_REACHABLE = 0x05
RET_E_TIMEOUT = 0x06
RET_E_WRONG_PROTOCOL_V = 0x07
RET_E_WRONG_INTERFACE_V = 0x08
RET_E_MALFORMED_MSG = 0x09
RET_E_WRONG_MESSAGE_TYPE = 0x0a
_OVERALL_LEN_NOPAYLOAD = 16
name = "SOME/IP"
fields_desc = [
PacketField("msg_id", _SOMEIP_MessageId(),
_SOMEIP_MessageId),
IntField("len", None),
PacketField("req_id", _SOMEIP_RequestId(),
_SOMEIP_RequestId),
ByteField("proto_ver", PROTOCOL_VERSION),
ByteField("iface_ver", INTERFACE_VERSION),
ByteEnumField("msg_type", TYPE_REQUEST, {
TYPE_REQUEST: "REQUEST",
TYPE_REQUEST_NO_RET: "REQUEST_NO_RETURN",
TYPE_NOTIFICATION: "NOTIFICATION",
TYPE_REQUEST_ACK: "REQUEST_ACK",
TYPE_REQUEST_NORET_ACK: "REQUEST_NO_RETURN_ACK",
TYPE_NOTIFICATION_ACK: "NOTIFICATION_ACK",
TYPE_RESPONSE: "RESPONSE",
TYPE_ERROR: "ERROR",
TYPE_RESPONSE_ACK: "RESPONSE_ACK",
TYPE_ERROR_ACK: "ERROR_ACK",
TYPE_TP_REQUEST: "TP_REQUEST",
TYPE_TP_REQUEST_NO_RET: "TP_REQUEST_NO_RETURN",
TYPE_TP_NOTIFICATION: "TP_NOTIFICATION",
TYPE_TP_RESPONSE: "TP_RESPONSE",
TYPE_TP_ERROR: "TP_ERROR",
}),
ByteEnumField("retcode", 0, {
RET_E_OK: "E_OK",
RET_E_NOT_OK: "E_NOT_OK",
RET_E_UNKNOWN_SERVICE: "E_UNKNOWN_SERVICE",
RET_E_UNKNOWN_METHOD: "E_UNKNOWN_METHOD",
RET_E_NOT_READY: "E_NOT_READY",
RET_E_NOT_REACHABLE: "E_NOT_REACHABLE",
RET_E_TIMEOUT: "E_TIMEOUT",
RET_E_WRONG_PROTOCOL_V: "E_WRONG_PROTOCOL_VERSION",
RET_E_WRONG_INTERFACE_V: "E_WRONG_INTERFACE_VERSION",
RET_E_MALFORMED_MSG: "E_MALFORMED_MESSAGE",
RET_E_WRONG_MESSAGE_TYPE: "E_WRONG_MESSAGE_TYPE",
}),
ConditionalField(BitField("offset", 0, 28),
lambda pkt: SOMEIP._is_tp(pkt)),
ConditionalField(BitField("res", 0, 3),
lambda pkt: SOMEIP._is_tp(pkt)),
ConditionalField(BitField("more_seg", 0, 1),
lambda pkt: SOMEIP._is_tp(pkt))
]
def post_build(self, pkt, pay):
length = self.len
if length is None:
if SOMEIP._is_tp(self):
length = SOMEIP.LEN_OFFSET_TP + len(pay)
else:
length = SOMEIP.LEN_OFFSET + len(pay)
pkt = pkt[:4] + struct.pack("!I", length) + pkt[8:]
return pkt + pay
def answers(self, other):
if other.__class__ == self.__class__:
if self.msg_type in [SOMEIP.TYPE_REQUEST_NO_RET,
SOMEIP.TYPE_REQUEST_NORET_ACK,
SOMEIP.TYPE_NOTIFICATION,
SOMEIP.TYPE_TP_REQUEST_NO_RET,
SOMEIP.TYPE_TP_NOTIFICATION]:
return 0
return self.payload.answers(other.payload)
return 0
@staticmethod
def _is_tp(pkt):
"""Returns true if pkt is using SOMEIP-TP, else returns false."""
tp = [SOMEIP.TYPE_TP_REQUEST, SOMEIP.TYPE_TP_REQUEST_NO_RET,
SOMEIP.TYPE_TP_NOTIFICATION, SOMEIP.TYPE_TP_RESPONSE,
SOMEIP.TYPE_TP_ERROR]
if isinstance(pkt, Packet):
return pkt.msg_type in tp
else:
return pkt[15] in tp
def fragment(self, fragsize=1392):
"""Fragment SOME/IP-TP"""
fnb = 0
fl = self
lst = list()
while fl.underlayer is not None:
fnb += 1
fl = fl.underlayer
for p in fl:
s = raw(p[fnb].payload)
nb = (len(s) + fragsize) // fragsize
for i in range(nb):
q = p.copy()
del q[fnb].payload
q[fnb].len = SOMEIP.LEN_OFFSET_TP + \
len(s[i * fragsize:(i + 1) * fragsize])
q[fnb].more_seg = 1
if i == nb - 1:
q[fnb].more_seg = 0
q[fnb].offset += i * fragsize // 16
r = conf.raw_layer(load=s[i * fragsize:(i + 1) * fragsize])
r.overload_fields = p[fnb].payload.overload_fields.copy()
q.add_payload(r)
lst.append(q)
return lst
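# Illustrative sketch of the TP fragmentation above (hypothetical sizes): a
# 3000-byte payload with fragsize=1392 is split into 3 fragments; the second
# fragment gets offset 1392 // 16 = 87 and more_seg=1, the last one gets
# offset 174 and more_seg=0, matching the 16-byte offset units of SOME/IP TP.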
def _bind_someip_layers():
for i in range(15):
bind_layers(UDP, SOMEIP, sport=30490 + i)
bind_layers(TCP, SOMEIP, sport=30490 + i)
_bind_someip_layers()
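# --- Usage sketch (not part of the original contrib module). It builds a
# plain SOME/IP request with a 4-byte payload and checks that post_build()
# filled in the length field: for a non-TP message it is LEN_OFFSET (8)
# plus the payload size.
if __name__ == '__main__':
    _pkt = SOMEIP(msg_type=SOMEIP.TYPE_REQUEST) / conf.raw_layer(load=b"\x01\x02\x03\x04")
    _wire = raw(_pkt)
    assert struct.unpack("!I", _wire[4:8])[0] == SOMEIP.LEN_OFFSET + 4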
| 1 | 16,523 | This change is not needed, please revert it. | secdev-scapy | py |
@@ -40,7 +40,9 @@ def train(params, train_set, num_boost_round=100,
Customized objective function.
feval : callable or None, optional (default=None)
Customized evaluation function.
+ Should accept two parameters: preds, train_data.
Note: should return (eval_name, eval_result, is_higher_better) or list of such tuples.
+ To ignore the default metric in params, set it to the string ``"None"``
init_model : string or None, optional (default=None)
Filename of LightGBM model or Booster instance used for continue training.
feature_name : list of strings or 'auto', optional (default="auto") | 1 | # coding: utf-8
# pylint: disable = invalid-name, W0105
"""Training Library containing training routines of LightGBM."""
from __future__ import absolute_import
import collections
import warnings
from operator import attrgetter
import numpy as np
from . import callback
from .basic import Booster, Dataset, LightGBMError, _InnerPredictor
from .compat import (SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold,
integer_types, range_, string_type)
def train(params, train_set, num_boost_round=100,
valid_sets=None, valid_names=None,
fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, evals_result=None,
verbose_eval=True, learning_rates=None,
keep_training_booster=False, callbacks=None):
"""Perform the training with given parameters.
Parameters
----------
params : dict
Parameters for training.
train_set : Dataset
Data to be trained.
num_boost_round: int, optional (default=100)
Number of boosting iterations.
valid_sets: list of Datasets or None, optional (default=None)
List of data to be evaluated during training.
valid_names: list of string or None, optional (default=None)
Names of ``valid_sets``.
fobj : callable or None, optional (default=None)
Customized objective function.
feval : callable or None, optional (default=None)
Customized evaluation function.
Note: should return (eval_name, eval_result, is_higher_better) or list of such tuples.
init_model : string or None, optional (default=None)
Filename of LightGBM model or Booster instance used for continue training.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
early_stopping_rounds: int or None, optional (default=None)
Activates early stopping. The model will train until the validation score stops improving.
Requires at least one validation data and one metric. If there's more than one, will check all of them.
If early stopping occurs, the model will add ``best_iteration`` field.
evals_result: dict or None, optional (default=None)
        This dictionary is used to store all evaluation results of all the items in ``valid_sets``.
Example
-------
With a ``valid_sets`` = [valid_set, train_set],
``valid_names`` = ['eval', 'train']
and a ``params`` = ('metric':'logloss')
returns: {'train': {'logloss': ['0.48253', '0.35953', ...]},
'eval': {'logloss': ['0.480385', '0.357756', ...]}}.
verbose_eval : bool or int, optional (default=True)
Requires at least one validation data.
If True, the eval metric on the valid set is printed at each boosting stage.
If int, the eval metric on the valid set is printed at every ``verbose_eval`` boosting stage.
The last boosting stage or the boosting stage found by using ``early_stopping_rounds`` is also printed.
Example
-------
With ``verbose_eval`` = 4 and at least one item in evals,
an evaluation metric is printed every 4 (instead of 1) boosting stages.
learning_rates: list, callable or None, optional (default=None)
List of learning rates for each boosting round
or a customized function that calculates ``learning_rate``
in terms of current number of round (e.g. yields learning rate decay).
keep_training_booster : bool, optional (default=False)
Whether the returned Booster will be used to keep training.
If False, the returned value will be converted into _InnerPredictor before returning.
You can still use _InnerPredictor as ``init_model`` for future continue training.
callbacks : list of callables or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
Returns
-------
booster : Booster
The trained Booster model.
"""
# create predictor first
for alias in ["num_boost_round", "num_iterations", "num_iteration", "num_tree", "num_trees", "num_round", "num_rounds", "n_estimators"]:
if alias in params:
num_boost_round = int(params.pop(alias))
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
break
for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
if alias in params and params[alias] is not None:
early_stopping_rounds = int(params.pop(alias))
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
break
if num_boost_round <= 0:
raise ValueError("num_boost_round should be greater than zero.")
if isinstance(init_model, string_type):
predictor = _InnerPredictor(model_file=init_model)
elif isinstance(init_model, Booster):
predictor = init_model._to_predictor()
else:
predictor = None
init_iteration = predictor.num_total_iteration if predictor is not None else 0
# check dataset
if not isinstance(train_set, Dataset):
raise TypeError("Training only accepts Dataset object")
train_set._update_params(params)
train_set._set_predictor(predictor)
train_set.set_feature_name(feature_name)
train_set.set_categorical_feature(categorical_feature)
is_valid_contain_train = False
train_data_name = "training"
reduced_valid_sets = []
name_valid_sets = []
if valid_sets is not None:
if isinstance(valid_sets, Dataset):
valid_sets = [valid_sets]
if isinstance(valid_names, string_type):
valid_names = [valid_names]
for i, valid_data in enumerate(valid_sets):
# reduce cost for prediction training data
if valid_data is train_set:
is_valid_contain_train = True
if valid_names is not None:
train_data_name = valid_names[i]
continue
if not isinstance(valid_data, Dataset):
raise TypeError("Traninig only accepts Dataset object")
valid_data._update_params(params)
valid_data.set_reference(train_set)
reduced_valid_sets.append(valid_data)
if valid_names is not None and len(valid_names) > i:
name_valid_sets.append(valid_names[i])
else:
name_valid_sets.append('valid_' + str(i))
# process callbacks
if callbacks is None:
callbacks = set()
else:
for i, cb in enumerate(callbacks):
cb.__dict__.setdefault('order', i - len(callbacks))
callbacks = set(callbacks)
# Most of legacy advanced options becomes callbacks
if verbose_eval is True:
callbacks.add(callback.print_evaluation())
elif isinstance(verbose_eval, integer_types):
callbacks.add(callback.print_evaluation(verbose_eval))
if early_stopping_rounds is not None:
callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=bool(verbose_eval)))
if learning_rates is not None:
callbacks.add(callback.reset_parameter(learning_rate=learning_rates))
if evals_result is not None:
callbacks.add(callback.record_evaluation(evals_result))
callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
callbacks_after_iter = callbacks - callbacks_before_iter
callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
# construct booster
try:
booster = Booster(params=params, train_set=train_set)
if is_valid_contain_train:
booster.set_train_data_name(train_data_name)
for valid_set, name_valid_set in zip(reduced_valid_sets, name_valid_sets):
booster.add_valid(valid_set, name_valid_set)
finally:
train_set._reverse_update_params()
for valid_set in reduced_valid_sets:
valid_set._reverse_update_params()
booster.best_iteration = 0
# start training
for i in range_(init_iteration, init_iteration + num_boost_round):
for cb in callbacks_before_iter:
cb(callback.CallbackEnv(model=booster,
params=params,
iteration=i,
begin_iteration=init_iteration,
end_iteration=init_iteration + num_boost_round,
evaluation_result_list=None))
booster.update(fobj=fobj)
evaluation_result_list = []
# check evaluation result.
if valid_sets is not None:
if is_valid_contain_train:
evaluation_result_list.extend(booster.eval_train(feval))
evaluation_result_list.extend(booster.eval_valid(feval))
try:
for cb in callbacks_after_iter:
cb(callback.CallbackEnv(model=booster,
params=params,
iteration=i,
begin_iteration=init_iteration,
end_iteration=init_iteration + num_boost_round,
evaluation_result_list=evaluation_result_list))
except callback.EarlyStopException as earlyStopException:
booster.best_iteration = earlyStopException.best_iteration + 1
evaluation_result_list = earlyStopException.best_score
break
booster.best_score = collections.defaultdict(dict)
for dataset_name, eval_name, score, _ in evaluation_result_list:
booster.best_score[dataset_name][eval_name] = score
if not keep_training_booster:
booster._load_model_from_string(booster._save_model_to_string(), False)
booster.free_dataset()
return booster
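# A minimal sketch (not part of the original module) of the kind of callable
# the ``feval`` parameter of train() above expects: it receives the raw
# predictions and the training/validation Dataset and returns the
# (eval_name, eval_result, is_higher_better) tuple. The metric below is a
# hypothetical example, not an official LightGBM metric.
def _example_feval_mae(preds, train_data):
    """Toy evaluation function: mean absolute error (lower is better)."""
    labels = train_data.get_label()
    return 'example_mae', float(np.mean(np.abs(preds - labels))), False
# Usage sketch (assuming ``dtrain`` and ``dvalid`` Datasets exist):
#   train({'metric': 'None'}, dtrain, valid_sets=[dvalid],
#         feval=_example_feval_mae)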
class CVBooster(object):
""""Auxiliary data struct to hold all boosters of CV."""
def __init__(self):
self.boosters = []
self.best_iteration = -1
def append(self, booster):
"""add a booster to CVBooster"""
self.boosters.append(booster)
def __getattr__(self, name):
"""redirect methods call of CVBooster"""
def handlerFunction(*args, **kwargs):
"""call methods with each booster, and concatenate their results"""
ret = []
for booster in self.boosters:
ret.append(getattr(booster, name)(*args, **kwargs))
return ret
return handlerFunction
def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True, shuffle=True):
"""
Make an n-fold list of Booster from random indices.
"""
full_data = full_data.construct()
num_data = full_data.num_data()
if folds is not None:
if not hasattr(folds, '__iter__'):
raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx)")
else:
if 'objective' in params and params['objective'] == 'lambdarank':
if not SKLEARN_INSTALLED:
raise LightGBMError('Scikit-learn is required for lambdarank cv.')
# lambdarank task, split according to groups
group_info = full_data.get_group().astype(int)
flatted_group = np.repeat(range(len(group_info)), repeats=group_info)
group_kfold = _LGBMGroupKFold(n_splits=nfold)
folds = group_kfold.split(X=np.zeros(num_data), groups=flatted_group)
elif stratified:
if not SKLEARN_INSTALLED:
raise LightGBMError('Scikit-learn is required for stratified cv.')
skf = _LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed)
folds = skf.split(X=np.zeros(num_data), y=full_data.get_label())
else:
if shuffle:
randidx = np.random.RandomState(seed).permutation(num_data)
else:
randidx = np.arange(num_data)
kstep = int(num_data / nfold)
test_id = [randidx[i: i + kstep] for i in range_(0, num_data, kstep)]
train_id = [np.concatenate([test_id[i] for i in range_(nfold) if k != i]) for k in range_(nfold)]
folds = zip(train_id, test_id)
ret = CVBooster()
for train_idx, test_idx in folds:
train_set = full_data.subset(train_idx)
valid_set = full_data.subset(test_idx)
# run preprocessing on the data set if needed
if fpreproc is not None:
train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy())
else:
tparam = params
cvbooster = Booster(tparam, train_set)
cvbooster.add_valid(valid_set, 'valid')
ret.append(cvbooster)
return ret
def _agg_cv_result(raw_results):
"""
Aggregate cross-validation results.
"""
cvmap = collections.defaultdict(list)
metric_type = {}
for one_result in raw_results:
for one_line in one_result:
metric_type[one_line[1]] = one_line[3]
cvmap[one_line[1]].append(one_line[2])
return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()]
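# Illustrative sketch of the aggregation above (hypothetical numbers): if two
# folds report ('valid', 'l2', 0.40, False) and ('valid', 'l2', 0.60, False),
# the result is a single ('cv_agg', 'l2', 0.50, False, 0.10) entry, which
# cv() below records as results['l2-mean'] and results['l2-stdv'].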
def cv(params, train_set, num_boost_round=100,
folds=None, nfold=5, stratified=True, shuffle=True,
metrics=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, fpreproc=None,
verbose_eval=None, show_stdv=True, seed=0,
callbacks=None):
"""Perform the cross-validation with given paramaters.
Parameters
----------
params : dict
Parameters for Booster.
train_set : Dataset
Data to be trained on.
num_boost_round : int, optional (default=100)
Number of boosting iterations.
folds : a generator or iterator of (train_idx, test_idx) tuples or None, optional (default=None)
        The train and test indices for each fold.
This argument has highest priority over other data split arguments.
nfold : int, optional (default=5)
Number of folds in CV.
stratified : bool, optional (default=True)
Whether to perform stratified sampling.
shuffle: bool, optional (default=True)
Whether to shuffle before splitting data.
metrics : string, list of strings or None, optional (default=None)
Evaluation metrics to be monitored while CV.
If not None, the metric in ``params`` will be overridden.
fobj : callable or None, optional (default=None)
Custom objective function.
feval : callable or None, optional (default=None)
Custom evaluation function.
init_model : string or None, optional (default=None)
Filename of LightGBM model or Booster instance used for continue training.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
early_stopping_rounds: int or None, optional (default=None)
Activates early stopping. CV error needs to decrease at least
every ``early_stopping_rounds`` round(s) to continue.
Last entry in evaluation history is the one from best iteration.
fpreproc : callable or None, optional (default=None)
Preprocessing function that takes (dtrain, dtest, params)
and returns transformed versions of those.
verbose_eval : bool, int, or None, optional (default=None)
Whether to display the progress.
If None, progress will be displayed when np.ndarray is returned.
If True, progress will be displayed at every boosting stage.
If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
show_stdv : bool, optional (default=True)
Whether to display the standard deviation in progress.
Results are not affected by this parameter, and always contains std.
seed : int, optional (default=0)
Seed used to generate the folds (passed to numpy.random.seed).
callbacks : list of callables or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
Returns
-------
eval_hist : dict
Evaluation history.
The dictionary has the following format:
{'metric1-mean': [values], 'metric1-stdv': [values],
'metric2-mean': [values], 'metric2-stdv': [values],
...}.
"""
if not isinstance(train_set, Dataset):
raise TypeError("Traninig only accepts Dataset object")
for alias in ["num_boost_round", "num_iterations", "num_iteration", "num_tree", "num_trees", "num_round", "num_rounds", "n_estimators"]:
if alias in params:
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
num_boost_round = params.pop(alias)
break
for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
if alias in params:
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
early_stopping_rounds = params.pop(alias)
break
if num_boost_round <= 0:
raise ValueError("num_boost_round should be greater than zero.")
if isinstance(init_model, string_type):
predictor = _InnerPredictor(model_file=init_model)
elif isinstance(init_model, Booster):
predictor = init_model._to_predictor()
else:
predictor = None
train_set._update_params(params)
train_set._set_predictor(predictor)
train_set.set_feature_name(feature_name)
train_set.set_categorical_feature(categorical_feature)
if metrics is not None:
params['metric'] = metrics
results = collections.defaultdict(list)
cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
params=params, seed=seed, fpreproc=fpreproc,
stratified=stratified, shuffle=shuffle)
# setup callbacks
if callbacks is None:
callbacks = set()
else:
for i, cb in enumerate(callbacks):
cb.__dict__.setdefault('order', i - len(callbacks))
callbacks = set(callbacks)
if early_stopping_rounds is not None:
callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=False))
if verbose_eval is True:
callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
elif isinstance(verbose_eval, integer_types):
callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))
callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
callbacks_after_iter = callbacks - callbacks_before_iter
callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
for i in range_(num_boost_round):
for cb in callbacks_before_iter:
cb(callback.CallbackEnv(model=cvfolds,
params=params,
iteration=i,
begin_iteration=0,
end_iteration=num_boost_round,
evaluation_result_list=None))
cvfolds.update(fobj=fobj)
res = _agg_cv_result(cvfolds.eval_valid(feval))
for _, key, mean, _, std in res:
results[key + '-mean'].append(mean)
results[key + '-stdv'].append(std)
try:
for cb in callbacks_after_iter:
cb(callback.CallbackEnv(model=cvfolds,
params=params,
iteration=i,
begin_iteration=0,
end_iteration=num_boost_round,
evaluation_result_list=res))
except callback.EarlyStopException as earlyStopException:
cvfolds.best_iteration = earlyStopException.best_iteration + 1
for k in results:
results[k] = results[k][:cvfolds.best_iteration]
break
return dict(results)
| 1 | 18,374 | @ClimbsRocks Please add that it could be a string. `feval : callable, string or None, optional (default=None)` | microsoft-LightGBM | cpp |
@@ -1,7 +1,7 @@
# frozen_string_literal: true
module Faker
- class Sports
+ class Sports < Base
class Basketball < Base
class << self
## | 1 | # frozen_string_literal: true
module Faker
class Sports
class Basketball < Base
class << self
##
# Produces the name of a basketball team.
#
# @return [String]
#
# @example
# Faker::Sports::Basketball.team #=> "Golden State Warriors"
#
# @faker.version 1.9.4
def team
fetch('basketball.teams')
end
##
# Produces the name of a basketball player.
#
# @return [String]
#
# @example
# Faker::Sports::Basketball.player #=> "LeBron James"
#
# @faker.version 1.9.4
def player
fetch('basketball.players')
end
##
# Produces the name of a basketball coach.
#
# @return [String]
#
# @example
# Faker::Sports::Basketball.coach #=> "Gregg Popovich"
#
# @faker.version 1.9.4
def coach
fetch('basketball.coaches')
end
##
# Produces a position in basketball.
#
# @return [String]
#
# @example
# Faker::Sports::Basketball.position #=> "Point Guard"
#
# @faker.version 1.9.4
def position
fetch('basketball.positions')
end
end
end
end
end
| 1 | 9,364 | now that I have started using, I realized, I could have named my new class singular `Sport` and not having to make this change. let me know, I will update | faker-ruby-faker | rb |
@@ -23,7 +23,7 @@ func GetApps() map[string][]App {
apps := make(map[string][]App)
for platformType := range PluginMap {
labels := map[string]string{
- "com.ddev.platform": platformType,
+ "com.ddev.platform": "ddev",
"com.docker.compose.service": "web",
}
sites, err := dockerutil.FindContainersByLabels(labels) | 1 | package platform
import (
"fmt"
"path/filepath"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/fatih/color"
"github.com/fsouza/go-dockerclient"
"github.com/gosuri/uitable"
"errors"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/fileutil"
"github.com/drud/ddev/pkg/util"
homedir "github.com/mitchellh/go-homedir"
)
// GetApps returns a list of ddev applications keyed by platform.
func GetApps() map[string][]App {
apps := make(map[string][]App)
for platformType := range PluginMap {
labels := map[string]string{
"com.ddev.platform": platformType,
"com.docker.compose.service": "web",
}
sites, err := dockerutil.FindContainersByLabels(labels)
if err == nil {
for _, siteContainer := range sites {
site, err := GetPluginApp(platformType)
// This should absolutely never happen, so just fatal on the off chance it does.
if err != nil {
log.Fatalf("could not get application for plugin type %s", platformType)
}
approot, ok := siteContainer.Labels["com.ddev.approot"]
if !ok {
break
}
_, ok = apps[platformType]
if !ok {
apps[platformType] = []App{}
}
err = site.Init(approot)
if err == nil {
apps[platformType] = append(apps[platformType], site)
}
}
}
}
return apps
}
// RenderAppTable will format a table for user display based on a list of apps.
func RenderAppTable(platform string, apps []App) {
if len(apps) > 0 {
fmt.Printf("%v %s %v found.\n", len(apps), platform, util.FormatPlural(len(apps), "site", "sites"))
table := CreateAppTable()
for _, site := range apps {
RenderAppRow(table, site)
}
fmt.Println(table)
}
}
// CreateAppTable will create a new app table for describe and list output
func CreateAppTable() *uitable.Table {
table := uitable.New()
table.MaxColWidth = 140
table.Separator = " "
table.AddRow("NAME", "TYPE", "LOCATION", "URL", "STATUS")
return table
}
// RenderAppRow will add an application row to an existing table for describe and list output.
func RenderAppRow(table *uitable.Table, site App) {
	// Collapse the user's home directory to ~ when displaying the app root
appRoot := site.AppRoot()
userDir, err := homedir.Dir()
if err == nil {
appRoot = strings.Replace(appRoot, userDir, "~", 1)
}
status := site.SiteStatus()
if status == "stopped" {
status = color.YellowString(status)
} else {
status = color.CyanString(status)
}
table.AddRow(
site.GetName(),
site.GetType(),
appRoot,
site.URL(),
status,
)
}
// Cleanup will clean up ddev apps even if the composer file has been deleted.
func Cleanup(app App) error {
client := dockerutil.GetDockerClient()
// Find all containers which match the current site name.
labels := map[string]string{
"com.ddev.site-name": app.GetName(),
}
containers, err := dockerutil.FindContainersByLabels(labels)
if err != nil {
return err
}
// First, try stopping the listed containers if they are running.
for i := range containers {
if containers[i].State == "running" || containers[i].State == "restarting" || containers[i].State == "paused" {
			containerName := containers[i].Names[0][1:]
fmt.Printf("Stopping container: %s\n", containerName)
err = client.StopContainer(containers[i].ID, 60)
if err != nil {
return fmt.Errorf("could not stop container %s: %v", containerName, err)
}
}
}
// Try to remove the containers once they are stopped.
for i := range containers {
		containerName := containers[i].Names[0][1:]
removeOpts := docker.RemoveContainerOptions{
ID: containers[i].ID,
RemoveVolumes: true,
Force: true,
}
fmt.Printf("Removing container: %s\n", containerName)
if err := client.RemoveContainer(removeOpts); err != nil {
return fmt.Errorf("could not remove container %s: %v", containerName, err)
}
}
return StopRouter()
}
// CheckForConf checks for a config.yaml at the cwd or parent dirs.
func CheckForConf(confPath string) (string, error) {
if fileutil.FileExists(confPath + "/.ddev/config.yaml") {
return confPath, nil
}
pathList := strings.Split(confPath, "/")
	for range pathList {
confPath = filepath.Dir(confPath)
if fileutil.FileExists(confPath + "/.ddev/config.yaml") {
return confPath, nil
}
}
return "", errors.New("no .ddev/config.yaml file was found in this directory or any parent")
}
// ddevContainersRunning determines if any ddev-controlled containers are currently running.
func ddevContainersRunning() (bool, error) {
containers, err := dockerutil.GetDockerContainers(false)
if err != nil {
return false, err
}
for _, container := range containers {
if _, ok := container.Labels["com.ddev.platform"]; ok {
return true, nil
}
}
return false, nil
}
| 1 | 11,384 | Not sure why this is changing to a hard-coded string. | drud-ddev | php |
@@ -453,6 +453,15 @@ class FlowContentView(RequestHandler):
))
+class Commands(RequestHandler):
+ def post(self):
+ result = self.master.commands.execute(self.json["command"])
+ if result is None:
+ self.write({"result": ""})
+ return
+ self.write({"result": str(result)})
+
+
class Events(RequestHandler):
def get(self):
self.write([logentry_to_json(e) for e in self.master.events.data]) | 1 | import asyncio
import hashlib
import json
import logging
import os.path
import re
from io import BytesIO
from typing import ClassVar, Optional
import tornado.escape
import tornado.web
import tornado.websocket
import mitmproxy.flow
import mitmproxy.tools.web.master # noqa
from mitmproxy import contentviews
from mitmproxy import flowfilter
from mitmproxy import http
from mitmproxy import io
from mitmproxy import log
from mitmproxy import optmanager
from mitmproxy import version
from mitmproxy.addons import export
from mitmproxy.utils.strutils import always_str
def flow_to_json(flow: mitmproxy.flow.Flow) -> dict:
"""
Remove flow message content and cert to save transmission space.
Args:
flow: The original flow.
Sync with web/src/flow.ts.
"""
f = {
"id": flow.id,
"intercepted": flow.intercepted,
"is_replay": flow.is_replay,
"type": flow.type,
"modified": flow.modified(),
"marked": flow.marked,
}
if flow.client_conn:
f["client_conn"] = {
"id": flow.client_conn.id,
"peername": flow.client_conn.peername,
"sockname": flow.client_conn.sockname,
"tls_established": flow.client_conn.tls_established,
"sni": flow.client_conn.sni,
"cipher": flow.client_conn.cipher,
"alpn": always_str(flow.client_conn.alpn, "ascii", "backslashreplace"),
"tls_version": flow.client_conn.tls_version,
"timestamp_start": flow.client_conn.timestamp_start,
"timestamp_tls_setup": flow.client_conn.timestamp_tls_setup,
"timestamp_end": flow.client_conn.timestamp_end,
# Legacy properties
"address": flow.client_conn.peername,
"cipher_name": flow.client_conn.cipher,
"alpn_proto_negotiated": always_str(flow.client_conn.alpn, "ascii", "backslashreplace"),
}
if flow.server_conn:
f["server_conn"] = {
"id": flow.server_conn.id,
"peername": flow.server_conn.peername,
"sockname": flow.server_conn.sockname,
"address": flow.server_conn.address,
"tls_established": flow.server_conn.tls_established,
"sni": flow.server_conn.sni,
"cipher": flow.server_conn.cipher,
"alpn": always_str(flow.server_conn.alpn, "ascii", "backslashreplace"),
"tls_version": flow.server_conn.tls_version,
"timestamp_start": flow.server_conn.timestamp_start,
"timestamp_tcp_setup": flow.server_conn.timestamp_tcp_setup,
"timestamp_tls_setup": flow.server_conn.timestamp_tls_setup,
"timestamp_end": flow.server_conn.timestamp_end,
# Legacy properties
"ip_address": flow.server_conn.peername,
"source_address": flow.server_conn.sockname,
"alpn_proto_negotiated": always_str(flow.server_conn.alpn, "ascii", "backslashreplace"),
}
if flow.error:
f["error"] = flow.error.get_state()
if isinstance(flow, http.HTTPFlow):
content_length: Optional[int]
content_hash: Optional[str]
if flow.request:
if flow.request.raw_content:
content_length = len(flow.request.raw_content)
content_hash = hashlib.sha256(flow.request.raw_content).hexdigest()
else:
content_length = None
content_hash = None
f["request"] = {
"method": flow.request.method,
"scheme": flow.request.scheme,
"host": flow.request.host,
"port": flow.request.port,
"path": flow.request.path,
"http_version": flow.request.http_version,
"headers": tuple(flow.request.headers.items(True)),
"contentLength": content_length,
"contentHash": content_hash,
"timestamp_start": flow.request.timestamp_start,
"timestamp_end": flow.request.timestamp_end,
"is_replay": flow.is_replay == "request", # TODO: remove, use flow.is_replay instead.
"pretty_host": flow.request.pretty_host,
}
if flow.response:
if flow.response.raw_content:
content_length = len(flow.response.raw_content)
content_hash = hashlib.sha256(flow.response.raw_content).hexdigest()
else:
content_length = None
content_hash = None
f["response"] = {
"http_version": flow.response.http_version,
"status_code": flow.response.status_code,
"reason": flow.response.reason,
"headers": tuple(flow.response.headers.items(True)),
"contentLength": content_length,
"contentHash": content_hash,
"timestamp_start": flow.response.timestamp_start,
"timestamp_end": flow.response.timestamp_end,
"is_replay": flow.is_replay == "response", # TODO: remove, use flow.is_replay instead.
}
if flow.response.data.trailers:
f["response"]["trailers"] = tuple(flow.response.data.trailers.items(True))
return f
def logentry_to_json(e: log.LogEntry) -> dict:
return {
"id": id(e), # we just need some kind of id.
"message": e.msg,
"level": e.level
}
class APIError(tornado.web.HTTPError):
pass
class RequestHandler(tornado.web.RequestHandler):
application: "Application"
def write(self, chunk):
# Writing arrays on the top level is ok nowadays.
# http://flask.pocoo.org/docs/0.11/security/#json-security
if isinstance(chunk, list):
chunk = tornado.escape.json_encode(chunk)
self.set_header("Content-Type", "application/json; charset=UTF-8")
super().write(chunk)
def set_default_headers(self):
super().set_default_headers()
self.set_header("Server", version.MITMPROXY)
self.set_header("X-Frame-Options", "DENY")
self.add_header("X-XSS-Protection", "1; mode=block")
self.add_header("X-Content-Type-Options", "nosniff")
self.add_header(
"Content-Security-Policy",
"default-src 'self'; "
"connect-src 'self' ws:; "
"style-src 'self' 'unsafe-inline'"
)
@property
def json(self):
if not self.request.headers.get("Content-Type", "").startswith("application/json"):
raise APIError(400, "Invalid Content-Type, expected application/json.")
try:
return json.loads(self.request.body.decode())
except Exception as e:
raise APIError(400, "Malformed JSON: {}".format(str(e)))
@property
def filecontents(self):
"""
Accept either a multipart/form file upload or just take the plain request body.
"""
if self.request.files:
return next(iter(self.request.files.values()))[0].body
else:
return self.request.body
@property
def view(self) -> "mitmproxy.addons.view.View":
return self.application.master.view
@property
def master(self) -> "mitmproxy.tools.web.master.WebMaster":
return self.application.master
@property
def flow(self) -> mitmproxy.flow.Flow:
flow_id = str(self.path_kwargs["flow_id"])
# FIXME: Add a facility to addon.view to safely access the store
flow = self.view.get_by_id(flow_id)
if flow:
return flow
else:
raise APIError(404, "Flow not found.")
def write_error(self, status_code: int, **kwargs):
if "exc_info" in kwargs and isinstance(kwargs["exc_info"][1], APIError):
self.finish(kwargs["exc_info"][1].log_message)
else:
super().write_error(status_code, **kwargs)
class IndexHandler(RequestHandler):
def get(self):
token = self.xsrf_token # https://github.com/tornadoweb/tornado/issues/645
assert token
self.render("index.html", static=False, version=version.VERSION)
class FilterHelp(RequestHandler):
def get(self):
self.write(dict(
commands=flowfilter.help
))
class WebSocketEventBroadcaster(tornado.websocket.WebSocketHandler):
# raise an error if inherited class doesn't specify its own instance.
connections: ClassVar[set]
def open(self):
self.connections.add(self)
def on_close(self):
self.connections.remove(self)
@classmethod
def broadcast(cls, **kwargs):
message = json.dumps(kwargs, ensure_ascii=False).encode("utf8", "surrogateescape")
for conn in cls.connections:
try:
conn.write_message(message)
except Exception: # pragma: no cover
logging.error("Error sending message", exc_info=True)
class ClientConnection(WebSocketEventBroadcaster):
connections: ClassVar[set] = set()
class Flows(RequestHandler):
def get(self):
self.write([flow_to_json(f) for f in self.view])
class DumpFlows(RequestHandler):
def get(self):
self.set_header("Content-Disposition", "attachment; filename=flows")
self.set_header("Content-Type", "application/octet-stream")
bio = BytesIO()
fw = io.FlowWriter(bio)
for f in self.view:
fw.add(f)
self.write(bio.getvalue())
bio.close()
def post(self):
self.view.clear()
bio = BytesIO(self.filecontents)
for i in io.FlowReader(bio).stream():
asyncio.ensure_future(self.master.load_flow(i))
bio.close()
class ExportFlow(RequestHandler):
def post(self, flow_id, format):
out = export.formats[format](self.flow)
self.write({
"export": always_str(out, "utf8", "backslashreplace")
})
class ClearAll(RequestHandler):
def post(self):
self.view.clear()
self.master.events.clear()
class ResumeFlows(RequestHandler):
def post(self):
for f in self.view:
if not f.intercepted:
continue
f.resume()
self.view.update([f])
class KillFlows(RequestHandler):
def post(self):
for f in self.view:
if f.killable:
f.kill()
self.view.update([f])
class ResumeFlow(RequestHandler):
def post(self, flow_id):
self.flow.resume()
self.view.update([self.flow])
class KillFlow(RequestHandler):
def post(self, flow_id):
if self.flow.killable:
self.flow.kill()
self.view.update([self.flow])
class FlowHandler(RequestHandler):
def delete(self, flow_id):
if self.flow.killable:
self.flow.kill()
self.view.remove([self.flow])
def put(self, flow_id):
flow = self.flow
flow.backup()
try:
for a, b in self.json.items():
if a == "request" and hasattr(flow, "request"):
request = flow.request
for k, v in b.items():
if k in ["method", "scheme", "host", "path", "http_version"]:
setattr(request, k, str(v))
elif k == "port":
request.port = int(v)
elif k == "headers":
request.headers.clear()
for header in v:
request.headers.add(*header)
elif k == "trailers":
request.trailers.clear()
for trailer in v:
request.trailers.add(*trailer)
elif k == "content":
request.text = v
else:
raise APIError(400, f"Unknown update request.{k}: {v}")
elif a == "response" and hasattr(flow, "response"):
response = flow.response
for k, v in b.items():
if k in ["msg", "http_version"]:
setattr(response, k, str(v))
elif k == "code":
response.status_code = int(v)
elif k == "headers":
response.headers.clear()
for header in v:
response.headers.add(*header)
elif k == "trailers":
response.trailers.clear()
for trailer in v:
response.trailers.add(*trailer)
elif k == "content":
response.text = v
else:
raise APIError(400, f"Unknown update response.{k}: {v}")
else:
raise APIError(400, f"Unknown update {a}: {b}")
except APIError:
flow.revert()
raise
self.view.update([flow])
class DuplicateFlow(RequestHandler):
def post(self, flow_id):
f = self.flow.copy()
self.view.add([f])
self.write(f.id)
class RevertFlow(RequestHandler):
def post(self, flow_id):
if self.flow.modified():
self.flow.revert()
self.view.update([self.flow])
class ReplayFlow(RequestHandler):
def post(self, flow_id):
self.master.commands.call("replay.client", [self.flow])
class FlowContent(RequestHandler):
def post(self, flow_id, message):
self.flow.backup()
message = getattr(self.flow, message)
message.content = self.filecontents
self.view.update([self.flow])
def get(self, flow_id, message):
message = getattr(self.flow, message)
if not message.raw_content:
raise APIError(400, "No content.")
content_encoding = message.headers.get("Content-Encoding", None)
if content_encoding:
content_encoding = re.sub(r"[^\w]", "", content_encoding)
self.set_header("Content-Encoding", content_encoding)
original_cd = message.headers.get("Content-Disposition", None)
filename = None
if original_cd:
filename = re.search(r'filename=([-\w" .()]+)', original_cd)
if filename:
filename = filename.group(1)
if not filename:
filename = self.flow.request.path.split("?")[0].split("/")[-1]
filename = re.sub(r'[^-\w" .()]', "", filename)
cd = f"attachment; filename={filename}"
self.set_header("Content-Disposition", cd)
self.set_header("Content-Type", "application/text")
self.set_header("X-Content-Type-Options", "nosniff")
self.set_header("X-Frame-Options", "DENY")
self.write(message.raw_content)
class FlowContentView(RequestHandler):
def get(self, flow_id, message, content_view):
message = getattr(self.flow, message)
description, lines, error = contentviews.get_message_content_view(
content_view.replace('_', ' '), message, self.flow
)
# if error:
# add event log
self.write(dict(
lines=list(lines),
description=description
))
class Events(RequestHandler):
def get(self):
self.write([logentry_to_json(e) for e in self.master.events.data])
class Options(RequestHandler):
def get(self):
self.write(optmanager.dump_dicts(self.master.options))
def put(self):
update = self.json
try:
self.master.options.update(**update)
except Exception as err:
raise APIError(400, f"{err}")
class SaveOptions(RequestHandler):
def post(self):
# try:
# optmanager.save(self.master.options, CONFIG_PATH, True)
# except Exception as err:
# raise APIError(400, "{}".format(err))
pass
class DnsRebind(RequestHandler):
def get(self):
raise tornado.web.HTTPError(
403,
reason="To protect against DNS rebinding, mitmweb can only be accessed by IP at the moment. "
"(https://github.com/mitmproxy/mitmproxy/issues/3234)"
)
class Conf(RequestHandler):
def get(self):
conf = {
"static": False,
"version": version.VERSION,
"contentViews": [v.name for v in contentviews.views]
}
self.write(f"MITMWEB_CONF = {json.dumps(conf)};")
self.set_header("content-type", "application/javascript")
class Application(tornado.web.Application):
master: "mitmproxy.tools.web.master.WebMaster"
def __init__(self, master: "mitmproxy.tools.web.master.WebMaster", debug: bool) -> None:
self.master = master
super().__init__(
default_host="dns-rebind-protection",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
cookie_secret=os.urandom(256),
debug=debug,
autoreload=False,
)
self.add_handlers("dns-rebind-protection", [(r"/.*", DnsRebind)])
self.add_handlers(
# make mitmweb accessible by IP only to prevent DNS rebinding.
r'^(localhost|[0-9.]+|\[[0-9a-fA-F:]+\])$',
[
(r"/", IndexHandler),
(r"/filter-help(?:\.json)?", FilterHelp),
(r"/updates", ClientConnection),
(r"/events(?:\.json)?", Events),
(r"/flows(?:\.json)?", Flows),
(r"/flows/dump", DumpFlows),
(r"/flows/resume", ResumeFlows),
(r"/flows/kill", KillFlows),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)", FlowHandler),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/resume", ResumeFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/kill", KillFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/duplicate", DuplicateFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/replay", ReplayFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/revert", RevertFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/export/(?P<format>[a-z][a-z_]+).json", ExportFlow),
(r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content.data", FlowContent),
(
r"/flows/(?P<flow_id>[0-9a-f\-]+)/(?P<message>request|response)/content/(?P<content_view>[0-9a-zA-Z\-\_]+)(?:\.json)?",
FlowContentView),
(r"/clear", ClearAll),
(r"/options(?:\.json)?", Options),
(r"/options/save", SaveOptions),
(r"/conf\.js", Conf),
]
)
| 1 | 15,791 | Is there a reason why we `str` the result? It would be nice to eventually support more datatypes here, so we want to generally aim for arbitrary JSON. | mitmproxy-mitmproxy | py |
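Following up on the review comment above, a minimal sketch of how the `Commands` handler from the patch could return arbitrary JSON instead of always calling `str()`; the probe-and-fallback approach is an assumption for illustration, not mitmproxy's actual implementation (the module already imports `json`):

import json

class Commands(RequestHandler):
    def post(self):
        result = self.master.commands.execute(self.json["command"])
        try:
            json.dumps(result)  # probe: can the value be encoded as-is?
        except (TypeError, ValueError):
            result = str(result)  # fall back to a string form for unencodable values
        self.write({"result": result})

This keeps lists, dicts, numbers and booleans as real JSON types on the wire and only degrades to a string when the command returns something the encoder cannot handle.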
@@ -29,7 +29,7 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
-import org.apache.commons.lang.SerializationUtils;
+import org.apache.commons.lang3.SerializationUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.pig;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.commons.lang.SerializationUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.PartitionSpec;
import org.apache.iceberg.Schema;
import org.apache.iceberg.Table;
import org.apache.iceberg.TableScan;
import org.apache.iceberg.expressions.Expression;
import org.apache.iceberg.hadoop.HadoopInputFile;
import org.apache.iceberg.io.CloseableIterable;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.parquet.Parquet;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
import org.apache.pig.data.DataByteArray;
import org.apache.pig.impl.util.ObjectSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.iceberg.pig.SchemaUtil.project;
public class IcebergPigInputFormat<T> extends InputFormat<Void, T> {
private static final Logger LOG = LoggerFactory.getLogger(IcebergPigInputFormat.class);
static final String ICEBERG_SCHEMA = "iceberg.schema";
static final String ICEBERG_PROJECTED_FIELDS = "iceberg.projected.fields";
static final String ICEBERG_FILTER_EXPRESSION = "iceberg.filter.expression";
private Table table;
private List<InputSplit> splits;
IcebergPigInputFormat(Table table) {
this.table = table;
}
@Override
@SuppressWarnings("unchecked")
public List<InputSplit> getSplits(JobContext context) throws IOException {
if (splits != null) {
LOG.info("Returning cached splits: " + splits.size());
return splits;
}
splits = Lists.newArrayList();
TableScan scan = table.newScan();
    // Apply Filters
Expression filterExpression = (Expression) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_FILTER_EXPRESSION));
if (filterExpression != null) {
LOG.info("Filter Expression: " + filterExpression);
scan = scan.filter(filterExpression);
}
    // Wrap in Splits
try (CloseableIterable<CombinedScanTask> tasks = scan.planTasks()) {
tasks.forEach((scanTask) -> splits.add(new IcebergSplit(scanTask)));
}
return splits;
}
@Override
public RecordReader<Void, T> createRecordReader(InputSplit split, TaskAttemptContext context) {
return new IcebergRecordReader<>();
}
private static class IcebergSplit extends InputSplit implements Writable {
private static final String[] ANYWHERE = new String[] { "*" };
private CombinedScanTask task;
IcebergSplit(CombinedScanTask task) {
this.task = task;
}
public IcebergSplit() {
}
@Override
public long getLength() {
return task.files().stream().mapToLong(FileScanTask::length).sum();
}
@Override
public String[] getLocations() {
return ANYWHERE;
}
@Override
public void write(DataOutput out) throws IOException {
byte[] data = SerializationUtils.serialize(this.task);
out.writeInt(data.length);
out.write(data);
}
@Override
public void readFields(DataInput in) throws IOException {
byte[] data = new byte[in.readInt()];
in.readFully(data);
this.task = (CombinedScanTask) SerializationUtils.deserialize(data);
}
}
public class IcebergRecordReader<T> extends RecordReader<Void, T> {
private TaskAttemptContext context;
private Iterator<FileScanTask> tasks;
private FileScanTask currentTask;
private CloseableIterable reader;
private Iterator<T> recordIterator;
private T currentRecord;
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
this.context = context;
CombinedScanTask task = ((IcebergSplit) split).task;
tasks = task.files().iterator();
advance();
}
@SuppressWarnings("unchecked")
private boolean advance() throws IOException {
if (reader != null) {
reader.close();
}
if (!tasks.hasNext()) {
return false;
}
currentTask = tasks.next();
Schema tableSchema = (Schema) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_SCHEMA));
List<String> projectedFields = (List<String>) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_PROJECTED_FIELDS));
Schema projectedSchema = projectedFields != null ? project(tableSchema, projectedFields) : tableSchema;
PartitionSpec spec = currentTask.asFileScanTask().spec();
DataFile file = currentTask.file();
InputFile inputFile = HadoopInputFile.fromLocation(file.path(), context.getConfiguration());
Set<Integer> idColumns = spec.identitySourceIds();
// schema needed for the projection and filtering
boolean hasJoinedPartitionColumns = !idColumns.isEmpty();
switch (file.format()) {
case PARQUET:
Map<Integer, Object> partitionValueMap = Maps.newHashMap();
if (hasJoinedPartitionColumns) {
Schema readSchema = TypeUtil.selectNot(projectedSchema, idColumns);
Schema projectedPartitionSchema = TypeUtil.select(projectedSchema, idColumns);
Map<String, Integer> partitionSpecFieldIndexMap = Maps.newHashMap();
          for (int i = 0; i < spec.fields().size(); i++) {
partitionSpecFieldIndexMap.put(spec.fields().get(i).name(), i);
}
for (Types.NestedField field : projectedPartitionSchema.columns()) {
int partitionIndex = partitionSpecFieldIndexMap.get(field.name());
Object partitionValue = file.partition().get(partitionIndex, Object.class);
partitionValueMap.put(field.fieldId(), convertPartitionValue(field.type(), partitionValue));
}
reader = Parquet.read(inputFile)
.project(readSchema)
.split(currentTask.start(), currentTask.length())
.filter(currentTask.residual())
.createReaderFunc(fileSchema -> PigParquetReader.buildReader(fileSchema, projectedSchema, partitionValueMap))
.build();
} else {
reader = Parquet.read(inputFile)
.project(projectedSchema)
.split(currentTask.start(), currentTask.length())
.filter(currentTask.residual())
.createReaderFunc(fileSchema -> PigParquetReader.buildReader(fileSchema, projectedSchema, partitionValueMap))
.build();
}
recordIterator = reader.iterator();
break;
default:
throw new UnsupportedOperationException("Unsupported file format: " + file.format());
}
return true;
}
private Object convertPartitionValue(Type type, Object value) {
      if (type.typeId() == Types.BinaryType.get().typeId()) {
ByteBuffer buffer = (ByteBuffer) value;
return new DataByteArray(buffer.get(new byte[buffer.remaining()]).array());
}
return value;
}
@Override
public boolean nextKeyValue() throws IOException {
if (recordIterator.hasNext()) {
currentRecord = recordIterator.next();
return true;
}
while (advance()) {
if (recordIterator.hasNext()) {
currentRecord = recordIterator.next();
return true;
}
}
return false;
}
@Override
public Void getCurrentKey() {
return null;
}
@Override
public T getCurrentValue() {
return currentRecord;
}
@Override
public float getProgress() {
return 0;
}
@Override
public void close() {
}
}
}
 | 1 | 15,920 | Why did we update to this API? | apache-iceberg | java
@@ -164,4 +164,17 @@ public class Preferences {
public SharedPreferences getPreferences() {
return mStorage;
}
+
+ public static <T extends Enum<T>> T getEnumStringPref(SharedPreferences prefs, String key, T defaultEnum) {
+ String stringPref = prefs.getString(key, defaultEnum.name());
+ try {
+ return Enum.valueOf(defaultEnum.getDeclaringClass(), stringPref);
+ } catch (Exception ex) {
+ Log.w(K9.LOG_TAG, "Unable to convert preference key [" + key +
+ "] value [" + stringPref + "] to enum of type " + defaultEnum.getDeclaringClass(), ex);
+
+ return defaultEnum;
+ }
+ }
+
} | 1 |
package com.fsck.k9;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import android.content.Context;
import android.content.SharedPreferences;
import android.util.Log;
import com.fsck.k9.mail.store.RemoteStore;
import com.fsck.k9.mailstore.LocalStore;
import com.fsck.k9.preferences.Editor;
import com.fsck.k9.preferences.Storage;
public class Preferences {
private static Preferences preferences;
public static synchronized Preferences getPreferences(Context context) {
Context appContext = context.getApplicationContext();
if (preferences == null) {
preferences = new Preferences(appContext);
}
return preferences;
}
private Storage mStorage;
private Map<String, Account> accounts = null;
private List<Account> accountsInOrder = null;
private Account newAccount;
private Context mContext;
private Preferences(Context context) {
mStorage = Storage.getStorage(context);
mContext = context;
if (mStorage.isEmpty()) {
Log.i(K9.LOG_TAG, "Preferences storage is zero-size, importing from Android-style preferences");
Editor editor = mStorage.edit();
editor.copy(context.getSharedPreferences("AndroidMail.Main", Context.MODE_PRIVATE));
editor.commit();
}
}
public synchronized void loadAccounts() {
accounts = new HashMap<String, Account>();
accountsInOrder = new LinkedList<Account>();
String accountUuids = getPreferences().getString("accountUuids", null);
if ((accountUuids != null) && (accountUuids.length() != 0)) {
String[] uuids = accountUuids.split(",");
for (String uuid : uuids) {
Account newAccount = new Account(this, uuid);
accounts.put(uuid, newAccount);
accountsInOrder.add(newAccount);
}
}
if ((newAccount != null) && newAccount.getAccountNumber() != -1) {
accounts.put(newAccount.getUuid(), newAccount);
accountsInOrder.add(newAccount);
newAccount = null;
}
}
/**
     * Returns a list of the accounts on the system. If no accounts are
     * registered the method returns an empty list.
* @return all accounts
*/
public synchronized List<Account> getAccounts() {
if (accounts == null) {
loadAccounts();
}
return Collections.unmodifiableList(accountsInOrder);
}
/**
     * Returns a list of the accounts on the system that are enabled and
     * available. If no such accounts exist the method returns an empty list.
     * @return all accounts that are enabled and {@link Account#isAvailable(Context)}
*/
public synchronized Collection<Account> getAvailableAccounts() {
List<Account> allAccounts = getAccounts();
Collection<Account> retval = new ArrayList<Account>(accounts.size());
for (Account account : allAccounts) {
if (account.isEnabled() && account.isAvailable(mContext)) {
retval.add(account);
}
}
return retval;
}
public synchronized Account getAccount(String uuid) {
if (accounts == null) {
loadAccounts();
}
Account account = accounts.get(uuid);
return account;
}
public synchronized Account newAccount() {
newAccount = new Account(mContext);
accounts.put(newAccount.getUuid(), newAccount);
accountsInOrder.add(newAccount);
return newAccount;
}
public synchronized void deleteAccount(Account account) {
if (accounts != null) {
accounts.remove(account.getUuid());
}
if (accountsInOrder != null) {
accountsInOrder.remove(account);
}
try {
RemoteStore.removeInstance(account);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Failed to reset remote store for account " + account.getUuid(), e);
}
LocalStore.removeAccount(account);
account.deleteCertificates();
account.delete(this);
if (newAccount == account) {
newAccount = null;
}
}
/**
* Returns the Account marked as default. If no account is marked as default
* the first account in the list is marked as default and then returned. If
* there are no accounts on the system the method returns null.
*/
public Account getDefaultAccount() {
String defaultAccountUuid = getPreferences().getString("defaultAccountUuid", null);
Account defaultAccount = getAccount(defaultAccountUuid);
if (defaultAccount == null) {
Collection<Account> accounts = getAvailableAccounts();
if (!accounts.isEmpty()) {
defaultAccount = accounts.iterator().next();
setDefaultAccount(defaultAccount);
}
}
return defaultAccount;
}
public void setDefaultAccount(Account account) {
getPreferences().edit().putString("defaultAccountUuid", account.getUuid()).commit();
}
public SharedPreferences getPreferences() {
return mStorage;
}
}
| 1 | 12,938 | should probably just catch `IllegalArgumentException` here | k9mail-k-9 | java |
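As a follow-up to the review comment above, a sketch of the same helper with the narrower catch the reviewer suggests; `Enum.valueOf` throws `IllegalArgumentException` when the stored string names no constant of the enum type, so that is the failure worth handling (illustrative only, not the merged code):

    public static <T extends Enum<T>> T getEnumStringPref(SharedPreferences prefs, String key, T defaultEnum) {
        String stringPref = prefs.getString(key, defaultEnum.name());
        try {
            return Enum.valueOf(defaultEnum.getDeclaringClass(), stringPref);
        } catch (IllegalArgumentException ex) {
            // The stored value does not match any constant of the enum type.
            Log.w(K9.LOG_TAG, "Unable to convert preference key [" + key +
                    "] value [" + stringPref + "] to enum of type " + defaultEnum.getDeclaringClass(), ex);
            return defaultEnum;
        }
    }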
@@ -38,7 +38,9 @@ type truncateFunc func(t Time, d Duration) Time
func (w *Window) getTruncateFunc(d Duration) (truncateFunc, error) {
switch months, nsecs := d.Months(), d.Nanoseconds(); {
case months != 0 && nsecs != 0:
- return nil, errors.New(codes.Invalid, "duration used as an interval cannot mix month and nanosecond units")
+ const docURL = "https://v2.docs.influxdata.com/v2.0/reference/flux/stdlib/built-in/transformations/window/#calendar-months-and-years"
+ return nil, errors.New(codes.Invalid, "duration used as an interval cannot mix month and nanosecond units").
+ WithDocURL(docURL)
case months != 0:
return truncateByMonths, nil
case nsecs != 0: | 1 | package execute
import (
"time"
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/values"
)
type Window struct {
Every Duration
Period Duration
Offset Duration
}
// NewWindow creates a window with the given parameters,
// and normalizes the offset to a small positive duration.
// It also validates that the durations are valid when
// used within a window.
func NewWindow(every, period, offset Duration) (Window, error) {
// Normalize the offset to a small positive duration
offset = offset.Normalize(every)
w := Window{
Every: every,
Period: period,
Offset: offset,
}
if err := w.IsValid(); err != nil {
return Window{}, err
}
return w, nil
}
type truncateFunc func(t Time, d Duration) Time
func (w *Window) getTruncateFunc(d Duration) (truncateFunc, error) {
switch months, nsecs := d.Months(), d.Nanoseconds(); {
case months != 0 && nsecs != 0:
return nil, errors.New(codes.Invalid, "duration used as an interval cannot mix month and nanosecond units")
case months != 0:
return truncateByMonths, nil
case nsecs != 0:
return truncateByNsecs, nil
default:
return nil, errors.New(codes.Invalid, "duration used as an interval cannot be zero")
}
}
// truncate will truncate the time using the duration.
func (w *Window) truncate(t Time) Time {
fn, err := w.getTruncateFunc(w.Every)
if err != nil {
panic(err)
}
return fn(t, w.Every)
}
// IsValid will check if this Window is valid and it will
// return an error if it isn't.
func (w Window) IsValid() error {
_, err := w.getTruncateFunc(w.Every)
return err
}
// GetEarliestBounds returns the bounds for the earliest window bounds
// that contains the given time t. For underlapping windows that
// do not contain time t, the window directly after time t will be returned.
func (w Window) GetEarliestBounds(t Time) Bounds {
// translate to not-offset coordinate
t = t.Add(w.Offset.Mul(-1))
stop := w.truncate(t).Add(w.Every)
// translate to offset coordinate
stop = stop.Add(w.Offset)
start := stop.Add(w.Period.Mul(-1))
return Bounds{
Start: start,
Stop: stop,
}
}
// GetOverlappingBounds returns a slice of bounds for each window
// that overlaps the input bounds b.
func (w Window) GetOverlappingBounds(b Bounds) []Bounds {
if b.IsEmpty() {
return []Bounds{}
}
// Estimate the number of windows by using a rough approximation.
c := (b.Duration().Duration() / w.Every.Duration()) + (w.Period.Duration() / w.Every.Duration())
bs := make([]Bounds, 0, c)
bi := w.GetEarliestBounds(b.Start)
for bi.Start < b.Stop {
bs = append(bs, bi)
bi.Start = bi.Start.Add(w.Every)
bi.Stop = bi.Stop.Add(w.Every)
}
return bs
}
// truncateByNsecs will truncate the time to the given number
// of nanoseconds.
func truncateByNsecs(t Time, d Duration) Time {
remainder := int64(t) % d.Nanoseconds()
return t - Time(remainder)
}
// truncateByMonths will truncate the time to the given
// number of months.
func truncateByMonths(t Time, d Duration) Time {
ts := t.Time()
year, month, _ := ts.Date()
// Determine the total number of months and truncate
// the number of months by the duration amount.
total := int64(year*12) + int64(month-1)
remainder := total % d.Months()
total -= remainder
// Recreate a new time from the year and month combination.
year, month = int(total/12), time.Month(total%12)+1
ts = time.Date(year, month, 1, 0, 0, 0, 0, time.UTC)
return values.ConvertTime(ts)
}
| 1 | 15,095 | What does this error message mean? | influxdata-flux | go |
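To answer the question in the review above: the error is raised when a single duration literal mixes a calendar unit (months or years) with a clock unit (weeks, days, hours, and smaller, which all reduce to nanoseconds), because such an interval has no fixed length to window by. A rough Flux illustration (the bucket name and range are hypothetical):

// Rejected: 1mo1h carries both a month component and a nanosecond component.
from(bucket: "example-bucket")
    |> range(start: -1y)
    |> window(every: 1mo1h)

// Accepted: the interval uses only one kind of unit.
from(bucket: "example-bucket")
    |> range(start: -1y)
    |> window(every: 1mo)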
@@ -864,7 +864,7 @@ def get_utf8_value(value):
return value
if not isinstance(value, six.string_types):
- value = six.text_type(value)
+ value = six.text_type(value).encode('utf-8')
if isinstance(value, six.text_type):
value = value.encode('utf-8') | 1 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Parts of this code were copied or derived from sample code supplied by AWS.
# The following notice applies to that code.
#
# This software code is made available "AS IS" without warranties of any
# kind. You may copy, display, modify and redistribute the software
# code either by itself or as incorporated into your code; provided that
# you do not remove any proprietary notices. Your use of this software
# code is at your own risk and you waive any claim against Amazon
# Digital Services, Inc. or its affiliates with respect to your use of
# this software code. (c) 2006 Amazon Digital Services, Inc. or its
# affiliates.
"""
Some handy utility functions used by several classes.
"""
import subprocess
import time
import logging.handlers
import boto
import boto.provider
import tempfile
import random
import smtplib
import datetime
import re
import email.mime.multipart
import email.mime.base
import email.mime.text
import email.utils
import email.encoders
import gzip
import threading
import locale
from boto.compat import six, StringIO, urllib, encodebytes
from contextlib import contextmanager
from hashlib import md5, sha512
_hashfn = sha512
from boto.compat import json
try:
from boto.compat.json import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
# List of Query String Arguments of Interest
qsa_of_interest = ['acl', 'cors', 'defaultObjectAcl', 'location', 'logging',
'partNumber', 'policy', 'requestPayment', 'torrent',
'versioning', 'versionId', 'versions', 'website',
'uploads', 'uploadId', 'response-content-type',
'response-content-language', 'response-expires',
'response-cache-control', 'response-content-disposition',
'response-content-encoding', 'delete', 'lifecycle',
'tagging', 'restore',
# storageClass is a QSA for buckets in Google Cloud Storage.
# (StorageClass is associated to individual keys in S3, but
# having it listed here should cause no problems because
# GET bucket?storageClass is not part of the S3 API.)
'storageClass',
# websiteConfig is a QSA for buckets in Google Cloud
# Storage.
'websiteConfig',
# compose is a QSA for objects in Google Cloud Storage.
'compose',
# billing is a QSA for buckets in Google Cloud Storage.
'billing',
# userProject is a QSA for requests in Google Cloud Storage.
'userProject',
# encryptionConfig is a QSA for requests in Google Cloud
# Storage.
'encryptionConfig']
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_number_cap_regex = re.compile('([a-z])([0-9]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
def unquote_v(nv):
if len(nv) == 1:
return nv
else:
return (nv[0], urllib.parse.unquote(nv[1]))
def canonical_string(method, path, headers, expires=None,
provider=None):
"""
Generates the aws canonical string for the given parameters
"""
if not provider:
provider = boto.provider.get_default()
interesting_headers = {}
for key in headers:
lk = key.lower()
if headers[key] is not None and \
(lk in ['content-md5', 'content-type', 'date'] or
lk.startswith(provider.header_prefix)):
interesting_headers[lk] = str(headers[key]).strip()
# these keys get empty strings if they don't exist
if 'content-type' not in interesting_headers:
interesting_headers['content-type'] = ''
if 'content-md5' not in interesting_headers:
interesting_headers['content-md5'] = ''
# just in case someone used this. it's not necessary in this lib.
if provider.date_header in interesting_headers:
interesting_headers['date'] = ''
# if you're using expires for query string auth, then it trumps date
# (and provider.date_header)
if expires:
interesting_headers['date'] = str(expires)
sorted_header_keys = sorted(interesting_headers.keys())
buf = "%s\n" % method
for key in sorted_header_keys:
val = interesting_headers[key]
if key.startswith(provider.header_prefix):
buf += "%s:%s\n" % (key, val)
else:
buf += "%s\n" % val
# don't include anything after the first ? in the resource...
# unless it is one of the QSA of interest, defined above
t = path.split('?')
buf += t[0]
if len(t) > 1:
qsa = t[1].split('&')
qsa = [a.split('=', 1) for a in qsa]
qsa = [unquote_v(a) for a in qsa if a[0] in qsa_of_interest]
if len(qsa) > 0:
qsa.sort(key=lambda x: x[0])
qsa = ['='.join(a) for a in qsa]
buf += '?'
buf += '&'.join(qsa)
return buf
def merge_meta(headers, metadata, provider=None):
if not provider:
provider = boto.provider.get_default()
metadata_prefix = provider.metadata_prefix
final_headers = headers.copy()
for k in metadata.keys():
if k.lower() in boto.s3.key.Key.base_user_settable_fields:
final_headers[k] = metadata[k]
else:
final_headers[metadata_prefix + k] = metadata[k]
return final_headers
def get_aws_metadata(headers, provider=None):
if not provider:
provider = boto.provider.get_default()
metadata_prefix = provider.metadata_prefix
metadata = {}
for hkey in headers.keys():
if hkey.lower().startswith(metadata_prefix):
val = urllib.parse.unquote(headers[hkey])
if isinstance(val, bytes):
try:
val = val.decode('utf-8')
except UnicodeDecodeError:
# Just leave the value as-is
pass
metadata[hkey[len(metadata_prefix):]] = val
del headers[hkey]
return metadata
def retry_url(url, retry_on_404=True, num_retries=10, timeout=None):
"""
Retry a url. This is specifically used for accessing the metadata
service on an instance. Since this address should never be proxied
(for security reasons), we create a ProxyHandler with a NULL
dictionary to override any proxy settings in the environment.
"""
for i in range(0, num_retries):
try:
proxy_handler = urllib.request.ProxyHandler({})
opener = urllib.request.build_opener(proxy_handler)
req = urllib.request.Request(url)
r = opener.open(req, timeout=timeout)
result = r.read()
if(not isinstance(result, six.string_types) and
hasattr(result, 'decode')):
result = result.decode('utf-8')
return result
except urllib.error.HTTPError as e:
code = e.getcode()
if code == 404 and not retry_on_404:
return ''
except Exception as e:
boto.log.exception('Caught exception reading instance data')
# If not on the last iteration of the loop then sleep.
if i + 1 != num_retries:
boto.log.debug('Sleeping before retrying')
time.sleep(min(2 ** i,
boto.config.get('Boto', 'max_retry_delay', 60)))
boto.log.error('Unable to read instance data, giving up')
return ''
def _get_instance_metadata(url, num_retries, timeout=None):
return LazyLoadMetadata(url, num_retries, timeout)
class LazyLoadMetadata(dict):
def __init__(self, url, num_retries, timeout=None):
self._url = url
self._num_retries = num_retries
self._leaves = {}
self._dicts = []
self._timeout = timeout
data = boto.utils.retry_url(self._url, num_retries=self._num_retries, timeout=self._timeout)
if data:
fields = data.split('\n')
for field in fields:
if field.endswith('/'):
key = field[0:-1]
self._dicts.append(key)
else:
p = field.find('=')
if p > 0:
key = field[p + 1:]
resource = field[0:p] + '/openssh-key'
else:
key = resource = field
self._leaves[key] = resource
self[key] = None
def _materialize(self):
for key in self:
self[key]
def __getitem__(self, key):
if key not in self:
# allow dict to throw the KeyError
return super(LazyLoadMetadata, self).__getitem__(key)
# already loaded
val = super(LazyLoadMetadata, self).__getitem__(key)
if val is not None:
return val
if key in self._leaves:
resource = self._leaves[key]
last_exception = None
for i in range(0, self._num_retries):
try:
val = boto.utils.retry_url(
self._url + urllib.parse.quote(resource,
safe="/:"),
num_retries=self._num_retries,
timeout=self._timeout)
if val and val[0] == '{':
val = json.loads(val)
break
else:
p = val.find('\n')
if p > 0:
val = val.split('\n')
break
except JSONDecodeError as e:
boto.log.debug(
"encountered '%s' exception: %s" % (
e.__class__.__name__, e))
boto.log.debug(
'corrupted JSON data found: %s' % val)
last_exception = e
except Exception as e:
boto.log.debug("encountered unretryable" +
" '%s' exception, re-raising" % (
e.__class__.__name__))
last_exception = e
raise
boto.log.error("Caught exception reading meta data" +
" for the '%s' try" % (i + 1))
if i + 1 != self._num_retries:
next_sleep = min(
random.random() * 2 ** i,
boto.config.get('Boto', 'max_retry_delay', 60))
time.sleep(next_sleep)
else:
boto.log.error('Unable to read meta data, giving up')
boto.log.error(
"encountered '%s' exception: %s" % (
last_exception.__class__.__name__, last_exception))
raise last_exception
self[key] = val
elif key in self._dicts:
self[key] = LazyLoadMetadata(self._url + key + '/',
self._num_retries)
return super(LazyLoadMetadata, self).__getitem__(key)
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def values(self):
self._materialize()
return super(LazyLoadMetadata, self).values()
def items(self):
self._materialize()
return super(LazyLoadMetadata, self).items()
def __str__(self):
self._materialize()
return super(LazyLoadMetadata, self).__str__()
def __repr__(self):
self._materialize()
return super(LazyLoadMetadata, self).__repr__()
def _build_instance_metadata_url(url, version, path):
"""
Builds an EC2 metadata URL for fetching information about an instance.
Example:
>>> _build_instance_metadata_url('http://169.254.169.254', 'latest', 'meta-data/')
http://169.254.169.254/latest/meta-data/
:type url: string
:param url: URL to metadata service, e.g. 'http://169.254.169.254'
:type version: string
:param version: Version of the metadata to get, e.g. 'latest'
:type path: string
:param path: Path of the metadata to get, e.g. 'meta-data/'. If a trailing
slash is required it must be passed in with the path.
:return: The full metadata URL
"""
return '%s/%s/%s' % (url, version, path)
def get_instance_metadata(version='latest', url='http://169.254.169.254',
data='meta-data/', timeout=None, num_retries=5):
"""
Returns the instance metadata as a nested Python dictionary.
Simple values (e.g. local_hostname, hostname, etc.) will be
stored as string values. Values such as ancestor-ami-ids will
be stored in the dict as a list of string values. More complex
fields such as public-keys and will be stored as nested dicts.
If the timeout is specified, the connection to the specified url
will time out after the specified number of seconds.
"""
try:
metadata_url = _build_instance_metadata_url(url, version, data)
return _get_instance_metadata(metadata_url, num_retries=num_retries, timeout=timeout)
except urllib.error.URLError:
boto.log.exception("Exception caught when trying to retrieve "
"instance metadata for: %s", data)
return None
def get_instance_identity(version='latest', url='http://169.254.169.254',
timeout=None, num_retries=5):
"""
Returns the instance identity as a nested Python dictionary.
"""
iid = {}
base_url = _build_instance_metadata_url(url, version,
'dynamic/instance-identity/')
try:
data = retry_url(base_url, num_retries=num_retries, timeout=timeout)
fields = data.split('\n')
for field in fields:
val = retry_url(base_url + '/' + field + '/', num_retries=num_retries, timeout=timeout)
if val[0] == '{':
val = json.loads(val)
if field:
iid[field] = val
return iid
except urllib.error.URLError:
return None
def get_instance_userdata(version='latest', sep=None,
url='http://169.254.169.254', timeout=None, num_retries=5):
ud_url = _build_instance_metadata_url(url, version, 'user-data')
user_data = retry_url(ud_url, retry_on_404=False, num_retries=num_retries, timeout=timeout)
if user_data:
if sep:
l = user_data.split(sep)
user_data = {}
for nvpair in l:
t = nvpair.split('=')
user_data[t[0].strip()] = t[1].strip()
return user_data
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
LOCALE_LOCK = threading.Lock()
@contextmanager
def setlocale(name):
"""
A context manager to set the locale in a threadsafe manner.
"""
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
try:
yield locale.setlocale(locale.LC_ALL, name)
finally:
locale.setlocale(locale.LC_ALL, saved)
def get_ts(ts=None):
if not ts:
ts = time.gmtime()
return time.strftime(ISO8601, ts)
def parse_ts(ts):
with setlocale('C'):
ts = ts.strip()
try:
dt = datetime.datetime.strptime(ts, ISO8601)
return dt
except ValueError:
try:
dt = datetime.datetime.strptime(ts, ISO8601_MS)
return dt
except ValueError:
dt = datetime.datetime.strptime(ts, RFC1123)
return dt
def find_class(module_name, class_name=None):
if class_name:
module_name = "%s.%s" % (module_name, class_name)
modules = module_name.split('.')
c = None
try:
for m in modules[1:]:
if c:
c = getattr(c, m)
else:
c = getattr(__import__(".".join(modules[0:-1])), m)
return c
except:
return None
def update_dme(username, password, dme_id, ip_address):
"""
Update your Dynamic DNS record with DNSMadeEasy.com
"""
dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip'
dme_url += '?username=%s&password=%s&id=%s&ip=%s'
s = urllib.request.urlopen(dme_url % (username, password, dme_id, ip_address))
return s.read()
def fetch_file(uri, file=None, username=None, password=None):
"""
Fetch a file based on the URI provided.
If you do not pass in a file pointer a tempfile.NamedTemporaryFile,
or None if the file could not be retrieved is returned.
The URI can be either an HTTP url, or "s3://bucket_name/key_name"
"""
boto.log.info('Fetching %s' % uri)
if file is None:
file = tempfile.NamedTemporaryFile()
try:
if uri.startswith('s3://'):
bucket_name, key_name = uri[len('s3://'):].split('/', 1)
c = boto.connect_s3(aws_access_key_id=username,
aws_secret_access_key=password)
bucket = c.get_bucket(bucket_name)
key = bucket.get_key(key_name)
key.get_contents_to_file(file)
else:
if username and password:
passman = urllib.request.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, uri, username, password)
authhandler = urllib.request.HTTPBasicAuthHandler(passman)
opener = urllib.request.build_opener(authhandler)
urllib.request.install_opener(opener)
s = urllib.request.urlopen(uri)
file.write(s.read())
file.seek(0)
except:
raise
boto.log.exception('Problem Retrieving file: %s' % uri)
file = None
return file
class ShellCommand(object):
def __init__(self, command, wait=True, fail_fast=False, cwd=None):
self.exit_code = 0
self.command = command
self.log_fp = StringIO()
self.wait = wait
self.fail_fast = fail_fast
self.run(cwd=cwd)
def run(self, cwd=None):
boto.log.info('running:%s' % self.command)
self.process = subprocess.Popen(self.command, shell=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd)
if(self.wait):
while self.process.poll() is None:
time.sleep(1)
t = self.process.communicate()
self.log_fp.write(t[0])
self.log_fp.write(t[1])
boto.log.info(self.log_fp.getvalue())
self.exit_code = self.process.returncode
if self.fail_fast and self.exit_code != 0:
raise Exception("Command " + self.command +
" failed with status " + self.exit_code)
return self.exit_code
def setReadOnly(self, value):
raise AttributeError
def getStatus(self):
return self.exit_code
status = property(getStatus, setReadOnly, None,
'The exit code for the command')
def getOutput(self):
return self.log_fp.getvalue()
output = property(getOutput, setReadOnly, None,
'The STDIN and STDERR output of the command')
class AuthSMTPHandler(logging.handlers.SMTPHandler):
"""
This class extends the SMTPHandler in the standard Python logging module
to accept a username and password on the constructor and to then use those
credentials to authenticate with the SMTP server. To use this, you could
add something like this in your boto config file:
[handler_hand07]
class=boto.utils.AuthSMTPHandler
level=WARN
formatter=form07
args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject')
"""
def __init__(self, mailhost, username, password,
fromaddr, toaddrs, subject):
"""
Initialize the handler.
We have extended the constructor to accept a username/password
for SMTP authentication.
"""
super(AuthSMTPHandler, self).__init__(mailhost, fromaddr,
toaddrs, subject)
self.username = username
self.password = password
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
It would be really nice if I could add authorization to this class
without having to resort to cut and paste inheritance but, no.
"""
try:
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port)
smtp.login(self.username, self.password)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
','.join(self.toaddrs),
self.getSubject(record),
email.utils.formatdate(), msg)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class LRUCache(dict):
"""A dictionary-like object that stores only a certain number of items, and
discards its least recently used item when full.
>>> cache = LRUCache(3)
>>> cache['A'] = 0
>>> cache['B'] = 1
>>> cache['C'] = 2
>>> len(cache)
3
>>> cache['A']
0
Adding new items to the cache does not increase its size. Instead, the least
recently used item is dropped:
>>> cache['D'] = 3
>>> len(cache)
3
>>> 'B' in cache
False
Iterating over the cache returns the keys, starting with the most recently
used:
>>> for key in cache:
    ...     print(key)
D
A
C
This code is based on the LRUCache class from Genshi which is based on
`Myghty <http://www.myghty.org>`_'s LRUCache from ``myghtyutils.util``,
written by Mike Bayer and released under the MIT license (Genshi uses the
BSD License).
"""
class _Item(object):
def __init__(self, key, value):
self.previous = self.next = None
self.key = key
self.value = value
def __repr__(self):
return repr(self.value)
def __init__(self, capacity):
self._dict = dict()
self.capacity = capacity
self.head = None
self.tail = None
def __contains__(self, key):
return key in self._dict
def __iter__(self):
cur = self.head
while cur:
yield cur.key
cur = cur.next
def __len__(self):
return len(self._dict)
def __getitem__(self, key):
item = self._dict[key]
self._update_item(item)
return item.value
def __setitem__(self, key, value):
item = self._dict.get(key)
if item is None:
item = self._Item(key, value)
self._dict[key] = item
self._insert_item(item)
else:
item.value = value
self._update_item(item)
self._manage_size()
def __repr__(self):
return repr(self._dict)
def _insert_item(self, item):
item.previous = None
item.next = self.head
if self.head is not None:
self.head.previous = item
else:
self.tail = item
self.head = item
self._manage_size()
def _manage_size(self):
while len(self._dict) > self.capacity:
del self._dict[self.tail.key]
if self.tail != self.head:
self.tail = self.tail.previous
self.tail.next = None
else:
self.head = self.tail = None
def _update_item(self, item):
if self.head == item:
return
previous = item.previous
previous.next = item.next
if item.next is not None:
item.next.previous = previous
else:
self.tail = previous
item.previous = None
item.next = self.head
self.head.previous = self.head = item
class Password(object):
"""
Password object that stores itself as hashed.
Hash defaults to SHA512 if available, MD5 otherwise.
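    A minimal usage sketch (illustrative only, assuming the default hash
    function is available):

        >>> pw = Password()
        >>> pw.set('secret')
        >>> pw == 'secret'
        True
        >>> pw == 'wrong'
        False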
"""
hashfunc = _hashfn
def __init__(self, str=None, hashfunc=None):
"""
        Load the string from an initial value; this should be the
        raw hashed password.
"""
self.str = str
if hashfunc:
self.hashfunc = hashfunc
def set(self, value):
if not isinstance(value, bytes):
value = value.encode('utf-8')
self.str = self.hashfunc(value).hexdigest()
def __str__(self):
return str(self.str)
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, bytes):
other = other.encode('utf-8')
return str(self.hashfunc(other).hexdigest()) == str(self.str)
def __len__(self):
if self.str:
return len(self.str)
else:
return 0
def notify(subject, body=None, html_body=None, to_string=None,
attachments=None, append_instance_id=True):
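    """
    Send a notification email using the SMTP settings from the boto config
    file's [Notification] section (smtp_host, smtp_port, smtp_tls, smtp_user,
    smtp_pass, smtp_to, smtp_from). When append_instance_id is True, the
    current instance id is prepended to the subject. If no smtp_to address is
    configured the notification is skipped, and any SMTP failure is logged
    rather than raised.
    """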
attachments = attachments or []
if append_instance_id:
subject = "[%s] %s" % (
boto.config.get_value("Instance", "instance-id"), subject)
if not to_string:
to_string = boto.config.get_value('Notification', 'smtp_to', None)
if to_string:
try:
from_string = boto.config.get_value('Notification',
'smtp_from', 'boto')
msg = email.mime.multipart.MIMEMultipart()
msg['From'] = from_string
msg['Reply-To'] = from_string
msg['To'] = to_string
msg['Date'] = email.utils.formatdate(localtime=True)
msg['Subject'] = subject
if body:
msg.attach(email.mime.text.MIMEText(body))
if html_body:
part = email.mime.base.MIMEBase('text', 'html')
part.set_payload(html_body)
email.encoders.encode_base64(part)
msg.attach(part)
for part in attachments:
msg.attach(part)
smtp_host = boto.config.get_value('Notification',
'smtp_host', 'localhost')
# Alternate port support
if boto.config.get_value("Notification", "smtp_port"):
server = smtplib.SMTP(smtp_host, int(
boto.config.get_value("Notification", "smtp_port")))
else:
server = smtplib.SMTP(smtp_host)
# TLS support
if boto.config.getbool("Notification", "smtp_tls"):
server.ehlo()
server.starttls()
server.ehlo()
smtp_user = boto.config.get_value('Notification', 'smtp_user', '')
smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '')
if smtp_user:
server.login(smtp_user, smtp_pass)
server.sendmail(from_string, to_string, msg.as_string())
server.quit()
except:
boto.log.exception('notify failed')
def get_utf8_value(value):
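    """
    Return ``value`` as UTF-8 encoded bytes. Non-string values are first
    converted to text; on Python 3, ``bytes`` input is returned unchanged.
    """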
if not six.PY2 and isinstance(value, bytes):
return value
if not isinstance(value, six.string_types):
value = six.text_type(value)
if isinstance(value, six.text_type):
value = value.encode('utf-8')
return value
def mklist(value):
if not isinstance(value, list):
if isinstance(value, tuple):
value = list(value)
else:
value = [value]
return value
def pythonize_name(name):
"""Convert camel case to a "pythonic" name.
Examples::
pythonize_name('CamelCase') -> 'camel_case'
pythonize_name('already_pythonized') -> 'already_pythonized'
pythonize_name('HTTPRequest') -> 'http_request'
pythonize_name('HTTPStatus200Ok') -> 'http_status_200_ok'
pythonize_name('UPPER') -> 'upper'
pythonize_name('') -> ''
"""
s1 = _first_cap_regex.sub(r'\1_\2', name)
s2 = _number_cap_regex.sub(r'\1_\2', s1)
return _end_cap_regex.sub(r'\1_\2', s2).lower()
def write_mime_multipart(content, compress=False, deftype='text/plain', delimiter=':'):
"""Description:
:param content: A list of tuples of name-content pairs. This is used
instead of a dict to ensure that scripts run in order
:type list of tuples:
:param compress: Use gzip to compress the scripts, defaults to no compression
:type bool:
:param deftype: The type that should be assumed if nothing else can be figured out
:type str:
:param delimiter: mime delimiter
:type str:
:return: Final mime multipart
:rtype: str:
"""
wrapper = email.mime.multipart.MIMEMultipart()
for name, con in content:
definite_type = guess_mime_type(con, deftype)
maintype, subtype = definite_type.split('/', 1)
if maintype == 'text':
mime_con = email.mime.text.MIMEText(con, _subtype=subtype)
else:
mime_con = email.mime.base.MIMEBase(maintype, subtype)
mime_con.set_payload(con)
# Encode the payload using Base64
email.encoders.encode_base64(mime_con)
mime_con.add_header('Content-Disposition', 'attachment', filename=name)
wrapper.attach(mime_con)
rcontent = wrapper.as_string()
if compress:
buf = StringIO()
gz = gzip.GzipFile(mode='wb', fileobj=buf)
try:
gz.write(rcontent)
finally:
gz.close()
rcontent = buf.getvalue()
return rcontent
def guess_mime_type(content, deftype):
"""Description: Guess the mime type of a block of text
:param content: content we're finding the type of
:type str:
:param deftype: Default mime type
:type str:
:rtype: <type>:
:return: <description>
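
    A brief illustrative example (hypothetical input):
    guess_mime_type('#!/bin/bash', 'text/plain') returns
    'text/x-shellscript' because of the '#!' prefix mapping below.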
"""
# Mappings recognized by cloudinit
starts_with_mappings = {
'#include': 'text/x-include-url',
'#!': 'text/x-shellscript',
'#cloud-config': 'text/cloud-config',
'#upstart-job': 'text/upstart-job',
'#part-handler': 'text/part-handler',
'#cloud-boothook': 'text/cloud-boothook'
}
rtype = deftype
for possible_type, mimetype in starts_with_mappings.items():
if content.startswith(possible_type):
rtype = mimetype
break
return(rtype)
def compute_md5(fp, buf_size=8192, size=None):
"""
Compute MD5 hash on passed file and return results in a tuple of values.
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer
will be reset to its current location before the
method returns.
:type buf_size: integer
:param buf_size: Number of bytes per read request.
:type size: int
    :param size: (optional) The maximum number of bytes to read from
        the file pointer (fp). This is useful when uploading
        a file in multiple parts where the file is being
        split in place into different parts. Fewer bytes may
        be available.
:rtype: tuple
:return: A tuple containing the hex digest version of the MD5 hash
as the first element, the base64 encoded version of the
plain digest as the second element and the data size as
the third element.
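
    A minimal sketch (illustrative, using an in-memory file object):

        >>> from io import BytesIO
        >>> hex_md5, b64_md5, size = compute_md5(BytesIO(b'hello'))
        >>> size
        5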
"""
return compute_hash(fp, buf_size, size, hash_algorithm=md5)
def compute_hash(fp, buf_size=8192, size=None, hash_algorithm=md5):
hash_obj = hash_algorithm()
spos = fp.tell()
if size and size < buf_size:
s = fp.read(size)
else:
s = fp.read(buf_size)
while s:
if not isinstance(s, bytes):
s = s.encode('utf-8')
hash_obj.update(s)
if size:
size -= len(s)
if size <= 0:
break
if size and size < buf_size:
s = fp.read(size)
else:
s = fp.read(buf_size)
hex_digest = hash_obj.hexdigest()
base64_digest = encodebytes(hash_obj.digest()).decode('utf-8')
if base64_digest[-1] == '\n':
base64_digest = base64_digest[0:-1]
# data_size based on bytes read.
data_size = fp.tell() - spos
fp.seek(spos)
return (hex_digest, base64_digest, data_size)
def find_matching_headers(name, headers):
"""
Takes a specific header name and a dict of headers {"name": "value"}.
Returns a list of matching header names, case-insensitive.
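    Illustrative example: find_matching_headers('content-type',
    {'Content-Type': 'text/plain'}) returns ['Content-Type'].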
"""
return [h for h in headers if h.lower() == name.lower()]
def merge_headers_by_name(name, headers):
"""
Takes a specific header name and a dict of headers {"name": "value"}.
Returns a string of all header values, comma-separated, that match the
input header name, case-insensitive.
"""
matching_headers = find_matching_headers(name, headers)
return ','.join(str(headers[h]) for h in matching_headers
if headers[h] is not None)
class RequestHook(object):
"""
This can be extended and supplied to the connection object
to gain access to request and response object after the request completes.
One use for this would be to implement some specific request logging.
"""
def handle_request_data(self, request, response, error=False):
pass
def host_is_ipv6(hostname):
"""
Detect (naively) if the hostname is an IPV6 host.
Return a boolean.
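    Illustrative examples (not exhaustive):
        host_is_ipv6('[2001:db8::1]:8080')  # True
        host_is_ipv6('2001:db8::1')         # True
        host_is_ipv6('example.com:80')      # False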
"""
    # Empty strings, or anything that is not a string, are automatically not
    # IPV6 addresses.
if not hostname or not isinstance(hostname, str):
return False
if hostname.startswith('['):
return True
if len(hostname.split(':')) > 2:
return True
    # Anything else that doesn't start with brackets or doesn't have more than
    # one ':' should not be an IPV6 address. This is very naive, but the rest of
    # the connection chain should error accordingly for typos or ill-formed
    # addresses.
return False
def parse_host(hostname):
"""
    Given a hostname that may have a port name, ensure that the port is
    trimmed and only the host is returned. This includes IPV6 hostnames,
    which may include brackets.
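
    Illustrative examples:
        parse_host('example.com:8080')    -> 'example.com'
        parse_host('[2001:db8::1]:8080')  -> '2001:db8::1'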
"""
# ensure that hostname does not have any whitespaces
hostname = hostname.strip()
if host_is_ipv6(hostname):
return hostname.split(']:', 1)[0].strip('[]')
else:
return hostname.split(':', 1)[0]
| 1 | 12,086 | I don't think this change is necessary. If we cast `value` to type `six.text_type`, then the next if statement should evaluate to True and do the encoding for us. | boto-boto | py |
@@ -54,6 +54,11 @@ class CartItem < ActiveRecord::Base
)
end
end
+ else
+ self.cart_item_traits.build(
+ name: trait[0],
+ value: handle_trait_values_for(trait)
+ )
end
end
end | 1 | class CartItem < ActiveRecord::Base
include PropMixin
belongs_to :cart
has_many :cart_item_traits
has_many :comments, as: :commentable
has_many :properties, as: :hasproperties
def green?
cart_item_traits.map(&:name).include?('green')
end
def features
cart_item_traits.select{ |trait| trait.name.include?("feature") }.map(&:value)
end
def socio
cart_item_traits.select{ |trait| trait.name.include?("socio") }.map(&:value)
end
def formatted_price
"$#{'%.2f' % price}"
end
def subtotal
"$#{'%.2f' % (price * quantity)}"
end
# matches .attributes
def to_a
[
self.description,
self.details,
self.vendor,
self.url,
self.notes,
self.part_number,
self.green?,
self.features,
self.socio,
self.quantity,
self.price,
self.quantity * self.price
]
end
def initialize_traits(traits_params)
traits_params.each do |trait|
if trait[1].kind_of?(Array)
trait[1].each do |individual|
if individual.present?
self.cart_item_traits.build(
name: trait[0],
value: individual
)
end
end
end
end
end
# matches #to_a
def self.attributes
[
'description',
'details',
'vendor',
'url',
'notes',
'part_number',
'green',
'features',
'socio',
'quantity',
'unit price',
'price for quantity'
]
end
def self.from_params(params)
cart = self.new(
:vendor => params.fetch(:vendor, nil),
:description => params.fetch(:description, nil),
:url => params.fetch(:url, nil),
:notes => params.fetch(:notes, nil),
:quantity => params.fetch(:qty , 0),
:details => params.fetch(:details, nil),
:part_number => params.fetch(:partNumber , nil),
:price => params.fetch(:price, nil).gsub(/[\$\,]/,"").to_f
)
traits_params = params[:traits] || []
cart.initialize_traits(traits_params)
props = params[:properties]
unless props.blank?
cart.set_props(props)
end
cart
end
end
| 1 | 12,105 | Can we move this into a method(s)? | 18F-C2 | rb |
@@ -22,3 +22,13 @@ func uitoa(val uint) string {
buf[i] = byte(val + '0')
return string(buf[i:])
}
+
+// clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte.
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+} | 1 | // Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syscall
func itoa(val int) string { // do it here rather than with fmt to avoid dependency
if val < 0 {
return "-" + uitoa(uint(-val))
}
return uitoa(uint(val))
}
func uitoa(val uint) string {
var buf [32]byte // big enough for int64
i := len(buf) - 1
for val >= 10 {
buf[i] = byte(val%10 + '0')
i--
val /= 10
}
buf[i] = byte(val + '0')
return string(buf[i:])
}
| 1 | 12,951 | You could perhaps call this `strlen` (although I don't particularly care about the name as it is an implementation detail). | tinygo-org-tinygo | go |
@@ -15,13 +15,8 @@ public class Regula
{
public const int Iterations = 4000000;
- public static volatile object VolatileObject;
-
[MethodImpl(MethodImplOptions.NoInlining)]
- private static void Escape(object obj)
- {
- VolatileObject = obj;
- }
+ private static void Escape(object _) { }
[Benchmark(Description = nameof(Regula))]
public bool Test() | 1 | // Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// The modified regula-falsi routine adapted from Conte and De Boor
using BenchmarkDotNet.Attributes;
using System.Runtime.CompilerServices;
using MicroBenchmarks;
namespace Benchstone.BenchF
{
[BenchmarkCategory(Categories.Runtime, Categories.Benchstones, Categories.JIT, Categories.BenchF)]
public class Regula
{
public const int Iterations = 4000000;
public static volatile object VolatileObject;
[MethodImpl(MethodImplOptions.NoInlining)]
private static void Escape(object obj)
{
VolatileObject = obj;
}
[Benchmark(Description = nameof(Regula))]
public bool Test()
{
double error, fxi;
double a, b, xi;
int idbg, iflag;
iflag = 0;
idbg = 0;
xi = 0;
error = 0.0;
fxi = 0.0;
for (int i = 1; i <= Iterations; i++)
{
a = 1.0;
b = 2.0;
Inner(ref a, ref b, 0.0000001, 0.0000000001, 30, out iflag);
if (iflag > 2)
{
goto L999;
}
xi = (a + b) / 2.0;
error = System.Math.Abs(b - a) / 2.0;
fxi = FG(xi);
if (idbg != 0)
{
System.Console.WriteLine(" the root is {0:E}", xi);
System.Console.WriteLine(" plus/minus {0}\n", error);
System.Console.WriteLine(" fg(root):= {0:E}\n", fxi);
}
L999:
{
}
}
// Escape iflag, xi, error, and fxi so that they appear live
Escape(iflag);
Escape(xi);
Escape(error);
Escape(fxi);
return true;
}
private static double FG(double x)
{
return (-1.0 - (x * (1.0 - (x * x))));
}
private static void Inner(ref double a, ref double b, double xtol, double ftol, int ntol, out int iflag)
{
double signfa, prevfw, fa, fb, fw, w;
iflag = 0;
fa = FG(a);
if (fa < 0.0)
{
signfa = -1.0;
}
else
{
signfa = 1.0;
}
fb = FG(b);
if (signfa * fb <= 0.0)
{
goto L5;
}
iflag = 3;
goto L99;
L5:
w = a;
fw = fa;
for (int i = 1; i <= ntol; i++)
{
if (System.Math.Abs(b - a) / 2.0 <= xtol)
{
goto L99;
}
if (System.Math.Abs(fw) > ftol)
{
goto L9;
}
a = w;
b = w;
iflag = 1;
goto L99;
L9:
w = (fa * b - fb * a) / (fa - fb);
if (fw < 0.0)
{
prevfw = -1.0;
}
else
{
prevfw = 1.0;
}
fw = FG(w);
if (signfa * fw < 0.0)
{
goto L10;
}
a = w;
fa = fw;
if (fw * prevfw > 0.0)
{
fb = fb / 2.0;
}
goto L20;
L10:
b = w;
fb = fw;
if (fw * prevfw > 0.0)
{
fa = fa / 2.0;
}
L20:
{
}
}
iflag = 2;
L99:
{
}
}
}
}
| 1 | 11,804 | should we have a common `Escape()` method that can be used everywhere? | dotnet-performance | .cs |
@@ -228,7 +228,6 @@ export function diff(
*/
export function commitRoot(commitQueue, root) {
if (options._commit) options._commit(root, commitQueue);
-
commitQueue.some(c => {
try {
commitQueue = c._renderCallbacks; | 1 | import { EMPTY_OBJ, EMPTY_ARR } from '../constants';
import { Component } from '../component';
import { Fragment } from '../create-element';
import { diffChildren, toChildArray } from './children';
import { diffProps } from './props';
import { assign, removeNode } from '../util';
import options from '../options';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this element is an SVG node
* @param {Array<import('../internal').PreactElement>} excessDomChildren
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
 * @param {Element | Text} oldDom The currently attached DOM
 * element that any new DOM elements should be placed around. Likely `null` on first
* render (except when hydrating). Can be a sibling DOM element when diffing
* Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`.
* @param {boolean} [isHydrating] Whether or not we are in hydration
*/
export function diff(
parentDom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
oldDom,
isHydrating
) {
let tmp,
newType = newVNode.type;
// When passing through createElement it assigns the object
// constructor as undefined. This is to prevent JSON-injection.
if (newVNode.constructor !== undefined) return null;
if ((tmp = options._diff)) tmp(newVNode);
try {
outer: if (typeof newType === 'function') {
let c, isNew, oldProps, oldState, snapshot, clearProcessingException;
let newProps = newVNode.props;
// Necessary for createContext api. Setting this property will pass
// the context value as `this.context` just for this component.
tmp = newType.contextType;
let provider = tmp && context[tmp._id];
let cctx = tmp
? provider
? provider.props.value
: tmp._defaultValue
: context;
// Get component and set it to `c`
if (oldVNode._component) {
c = newVNode._component = oldVNode._component;
clearProcessingException = c._processingException = c._pendingError;
} else {
// Instantiate the new component
if ('prototype' in newType && newType.prototype.render) {
newVNode._component = c = new newType(newProps, cctx); // eslint-disable-line new-cap
} else {
newVNode._component = c = new Component(newProps, cctx);
c.constructor = newType;
c.render = doRender;
}
if (provider) provider.sub(c);
c.props = newProps;
if (!c.state) c.state = {};
c.context = cctx;
c._context = context;
isNew = c._dirty = true;
c._renderCallbacks = [];
}
// Invoke getDerivedStateFromProps
if (c._nextState == null) {
c._nextState = c.state;
}
if (newType.getDerivedStateFromProps != null) {
if (c._nextState == c.state) {
c._nextState = assign({}, c._nextState);
}
assign(
c._nextState,
newType.getDerivedStateFromProps(newProps, c._nextState)
);
}
oldProps = c.props;
oldState = c.state;
// Invoke pre-render lifecycle methods
if (isNew) {
if (
newType.getDerivedStateFromProps == null &&
c.componentWillMount != null
) {
c.componentWillMount();
}
if (c.componentDidMount != null) {
c._renderCallbacks.push(c.componentDidMount);
}
} else {
if (
newType.getDerivedStateFromProps == null &&
newProps !== oldProps &&
c.componentWillReceiveProps != null
) {
c.componentWillReceiveProps(newProps, cctx);
}
if (
!c._force &&
c.shouldComponentUpdate != null &&
c.shouldComponentUpdate(newProps, c._nextState, cctx) === false
) {
c.props = newProps;
c.state = c._nextState;
c._dirty = false;
c._vnode = newVNode;
newVNode._dom = oldVNode._dom;
newVNode._children = oldVNode._children;
if (c._renderCallbacks.length) {
commitQueue.push(c);
}
for (tmp = 0; tmp < newVNode._children.length; tmp++) {
if (newVNode._children[tmp]) {
newVNode._children[tmp]._parent = newVNode;
}
}
break outer;
}
if (c.componentWillUpdate != null) {
c.componentWillUpdate(newProps, c._nextState, cctx);
}
if (c.componentDidUpdate != null) {
c._renderCallbacks.push(() => {
c.componentDidUpdate(oldProps, oldState, snapshot);
});
}
}
c.context = cctx;
c.props = newProps;
c.state = c._nextState;
if ((tmp = options._render)) tmp(newVNode);
c._dirty = false;
c._vnode = newVNode;
c._parentDom = parentDom;
tmp = c.render(c.props, c.state, c.context);
let isTopLevelFragment =
tmp != null && tmp.type == Fragment && tmp.key == null;
newVNode._children = toChildArray(
isTopLevelFragment ? tmp.props.children : tmp
);
if (c.getChildContext != null) {
context = assign(assign({}, context), c.getChildContext());
}
if (!isNew && c.getSnapshotBeforeUpdate != null) {
snapshot = c.getSnapshotBeforeUpdate(oldProps, oldState);
}
diffChildren(
parentDom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
oldDom,
isHydrating
);
c.base = newVNode._dom;
if (c._renderCallbacks.length) {
commitQueue.push(c);
}
if (clearProcessingException) {
c._pendingError = c._processingException = null;
}
c._force = false;
} else {
newVNode._dom = diffElementNodes(
oldVNode._dom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
isHydrating
);
}
if ((tmp = options.diffed)) tmp(newVNode);
} catch (e) {
options._catchError(e, newVNode, oldVNode);
}
return newVNode._dom;
}
/**
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {import('../internal').VNode} root
*/
export function commitRoot(commitQueue, root) {
if (options._commit) options._commit(root, commitQueue);
commitQueue.some(c => {
try {
commitQueue = c._renderCallbacks;
c._renderCallbacks = [];
commitQueue.some(cb => {
cb.call(c);
});
} catch (e) {
options._catchError(e, c._vnode);
}
});
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').VNode} oldVNode The old virtual node
* @param {object} context The current context object
* @param {boolean} isSvg Whether or not this DOM node is an SVG node
* @param {*} excessDomChildren
* @param {Array<import('../internal').Component>} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {boolean} isHydrating Whether or not we are in hydration
* @returns {import('../internal').PreactElement}
*/
function diffElementNodes(
dom,
newVNode,
oldVNode,
context,
isSvg,
excessDomChildren,
commitQueue,
isHydrating
) {
let i;
let oldProps = oldVNode.props;
let newProps = newVNode.props;
// Tracks entering and exiting SVG namespace when descending through the tree.
isSvg = newVNode.type === 'svg' || isSvg;
if (dom == null && excessDomChildren != null) {
for (i = 0; i < excessDomChildren.length; i++) {
const child = excessDomChildren[i];
if (
child != null &&
(newVNode.type === null
? child.nodeType === 3
: child.localName === newVNode.type)
) {
dom = child;
excessDomChildren[i] = null;
break;
}
}
}
if (dom == null) {
if (newVNode.type === null) {
return document.createTextNode(newProps);
}
dom = isSvg
? document.createElementNS('http://www.w3.org/2000/svg', newVNode.type)
: document.createElement(
newVNode.type,
newProps.is && { is: newProps.is }
);
// we created a new parent, so none of the previously attached children can be reused:
excessDomChildren = null;
}
if (newVNode.type === null) {
if (excessDomChildren != null) {
excessDomChildren[excessDomChildren.indexOf(dom)] = null;
}
if (oldProps !== newProps && dom.data != newProps) {
dom.data = newProps;
}
} else if (newVNode !== oldVNode) {
if (excessDomChildren != null) {
excessDomChildren[excessDomChildren.indexOf(dom)] = null;
excessDomChildren = EMPTY_ARR.slice.call(dom.childNodes);
}
oldProps = oldVNode.props || EMPTY_OBJ;
let oldHtml = oldProps.dangerouslySetInnerHTML;
let newHtml = newProps.dangerouslySetInnerHTML;
// During hydration, props are not diffed at all (including dangerouslySetInnerHTML)
// @TODO we should warn in debug mode when props don't match here.
if (!isHydrating) {
if (oldProps === EMPTY_OBJ) {
oldProps = {};
for (let i = 0; i < dom.attributes.length; i++) {
oldProps[dom.attributes[i].name] = dom.attributes[i].value;
}
}
if (newHtml || oldHtml) {
			// Avoid re-applying the same '__html' if it did not change between re-renders
if (!newHtml || !oldHtml || newHtml.__html != oldHtml.__html) {
dom.innerHTML = (newHtml && newHtml.__html) || '';
}
}
}
diffProps(dom, newProps, oldProps, isSvg, isHydrating);
newVNode._children = newVNode.props.children;
// If the new vnode didn't have dangerouslySetInnerHTML, diff its children
if (!newHtml) {
diffChildren(
dom,
newVNode,
oldVNode,
context,
newVNode.type === 'foreignObject' ? false : isSvg,
excessDomChildren,
commitQueue,
EMPTY_OBJ,
isHydrating
);
}
// (as above, don't diff props during hydration)
if (!isHydrating) {
if (
'value' in newProps &&
newProps.value !== undefined &&
newProps.value !== dom.value
) {
dom.value = newProps.value == null ? '' : newProps.value;
}
if (
'checked' in newProps &&
newProps.checked !== undefined &&
newProps.checked !== dom.checked
) {
dom.checked = newProps.checked;
}
}
}
return dom;
}
/**
* Invoke or update a ref, depending on whether it is a function or object ref.
* @param {object|function} ref
* @param {any} value
* @param {import('../internal').VNode} vnode
*/
export function applyRef(ref, value, vnode) {
try {
if (typeof ref == 'function') ref(value);
else ref.current = value;
} catch (e) {
options._catchError(e, vnode);
}
}
/**
* Unmount a virtual node from the tree and apply DOM changes
* @param {import('../internal').VNode} vnode The virtual node to unmount
* @param {import('../internal').VNode} parentVNode The parent of the VNode that
* initiated the unmount
* @param {boolean} [skipRemove] Flag that indicates that a parent node of the
* current element is already detached from the DOM.
*/
export function unmount(vnode, parentVNode, skipRemove) {
let r;
if (options.unmount) options.unmount(vnode);
if ((r = vnode.ref)) {
if (!r.current || r.current === vnode._dom) applyRef(r, null, parentVNode);
}
let dom;
if (!skipRemove && typeof vnode.type !== 'function') {
skipRemove = (dom = vnode._dom) != null;
}
vnode._dom = vnode._lastDomChild = null;
if ((r = vnode._component) != null) {
if (r.componentWillUnmount) {
try {
r.componentWillUnmount();
} catch (e) {
options._catchError(e, parentVNode);
}
}
r.base = r._parentDom = null;
}
if ((r = vnode._children)) {
for (let i = 0; i < r.length; i++) {
if (r[i]) unmount(r[i], parentVNode, skipRemove);
}
}
if (dom != null) removeNode(dom);
}
/** The `.render()` method for a PFC backing instance. */
function doRender(props, state, context) {
return this.constructor(props, context);
}
| 1 | 14,858 | The whitespace removal above here is probably unintentional :slightly_smiling_face: | preactjs-preact | js |
@@ -90,14 +90,14 @@ func newServer(cfg config.Config, testing bool) (*Server, error) {
func (s *Server) Start(ctx context.Context) error {
cctx, cancel := context.WithCancel(context.Background())
s.subModuleCancel = cancel
- if err := s.p2pAgent.Start(cctx); err != nil {
- return errors.Wrap(err, "error when starting P2P agent")
- }
for _, cs := range s.chainservices {
if err := cs.Start(cctx); err != nil {
return errors.Wrap(err, "error when starting blockchain")
}
}
+ if err := s.p2pAgent.Start(cctx); err != nil {
+ return errors.Wrap(err, "error when starting P2P agent")
+ }
if err := s.dispatcher.Start(cctx); err != nil {
return errors.Wrap(err, "error when starting dispatcher")
} | 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package itx
import (
"context"
"fmt"
"net/http"
"net/http/pprof"
"runtime"
"sync"
"github.com/pkg/errors"
"go.uber.org/zap"
"github.com/iotexproject/iotex-core/chainservice"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/dispatcher"
"github.com/iotexproject/iotex-core/p2p"
"github.com/iotexproject/iotex-core/pkg/ha"
"github.com/iotexproject/iotex-core/pkg/log"
"github.com/iotexproject/iotex-core/pkg/probe"
"github.com/iotexproject/iotex-core/pkg/routine"
"github.com/iotexproject/iotex-core/pkg/util/httputil"
)
// Server is the iotex server instance containing all components.
type Server struct {
cfg config.Config
rootChainService *chainservice.ChainService
chainservices map[uint32]*chainservice.ChainService
p2pAgent *p2p.Agent
dispatcher dispatcher.Dispatcher
initializedSubChains map[uint32]bool
mutex sync.RWMutex
subModuleCancel context.CancelFunc
}
// NewServer creates a new server
// TODO clean up config, make root config contain network, dispatch and chainservice
func NewServer(cfg config.Config) (*Server, error) {
return newServer(cfg, false)
}
// NewInMemTestServer creates a test server in memory
func NewInMemTestServer(cfg config.Config) (*Server, error) {
return newServer(cfg, true)
}
func newServer(cfg config.Config, testing bool) (*Server, error) {
// create dispatcher instance
dispatcher, err := dispatcher.NewDispatcher(cfg)
if err != nil {
return nil, errors.Wrap(err, "fail to create dispatcher")
}
p2pAgent := p2p.NewAgent(cfg, dispatcher.HandleBroadcast, dispatcher.HandleTell)
chains := make(map[uint32]*chainservice.ChainService)
var cs *chainservice.ChainService
var opts []chainservice.Option
if testing {
opts = []chainservice.Option{
chainservice.WithTesting(),
}
}
cs, err = chainservice.New(cfg, p2pAgent, dispatcher, opts...)
if err != nil {
return nil, errors.Wrap(err, "fail to create chain service")
}
// TODO: explorer dependency deleted here at #1085, need to revive by migrating to api
chains[cs.ChainID()] = cs
dispatcher.AddSubscriber(cs.ChainID(), cs)
svr := Server{
cfg: cfg,
p2pAgent: p2pAgent,
dispatcher: dispatcher,
rootChainService: cs,
chainservices: chains,
initializedSubChains: map[uint32]bool{},
}
// Setup sub-chain starter
// TODO: sub-chain infra should use main-chain API instead of protocol directly
return &svr, nil
}
// Start starts the server
func (s *Server) Start(ctx context.Context) error {
cctx, cancel := context.WithCancel(context.Background())
s.subModuleCancel = cancel
if err := s.p2pAgent.Start(cctx); err != nil {
return errors.Wrap(err, "error when starting P2P agent")
}
for _, cs := range s.chainservices {
if err := cs.Start(cctx); err != nil {
return errors.Wrap(err, "error when starting blockchain")
}
}
if err := s.dispatcher.Start(cctx); err != nil {
return errors.Wrap(err, "error when starting dispatcher")
}
return nil
}
// Stop stops the server
func (s *Server) Stop(ctx context.Context) error {
defer s.subModuleCancel()
if err := s.p2pAgent.Stop(ctx); err != nil {
return errors.Wrap(err, "error when stopping P2P agent")
}
if err := s.dispatcher.Stop(ctx); err != nil {
return errors.Wrap(err, "error when stopping dispatcher")
}
for _, cs := range s.chainservices {
if err := cs.Stop(ctx); err != nil {
return errors.Wrap(err, "error when stopping blockchain")
}
}
return nil
}
// NewSubChainService creates a new chain service in this server.
func (s *Server) NewSubChainService(cfg config.Config, opts ...chainservice.Option) error {
s.mutex.Lock()
defer s.mutex.Unlock()
return s.newSubChainService(cfg, opts...)
}
func (s *Server) newSubChainService(cfg config.Config, opts ...chainservice.Option) error {
// TODO: explorer dependency deleted here at #1085, need to revive by migrating to api
opts = append(opts, chainservice.WithSubChain())
cs, err := chainservice.New(cfg, s.p2pAgent, s.dispatcher, opts...)
if err != nil {
return err
}
s.chainservices[cs.ChainID()] = cs
return nil
}
// StopChainService stops the chain service run in the server.
func (s *Server) StopChainService(ctx context.Context, id uint32) error {
s.mutex.RLock()
defer s.mutex.RUnlock()
c, ok := s.chainservices[id]
if !ok {
return errors.New("Chain ID does not match any existing chains")
}
return c.Stop(ctx)
}
// P2PAgent returns the P2P agent
func (s *Server) P2PAgent() *p2p.Agent {
return s.p2pAgent
}
// ChainService returns the chainservice hold in Server with given id.
func (s *Server) ChainService(id uint32) *chainservice.ChainService {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.chainservices[id]
}
// Dispatcher returns the Dispatcher
func (s *Server) Dispatcher() dispatcher.Dispatcher {
return s.dispatcher
}
// StartServer starts a node server
func StartServer(ctx context.Context, svr *Server, probeSvr *probe.Server, cfg config.Config) {
if err := svr.Start(ctx); err != nil {
log.L().Fatal("Failed to start server.", zap.Error(err))
return
}
defer func() {
if err := svr.Stop(ctx); err != nil {
log.L().Panic("Failed to stop server.", zap.Error(err))
}
}()
probeSvr.Ready()
if cfg.System.HeartbeatInterval > 0 {
task := routine.NewRecurringTask(NewHeartbeatHandler(svr).Log, cfg.System.HeartbeatInterval)
if err := task.Start(ctx); err != nil {
log.L().Panic("Failed to start heartbeat routine.", zap.Error(err))
}
defer func() {
if err := task.Stop(ctx); err != nil {
log.L().Panic("Failed to stop heartbeat routine.", zap.Error(err))
}
}()
}
var adminserv http.Server
if cfg.System.HTTPAdminPort > 0 {
mux := http.NewServeMux()
log.RegisterLevelConfigMux(mux)
haCtl := ha.New(svr.rootChainService.Consensus())
mux.Handle("/ha", http.HandlerFunc(haCtl.Handle))
mux.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
mux.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
port := fmt.Sprintf(":%d", cfg.System.HTTPAdminPort)
adminserv = httputil.Server(port, mux)
defer func() {
if err := adminserv.Shutdown(ctx); err != nil {
log.L().Error("Error when serving metrics data.", zap.Error(err))
}
}()
go func() {
runtime.SetMutexProfileFraction(1)
runtime.SetBlockProfileRate(1)
ln, err := httputil.LimitListener(adminserv.Addr)
if err != nil {
log.L().Error("Error when listen to profiling port.", zap.Error(err))
return
}
if err := adminserv.Serve(ln); err != nil {
log.L().Error("Error when serving performance profiling data.", zap.Error(err))
}
}()
}
<-ctx.Done()
probeSvr.NotReady()
}
| 1 | 22,896 | should we start p2p even after dispatcher? b/c dispatcher handles msg from p2p | iotexproject-iotex-core | go |
@@ -101,13 +101,13 @@ func TestHandleInterfaceEmptySuccess(t *testing.T) {
handler := jsonHandler{reader: ifaceEmptyReader{}, handler: reflect.ValueOf(h)}
reqBuf := yarpc.NewBufferString(`["a", "b", "c"]`)
- _, _, err := handler.Handle(context.Background(), &yarpc.Request{
+ _, respBuf, err := handler.Handle(context.Background(), &yarpc.Request{
Procedure: "foo",
Encoding: "json",
}, reqBuf)
require.NoError(t, err)
- assert.JSONEq(t, `["a", "b", "c"]`, reqBuf.String())
+ assert.JSONEq(t, `["a", "b", "c"]`, respBuf.String())
}
func TestHandleSuccessWithResponseHeaders(t *testing.T) { | 1 | // Copyright (c) 2018 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpcjson
import (
"context"
"encoding/json"
"errors"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/yarpc/v2"
)
type simpleRequest struct {
Name string
Attributes map[string]int32
}
type simpleResponse struct {
Success bool
}
func TestHandleStructSuccess(t *testing.T) {
h := func(ctx context.Context, body *simpleRequest) (*simpleResponse, error) {
assert.Equal(t, "simpleCall", yarpc.CallFromContext(ctx).Procedure())
assert.Equal(t, "foo", body.Name)
assert.Equal(t, map[string]int32{"bar": 42}, body.Attributes)
return &simpleResponse{Success: true}, nil
}
handler := jsonHandler{
reader: structReader{reflect.TypeOf(simpleRequest{})},
handler: reflect.ValueOf(h),
}
reqBuf := yarpc.NewBufferString(`{"name": "foo", "attributes": {"bar": 42}}`)
_, resBuf, err := handler.Handle(context.Background(), &yarpc.Request{
Procedure: "simpleCall",
Encoding: "json",
}, reqBuf)
require.NoError(t, err)
var response simpleResponse
require.NoError(t, json.Unmarshal(resBuf.Bytes(), &response))
assert.Equal(t, simpleResponse{Success: true}, response)
}
func TestHandleMapSuccess(t *testing.T) {
h := func(ctx context.Context, body map[string]interface{}) (map[string]string, error) {
assert.Equal(t, 42.0, body["foo"])
assert.Equal(t, []interface{}{"a", "b", "c"}, body["bar"])
return map[string]string{"success": "true"}, nil
}
handler := jsonHandler{
reader: mapReader{reflect.TypeOf(make(map[string]interface{}))},
handler: reflect.ValueOf(h),
}
reqBuf := yarpc.NewBufferString(`{"foo": 42, "bar": ["a", "b", "c"]}`)
_, resBuf, err := handler.Handle(context.Background(), &yarpc.Request{
Procedure: "foo",
Encoding: "json",
}, reqBuf)
require.NoError(t, err)
var response struct{ Success string }
require.NoError(t, json.Unmarshal(resBuf.Bytes(), &response))
assert.Equal(t, "true", response.Success)
}
func TestHandleInterfaceEmptySuccess(t *testing.T) {
h := func(ctx context.Context, body interface{}) (interface{}, error) {
return body, nil
}
handler := jsonHandler{reader: ifaceEmptyReader{}, handler: reflect.ValueOf(h)}
reqBuf := yarpc.NewBufferString(`["a", "b", "c"]`)
_, _, err := handler.Handle(context.Background(), &yarpc.Request{
Procedure: "foo",
Encoding: "json",
}, reqBuf)
require.NoError(t, err)
assert.JSONEq(t, `["a", "b", "c"]`, reqBuf.String())
}
func TestHandleSuccessWithResponseHeaders(t *testing.T) {
h := func(ctx context.Context, _ *simpleRequest) (*simpleResponse, error) {
require.NoError(t, yarpc.CallFromContext(ctx).WriteResponseHeader("foo", "bar"))
return &simpleResponse{Success: true}, nil
}
handler := jsonHandler{
reader: structReader{reflect.TypeOf(simpleRequest{})},
handler: reflect.ValueOf(h),
}
reqBuf := yarpc.NewBufferString(`{"name": "foo", "attributes": {"bar": 42}}`)
res, _, err := handler.Handle(context.Background(), &yarpc.Request{
Procedure: "simpleCall",
Encoding: "json",
}, reqBuf)
require.NoError(t, err)
assert.Equal(t, yarpc.NewHeaders().With("foo", "bar"), res.Headers)
}
func TestHandleBothResponseError(t *testing.T) {
h := func(ctx context.Context, body *simpleRequest) (*simpleResponse, error) {
assert.Equal(t, "simpleCall", yarpc.CallFromContext(ctx).Procedure())
assert.Equal(t, "foo", body.Name)
assert.Equal(t, map[string]int32{"bar": 42}, body.Attributes)
return &simpleResponse{Success: true}, errors.New("bar")
}
handler := jsonHandler{
reader: structReader{reflect.TypeOf(simpleRequest{})},
handler: reflect.ValueOf(h),
}
reqBuf := yarpc.NewBufferString(`{"name": "foo", "attributes": {"bar": 42}}`)
_, resBuf, err := handler.Handle(context.Background(), &yarpc.Request{
Procedure: "simpleCall",
Encoding: "json",
}, reqBuf)
require.Equal(t, errors.New("bar"), err)
var response simpleResponse
require.NoError(t, json.Unmarshal(resBuf.Bytes(), &response))
assert.Equal(t, simpleResponse{Success: true}, response)
}
| 1 | 17,486 | Did we decide once and for all to use req/resp throughout spring? | yarpc-yarpc-go | go |
@@ -8,8 +8,13 @@ describe "videos/_access_callout" do
render_callout video, signed_out: true
- expect(rendered).to have_css ".access-callout.auth-to-access"
- expect(rendered).to have_auth_to_access_link_for(video)
+ expect(rendered).to have_content(
+ I18n.t("videos.show.auth_to_access_callout_text")
+ )
+ expect(rendered).to have_link(
+ I18n.t("videos.show.auth_to_access_button_text"),
+ href: video_auth_to_access_path(video),
+ )
end
end
| 1 | require "rails_helper"
describe "videos/_access_callout" do
context "when the user is a guest" do
context "and the video is a free sample" do
it "displays an auth to access message and button for the video" do
video = build_stubbed(:video, :free_sample)
render_callout video, signed_out: true
expect(rendered).to have_css ".access-callout.auth-to-access"
expect(rendered).to have_auth_to_access_link_for(video)
end
end
context "when the video is not a free sample" do
it "displays the 'preview' message and CTA" do
video = build_stubbed(:video)
render_callout video, signed_out: true
expect(rendered).to have_css ".access-callout.subscription-required"
expect(rendered).to have_subscribe_cta
end
end
end
context "when the user is a sampler" do
context "and the video is a free sample" do
it "displays a subscribe CTA about viewing all the videos" do
video = build_stubbed(:video, :free_sample)
render_callout video, signed_out: false
expect(rendered).to have_css ".access-callout.subscription-required"
expect(rendered).to have_content(
I18n.t("videos.show.access_callout_for_sample_text"),
)
expect(rendered).to have_subscribe_cta
end
end
context "and the video is not a free sample" do
it "displays the 'preview' message and CTA" do
video = build_stubbed(:video)
render_callout video, signed_out: false
expect(rendered).to have_css ".access-callout.subscription-required"
expect(rendered).to have_content(
I18n.t("videos.show.access_callout_for_preview_text"),
)
expect(rendered).to have_subscribe_cta
end
end
end
def have_auth_to_access_link_for(video)
have_link(
I18n.t("videos.show.auth_to_access_button_text"),
href: video_auth_to_access_path(video),
)
end
def render_callout(video, signed_out: false)
allow(view).to receive(:signed_out?).and_return(signed_out)
render template: "videos/_access_callout", locals: { video: video }
end
def have_subscribe_cta
have_link(
"Subscribe Now",
href: new_checkout_path(:professional),
)
end
end
| 1 | 16,891 | Put a comma after the last parameter of a multiline method call. | thoughtbot-upcase | rb |
@@ -71,8 +71,18 @@ func populateScratchBucketGcsPath(scratchBucketGcsPath *string, zone string, mgc
if err != nil {
return "", daisy.Errf("invalid scratch bucket GCS path %v", scratchBucketGcsPath)
}
+
if !scratchBucketCreator.IsBucketInProject(*project, scratchBucketName) {
- return "", daisy.Errf("Scratch bucket `%v` is not in project `%v`", scratchBucketName, *project)
+ err := storageClient.DeleteGcsPath(*scratchBucketGcsPath)
+ var deletionResults string
+ if err == nil {
+ deletionResults = fmt.Sprintf("The resources in %q have been deleted.", *scratchBucketGcsPath)
+ } else {
+ deletionResults = fmt.Sprintf("Failed to delete resources in %q. Check with the owner of %q for more information",
+ *scratchBucketGcsPath, scratchBucketName)
+ }
+ return "", daisy.Errf("Scratch bucket %q is not in project %q. %s",
+ scratchBucketName, *project, deletionResults)
}
scratchBucketAttrs, err := storageClient.GetBucketAttrs(scratchBucketName) | 1 | // Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package param
import (
"context"
"fmt"
"regexp"
"strings"
"google.golang.org/api/option"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/domain"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/paramhelper"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/storage"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
)
// GetProjectID gets project id from flag if exists; otherwise, try to retrieve from GCE metadata.
func GetProjectID(mgce domain.MetadataGCEInterface, projectFlag string) (string, error) {
if projectFlag == "" {
if !mgce.OnGCE() {
return "", daisy.Errf("project cannot be determined because build is not running on GCE")
}
aProject, err := mgce.ProjectID()
if err != nil || aProject == "" {
return "", daisy.Errf("project cannot be determined %v", err)
}
return aProject, nil
}
return projectFlag, nil
}
func populateScratchBucketGcsPath(scratchBucketGcsPath *string, zone string, mgce domain.MetadataGCEInterface,
scratchBucketCreator domain.ScratchBucketCreatorInterface, file string, project *string,
storageClient domain.StorageClientInterface) (string, error) {
scratchBucketRegion := ""
if *scratchBucketGcsPath == "" {
fallbackZone := zone
if fallbackZone == "" && mgce.OnGCE() {
var err error
if fallbackZone, err = mgce.Zone(); err != nil {
// reset fallback zone if failed to get zone from running GCE
fallbackZone = ""
}
}
scratchBucketName, sbr, err := scratchBucketCreator.CreateScratchBucket(file, *project, fallbackZone)
scratchBucketRegion = sbr
if err != nil {
return "", daisy.Errf("failed to create scratch bucket: %v", err)
}
*scratchBucketGcsPath = fmt.Sprintf("gs://%v/", scratchBucketName)
} else {
scratchBucketName, err := storage.GetBucketNameFromGCSPath(*scratchBucketGcsPath)
if err != nil {
return "", daisy.Errf("invalid scratch bucket GCS path %v", scratchBucketGcsPath)
}
if !scratchBucketCreator.IsBucketInProject(*project, scratchBucketName) {
return "", daisy.Errf("Scratch bucket `%v` is not in project `%v`", scratchBucketName, *project)
}
scratchBucketAttrs, err := storageClient.GetBucketAttrs(scratchBucketName)
if err == nil {
scratchBucketRegion = scratchBucketAttrs.Location
}
}
return scratchBucketRegion, nil
}
// PopulateProjectIfMissing populates project id for cli tools
func PopulateProjectIfMissing(mgce domain.MetadataGCEInterface, projectFlag *string) error {
var err error
*projectFlag, err = GetProjectID(mgce, *projectFlag)
return err
}
// PopulateRegion populates region based on the value extracted from zone param
func PopulateRegion(region *string, zone string) error {
aRegion, err := paramhelper.GetRegion(zone)
if err != nil {
return err
}
*region = aRegion
return nil
}
// CreateComputeClient creates a new compute client
func CreateComputeClient(ctx *context.Context, oauth string, ce string) (compute.Client, error) {
computeOptions := []option.ClientOption{option.WithCredentialsFile(oauth)}
if ce != "" {
computeOptions = append(computeOptions, option.WithEndpoint(ce))
}
computeClient, err := compute.NewClient(*ctx, computeOptions...)
if err != nil {
return nil, daisy.Errf("failed to create compute client: %v", err)
}
return computeClient, nil
}
var fullResourceURLPrefix = "https://www.googleapis.com/compute/[^/]*/"
var fullResourceURLRegex = regexp.MustCompile(fmt.Sprintf("^(%s)", fullResourceURLPrefix))
func getResourcePath(scope string, resourceType string, resourceName string) string {
// handle full URL: transform to relative URL
if prefix := fullResourceURLRegex.FindString(resourceName); prefix != "" {
return strings.TrimPrefix(resourceName, prefix)
}
// handle relative (partial) URL: use it as-is
if strings.Contains(resourceName, "/") {
return resourceName
}
// handle pure name: treat it as current project
return fmt.Sprintf("%v/%v/%v", scope, resourceType, resourceName)
}
// GetGlobalResourcePath gets global resource path based on either a local resource name or a path
func GetGlobalResourcePath(resourceType string, resourceName string) string {
return getResourcePath("global", resourceType, resourceName)
}
// GetRegionalResourcePath gets regional resource path based on either a local resource name or a path
func GetRegionalResourcePath(region string, resourceType string, resourceName string) string {
return getResourcePath(fmt.Sprintf("regions/%v", region), resourceType, resourceName)
}
// GetZonalResourcePath gets zonal resource path based on either a local resource name or a path
func GetZonalResourcePath(zone string, resourceType string, resourceName string) string {
return getResourcePath(fmt.Sprintf("zones/%v", zone), resourceType, resourceName)
}
| 1 | 12,354 | This will delete the whole bucket, which could have unforseen consequences in normal use cases. We should be deleting args.SourceFile instead. | GoogleCloudPlatform-compute-image-tools | go |
@@ -721,5 +721,9 @@ public class Constants {
// Constant to allow test version to be passed as flow parameter. Passing test version will be
// allowed for Azkaban ADMIN role only
public static final String FLOW_PARAM_ALLOW_IMAGE_TEST_VERSION = "allow.image.test.version";
+
+ //
+ public static final String FLOW_PARAM_ALLOW_RESTART_ON_EXECUTION_STOPPED = "allow"
+ + ".restart.on.execution.stopped";
}
} | 1 | /*
* Copyright 2018 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban;
import java.time.Duration;
/**
* Constants used in configuration files or shared among classes.
*
* <p>Conventions:
*
* <p>Internal constants to be put in the {@link Constants} class
*
* <p>Configuration keys to be put in the {@link ConfigurationKeys} class
*
* <p>Flow level properties keys to be put in the {@link FlowProperties} class
*
* <p>Job level Properties keys to be put in the {@link JobProperties} class
*
* <p>Use '.' to separate name spaces and '_" to separate words in the same namespace. e.g.
* azkaban.job.some_key</p>
*/
public class Constants {
// Azkaban Flow Versions
public static final double DEFAULT_AZKABAN_FLOW_VERSION = 1.0;
public static final double AZKABAN_FLOW_VERSION_2_0 = 2.0;
// Flow 2.0 file suffix
public static final String PROJECT_FILE_SUFFIX = ".project";
public static final String FLOW_FILE_SUFFIX = ".flow";
// Flow 2.0 node type
public static final String NODE_TYPE = "type";
public static final String FLOW_NODE_TYPE = "flow";
// Flow 2.0 flow and job path delimiter
public static final String PATH_DELIMITER = ":";
// Job properties override suffix
public static final String JOB_OVERRIDE_SUFFIX = ".jor";
// Key for the root node of the DAG in runtime properties
public static final String ROOT_NODE_IDENTIFIER = "ROOT";
// Names and paths of various file names to configure Azkaban
public static final String AZKABAN_PROPERTIES_FILE = "azkaban.properties";
public static final String AZKABAN_PRIVATE_PROPERTIES_FILE = "azkaban.private.properties";
public static final String DEFAULT_CONF_PATH = "conf";
public static final String DEFAULT_EXECUTOR_PORT_FILE = "executor.port";
public static final String AZKABAN_SERVLET_CONTEXT_KEY = "azkaban_app";
public static final String AZKABAN_CONTAINER_CONTEXT_KEY = "flow_container";
// Internal username used to perform SLA action
public static final String AZKABAN_SLA_CHECKER_USERNAME = "azkaban_sla";
// Memory check retry interval when OOM in ms
public static final long MEMORY_CHECK_INTERVAL_MS = 1000 * 60 * 1;
// Max number of memory check retry
public static final int MEMORY_CHECK_RETRY_LIMIT = 720;
public static final int DEFAULT_PORT_NUMBER = 8081;
public static final int DEFAULT_SSL_PORT_NUMBER = 8443;
public static final int DEFAULT_JETTY_MAX_THREAD_COUNT = 20;
// Configures the form limits for the web application
public static final int MAX_FORM_CONTENT_SIZE = 10 * 1024 * 1024;
// One Schedule's default End Time: 01/01/2050, 00:00:00, UTC
public static final long DEFAULT_SCHEDULE_END_EPOCH_TIME = 2524608000000L;
// Default flow trigger max wait time
public static final Duration DEFAULT_FLOW_TRIGGER_MAX_WAIT_TIME = Duration.ofDays(10);
public static final Duration MIN_FLOW_TRIGGER_WAIT_TIME = Duration.ofMinutes(1);
public static final int DEFAULT_MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = 20;
// The flow exec id for a flow trigger instance which hasn't started a flow yet
public static final int UNASSIGNED_EXEC_ID = -1;
// The flow exec id for a flow trigger instance unable to trigger a flow yet
public static final int FAILED_EXEC_ID = -2;
// Default locked flow error message
public static final String DEFAULT_LOCKED_FLOW_ERROR_MESSAGE =
"Flow %s in project %s is locked. This is either a repeatedly failing flow, or an ineffcient"
+ " flow. Please refer to the Dr. Elephant report for this flow for more information.";
// Default maximum number of concurrent runs for a single flow
public static final int DEFAULT_MAX_ONCURRENT_RUNS_ONEFLOW = 30;
// How often executors will poll new executions in Poll Dispatch model
public static final int DEFAULT_AZKABAN_POLLING_INTERVAL_MS = 1000;
// Executors can use cpu load calculated from this period to take/skip polling turns
public static final int DEFAULT_AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC = 60;
// Default value to feature enable setting. To be backward compatible, this value === FALSE
public static final boolean DEFAULT_AZKABAN_RAMP_ENABLED = false;
  // Due to the multiple AzkabanExec Server instance scenario, the ramp result needs to be persisted into the DB.
  // However, frequent data persistence sacrifices performance for limited gains in data accuracy.
  // This setting controls pushing the result into the DB every N finished ramped workflows
public static final int DEFAULT_AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = 20;
  // Due to the multiple AzkabanExec Server instance scenario, the ramp result needs to be persisted into the DB.
  // However, frequent data persistence sacrifices performance for limited gains in data accuracy.
  // This setting controls pulling the result from the DB every N new ramped workflows
public static final int DEFAULT_AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = 50;
// Use Polling Service to sync the ramp status cross EXEC Server.
public static final boolean DEFAULT_AZKABAN_RAMP_STATUS_POOLING_ENABLED = false;
// How often executors will poll ramp status in Poll Dispatch model
public static final int DEFAULT_AZKABAN_RAMP_STATUS_POLLING_INTERVAL = 10;
// Username to be sent to UserManager when OAuth is in use, and real username is not available:
public static final String OAUTH_USERNAME_PLACEHOLDER = "<OAuth>";
// Used by UserManager for password validation (to tell apart real passwords from auth codes).
// Empirically, passwords are shorter than this, and ACs are longer:
public static final int OAUTH_MIN_AUTHCODE_LENGTH = 80;
// Used (or should be used) wherever a string representation of UTF_8 charset is needed:
public static final String UTF_8 = java.nio.charset.StandardCharsets.UTF_8.toString();
  // Specifies the source (adhoc, scheduled, event) from which flow execution is triggered
public static final String EXECUTION_SOURCE_ADHOC = "adhoc";
public static final String EXECUTION_SOURCE_SCHEDULED = "schedule";
public static final String EXECUTION_SOURCE_EVENT = "event";
public static final String CONTENT_TYPE_TEXT_PLAIN = "text/plain";
public static final String CHARACTER_ENCODING_UTF_8 = "utf-8";
// Use in-memory keystore
public static final String USE_IN_MEMORY_KEYSTORE = "use.in-memory.keystore";
// AZ_HOME in containerized execution
public static final String AZ_HOME = "AZ_HOME";
// Azkaban event reporter constants
public static class EventReporterConstants {
public static final String FLOW_NAME = "flowName";
public static final String AZ_HOST = "azkabanHost";
public static final String AZ_WEBSERVER = "azkabanWebserver";
public static final String PROJECT_NAME = "projectName";
public static final String SUBMIT_USER = "submitUser";
public static final String START_TIME = "startTime";
public static final String END_TIME = "endTime";
public static final String FLOW_STATUS = "flowStatus";
public static final String EXECUTION_ID = "executionId";
public static final String SUBMIT_TIME = "submitTime";
public static final String FLOW_VERSION = "flowVersion";
public static final String FAILED_JOB_ID = "failedJobId";
public static final String MODIFIED_BY = "modifiedBy";
public static final String FLOW_KILL_DURATION = "flowKillDuration";
public static final String FLOW_PAUSE_DURATION = "flowPauseDuration";
public static final String FLOW_PREPARATION_DURATION = "flowPreparationDuration";
public static final String SLA_OPTIONS = "slaOptions";
public static final String VERSION_SET = "versionSet";
public static final String EXECUTOR_TYPE = "executorType";
public static final String PROJECT_FILE_UPLOAD_USER = "projectFileUploadUser";
public static final String PROJECT_FILE_UPLOADER_IP_ADDR = "projectFileUploaderIpAddr";
public static final String PROJECT_FILE_NAME = "projectFileName";
public static final String PROJECT_FILE_UPLOAD_TIME = "projectFileUploadTime";
public static final String JOB_ID = "jobId";
public static final String JOB_TYPE = "jobType";
public static final String VERSION = "version";
public static final String JOB_PROXY_USER = "jobProxyUser";
public static final String ATTEMPT_ID = "attemptId";
public static final String JOB_KILL_DURATION = "jobKillDuration";
public static final String QUEUE_DURATION = "queueDuration";
public static final String FAILURE_MESSAGE = "failureMessage";
public static final String JOB_STATUS = "jobStatus";
}
public static class ConfigurationKeys {
public static final String AZKABAN_CLUSTER_NAME = "azkaban.cluster.name";
public static final String AZKABAN_GLOBAL_PROPERTIES_EXT_PATH = "executor.global.properties";
// Property to enable appropriate dispatch model
public static final String AZKABAN_EXECUTION_DISPATCH_METHOD = "azkaban.execution.dispatch.method";
// Configures Azkaban to use new polling model for dispatching
public static final String AZKABAN_POLLING_INTERVAL_MS = "azkaban.polling.interval.ms";
public static final String AZKABAN_POLLING_LOCK_ENABLED = "azkaban.polling.lock.enabled";
public static final String AZKABAN_POLLING_CRITERIA_FLOW_THREADS_AVAILABLE =
"azkaban.polling_criteria.flow_threads_available";
public static final String AZKABAN_POLLING_CRITERIA_MIN_FREE_MEMORY_GB =
"azkaban.polling_criteria.min_free_memory_gb";
public static final String AZKABAN_POLLING_CRITERIA_MAX_CPU_UTILIZATION_PCT =
"azkaban.polling_criteria.max_cpu_utilization_pct";
public static final String AZKABAN_POLLING_CRITERIA_CPU_LOAD_PERIOD_SEC =
"azkaban.polling_criteria.cpu_load_period_sec";
// Configures properties for Azkaban executor health check
public static final String AZKABAN_EXECUTOR_HEALTHCHECK_INTERVAL_MIN = "azkaban.executor.healthcheck.interval.min";
public static final String AZKABAN_EXECUTOR_MAX_FAILURE_COUNT = "azkaban.executor.max.failurecount";
public static final String AZKABAN_ADMIN_ALERT_EMAIL = "azkaban.admin.alert.email";
// Configures Azkaban Flow Version in project YAML file
public static final String AZKABAN_FLOW_VERSION = "azkaban-flow-version";
// These properties are configurable through azkaban.properties
public static final String AZKABAN_PID_FILENAME = "azkaban.pid.filename";
// External URL template of a given topic, specified in the list defined above
//Deprecated, it is replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_URL
public static final String AZKABAN_SERVER_EXTERNAL_TOPIC_URL = "azkaban.server.external.${topic}.url";
// Designates one of the external link topics to correspond to an execution analyzer
//Deprecated, replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPICS
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC = "azkaban.server.external.analyzer.topic";
//Deprecated, it is replaced by AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_LABEL
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_LABEL = "azkaban.server.external.analyzer.label";
// Defines a list of external links, each referred to as a topic
// external links defined here will be translated into buttons and rendered in the Flow Execution page
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPICS = "azkaban.server.external.analyzer.topics";
// Defines the timeout in milliseconds for Azkaban to validate external links.
// If this config is missing, Azkaban uses a default timeout of 3000 milliseconds.
// If validation fails, the corresponding button is disabled on the Flow Execution page.
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TIMEOUT_MS = "azkaban.server.external.analyzer.timeout.ms";
// Designates one of the external link topics to correspond to an execution analyzer
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_LABEL = "azkaban.server"
+ ".external.analyzer.${topic}.label";
// External URL template of a given topic, specified in the list defined above
public static final String AZKABAN_SERVER_EXTERNAL_ANALYZER_TOPIC_URL = "azkaban.server"
+ ".external.analyzer.${topic}.url";
// Designates one of the external link topics to correspond to a job log viewer
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_TOPIC = "azkaban.server.external.logviewer.topic";
public static final String AZKABAN_SERVER_EXTERNAL_LOGVIEWER_LABEL = "azkaban.server.external.logviewer.label";
/*
* Hadoop/Spark user job link.
* Example:
* a) azkaban.server.external.resource_manager_job_url=http://***rm***:8088/cluster/app/application_${application.id}
* b) azkaban.server.external.history_server_job_url=http://***jh***:19888/jobhistory/job/job_${application.id}
* c) azkaban.server.external.spark_history_server_job_url=http://***sh***:18080/history/application_${application.id}/1/jobs
* */
public static final String HADOOP_CLUSTER_URL = "azkaban.server.external.hadoop_cluster_url";
public static final String RESOURCE_MANAGER_JOB_URL = "azkaban.server.external.resource_manager_job_url";
public static final String HISTORY_SERVER_JOB_URL = "azkaban.server.external.history_server_job_url";
public static final String SPARK_HISTORY_SERVER_JOB_URL = "azkaban.server.external.spark_history_server_job_url";
// Configures the Kafka appender for logging user jobs, specified for the exec server
public static final String AZKABAN_SERVER_LOGGING_KAFKA_BROKERLIST = "azkaban.server.logging.kafka.brokerList";
public static final String AZKABAN_SERVER_LOGGING_KAFKA_TOPIC = "azkaban.server.logging.kafka.topic";
// Represent the class name of azkaban metrics reporter.
public static final String CUSTOM_METRICS_REPORTER_CLASS_NAME = "azkaban.metrics.reporter.name";
// Represent the metrics server URL.
public static final String METRICS_SERVER_URL = "azkaban.metrics.server.url";
public static final String IS_METRICS_ENABLED = "azkaban.is.metrics.enabled";
public static final String MIN_AGE_FOR_CLASSIFYING_A_FLOW_AGED_MINUTES = "azkaban.metrics"
+ ".min_age_for_classifying_a_flow_aged_minutes";
// User facing web server configurations used to construct the user facing server URLs. They are useful when there is a reverse proxy between Azkaban web servers and users.
// enduser -> myazkabanhost:443 -> proxy -> localhost:8081
// When these parameters are set, they are used to generate email links.
// If they are not set, jetty.hostname and jetty.port (or jetty.ssl.port if SSL is configured) are used.
public static final String AZKABAN_WEBSERVER_EXTERNAL_HOSTNAME = "azkaban.webserver.external_hostname";
public static final String AZKABAN_WEBSERVER_EXTERNAL_SSL_PORT = "azkaban.webserver.external_ssl_port";
public static final String AZKABAN_WEBSERVER_EXTERNAL_PORT = "azkaban.webserver.external_port";
// Hostname for the host, if not specified, canonical hostname will be used
public static final String AZKABAN_SERVER_HOST_NAME = "azkaban.server.hostname";
// List of users we prevent azkaban from running flows as. (ie: root, azkaban)
public static final String BLACK_LISTED_USERS = "azkaban.server.blacklist.users";
// Path name of execute-as-user executable
public static final String AZKABAN_SERVER_NATIVE_LIB_FOLDER = "azkaban.native.lib";
// Name of *nix group associated with the process running Azkaban
public static final String AZKABAN_SERVER_GROUP_NAME = "azkaban.group.name";
// Legacy configs section, new configs should follow the naming convention of azkaban.server.<rest of the name> for server configs.
// Jetty server configurations.
public static final String JETTY_HEADER_BUFFER_SIZE = "jetty.headerBufferSize";
public static final String JETTY_USE_SSL = "jetty.use.ssl";
public static final String JETTY_SSL_PORT = "jetty.ssl.port";
public static final String JETTY_PORT = "jetty.port";
public static final String EXECUTOR_PORT_FILE = "executor.portfile";
// To set a fixed port for executor-server. Otherwise some available port is used.
public static final String EXECUTOR_PORT = "executor.port";
public static final String EXECUTOR_SSL_PORT = "executor.ssl.port";
public static final String DEFAULT_TIMEZONE_ID = "default.timezone.id";
// Boolean config set on the Web server to prevent users from creating projects. When set to
// true only admins or users with CREATEPROJECTS permission can create projects.
public static final String LOCKDOWN_CREATE_PROJECTS_KEY = "lockdown.create.projects";
// Boolean config set on the Web server to prevent users from uploading projects. When set to
// true only admins or users with UPLOADPROJECTS permission can upload projects.
public static final String LOCKDOWN_UPLOAD_PROJECTS_KEY = "lockdown.upload.projects";
// Max flow running time in mins; the server will kill flows running longer than this setting.
// If not set or <= 0, there is no restriction on running time.
public static final String AZKABAN_MAX_FLOW_RUNNING_MINS = "azkaban.server.flow.max.running.minutes";
// Maximum number of tries to download a dependency (no more retry attempts will be made after this many download failures)
public static final String AZKABAN_DEPENDENCY_MAX_DOWNLOAD_TRIES = "azkaban.dependency.max.download.tries";
public static final String AZKABAN_DEPENDENCY_DOWNLOAD_THREADPOOL_SIZE =
"azkaban.dependency.download.threadpool.size";
public static final String AZKABAN_STORAGE_TYPE = "azkaban.storage.type";
public static final String AZKABAN_STORAGE_LOCAL_BASEDIR = "azkaban.storage.local.basedir";
public static final String HADOOP_CONF_DIR_PATH = "hadoop.conf.dir.path";
// This really should be azkaban.storage.hdfs.project_root.uri
public static final String AZKABAN_STORAGE_HDFS_PROJECT_ROOT_URI = "azkaban.storage.hdfs.root.uri";
public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ENABLED = "azkaban.storage.cache.dependency.enabled";
public static final String AZKABAN_STORAGE_CACHE_DEPENDENCY_ROOT_URI = "azkaban.storage.cache.dependency_root.uri";
public static final String AZKABAN_STORAGE_ORIGIN_DEPENDENCY_ROOT_URI = "azkaban.storage.origin.dependency_root.uri";
public static final String AZKABAN_KERBEROS_PRINCIPAL = "azkaban.kerberos.principal";
public static final String AZKABAN_KEYTAB_PATH = "azkaban.keytab.path";
public static final String PROJECT_TEMP_DIR = "project.temp.dir";
// Event reporting properties
public static final String AZKABAN_EVENT_REPORTING_CLASS_PARAM =
"azkaban.event.reporting.class";
public static final String AZKABAN_EVENT_REPORTING_ENABLED = "azkaban.event.reporting.enabled";
// Comma separated list of properties to propagate from flow to Event reporter metadata
public static final String AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE = "azkaban.event.reporting.propagateProperties";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_BROKERS =
"azkaban.event.reporting.kafka.brokers";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_TOPIC =
"azkaban.event.reporting.kafka.topic";
public static final String AZKABAN_EVENT_REPORTING_KAFKA_SCHEMA_REGISTRY_URL =
"azkaban.event.reporting.kafka.schema.registry.url";
/*
* The max number of artifacts retained per project.
* Accepted Values:
* - 0 : Save all artifacts. No clean up is done on storage.
* - 1, 2, 3, ... (any +ve integer 'n') : Maintain 'n' latest versions in storage
*
* Note: Having an unacceptable value results in an exception and the service would REFUSE
* to start.
*
* Example:
* a) azkaban.storage.artifact.max.retention=0
* implies save all artifacts
* b) azkaban.storage.artifact.max.retention=3
* implies save the latest 3 versions in storage.
**/
public static final String AZKABAN_STORAGE_ARTIFACT_MAX_RETENTION = "azkaban.storage.artifact.max.retention";
// enable quartz scheduler and flow trigger if true.
public static final String ENABLE_QUARTZ = "azkaban.server.schedule.enable_quartz";
public static final String CUSTOM_CREDENTIAL_NAME = "azkaban.security.credential";
public static final String OAUTH_CREDENTIAL_NAME = "azkaban.oauth.credential";
public static final String SECURITY_USER_GROUP = "azkaban.security.user.group";
public static final String CSR_KEYSTORE_LOCATION = "azkaban.csr.keystore.location";
// dir to keep dependency plugins
public static final String DEPENDENCY_PLUGIN_DIR = "azkaban.dependency.plugin.dir";
public static final String USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors";
public static final String MAX_CONCURRENT_RUNS_ONEFLOW = "azkaban.max.concurrent.runs.oneflow";
// list of whitelisted flows, with specific max number of concurrent runs. Format:
// <project 1>,<flow 1>,<number>;<project 2>,<flow 2>,<number>
public static final String CONCURRENT_RUNS_ONEFLOW_WHITELIST =
"azkaban.concurrent.runs.oneflow.whitelist";
public static final String WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_MS =
"azkaban.activeexecutor.refresh.milisecinterval";
public static final String ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW =
"azkaban.activeexecutor.refresh.flowinterval";
public static final String EXECUTORINFO_REFRESH_MAX_THREADS =
"azkaban.executorinfo.refresh.maxThreads";
public static final String MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors";
public static final String EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters";
public static final String EXECUTOR_SELECTOR_COMPARATOR_PREFIX =
"azkaban.executorselector.comparator.";
public static final String QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled";
public static final String QUEUE_PROCESSOR_WAIT_IN_MS = "azkaban.queue.processor.wait.in.ms";
public static final String SESSION_TIME_TO_LIVE = "session.time.to.live";
// allowed max number of sessions per user per IP
public static final String MAX_SESSION_NUMBER_PER_IP_PER_USER = "azkaban.session"
+ ".max_number_per_ip_per_user";
// allowed max size of shared project dir (percentage of partition size), e.g 0.8
public static final String PROJECT_CACHE_SIZE_PERCENTAGE =
"azkaban.project_cache_size_percentage_of_disk";
public static final String PROJECT_CACHE_THROTTLE_PERCENTAGE =
"azkaban.project_cache_throttle_percentage";
// how many older versions of project files are kept in DB before deleting them
public static final String PROJECT_VERSION_RETENTION = "project.version.retention";
// number of rows to be displayed on the executions page.
public static final String DISPLAY_EXECUTION_PAGE_SIZE = "azkaban.display.execution_page_size";
// locked flow error message. Parameters passed in are the flow name and project name.
public static final String AZKABAN_LOCKED_FLOW_ERROR_MESSAGE =
"azkaban.locked.flow.error.message";
// flow ramp related setting keys
// Feature-enable setting for flow ramp. To stay backward compatible, it defaults to FALSE.
public static final String AZKABAN_RAMP_ENABLED = "azkaban.ramp.enabled";
// Because multiple AzkabanExec Server instances may run, the ramp result has to be persisted into the DB.
// However, frequent data persistence sacrifices performance for limited gains in data accuracy.
// This setting controls pushing results into the DB after every N finished ramped workflows.
public static final String AZKABAN_RAMP_STATUS_PUSH_INTERVAL_MAX = "azkaban.ramp.status.push.interval.max";
// Because multiple AzkabanExec Server instances may run, the ramp result has to be persisted into the DB.
// However, frequent data persistence sacrifices performance for limited gains in data accuracy.
// This setting controls pulling results from the DB after every N new ramped workflows.
public static final String AZKABAN_RAMP_STATUS_PULL_INTERVAL_MAX = "azkaban.ramp.status.pull.interval.max";
// A Polling Service can be applied to determine the ramp status synchronization interval.
public static final String AZKABAN_RAMP_STATUS_POLLING_ENABLED = "azkaban.ramp.status.polling.enabled";
public static final String AZKABAN_RAMP_STATUS_POLLING_INTERVAL = "azkaban.ramp.status.polling.interval";
public static final String AZKABAN_RAMP_STATUS_POLLING_CPU_MAX = "azkaban.ramp.status.polling.cpu.max";
public static final String AZKABAN_RAMP_STATUS_POLLING_MEMORY_MIN = "azkaban.ramp.status.polling.memory.min";
public static final String EXECUTION_LOGS_RETENTION_MS = "execution.logs.retention.ms";
public static final String EXECUTION_LOGS_CLEANUP_INTERVAL_SECONDS =
"execution.logs.cleanup.interval.seconds";
public static final String EXECUTION_LOGS_CLEANUP_RECORD_LIMIT =
"execution.logs.cleanup.record.limit";
// Oauth2.0 configuration keys. If missing, no OAuth will be attempted, and the old
// username/password{+2FA} prompt will be given for interactive login:
public static final String OAUTH_PROVIDER_URI_KEY = "oauth.provider_uri"; // where to send user for OAuth flow, e.g.:
// oauth.provider_uri=https://login.microsoftonline.com/tenant-id/oauth2/v2.0/authorize\
// ?client_id=client_id\
// &response_type=code\
// &scope=openid\
// &response_mode=form_post\
// &state={state}\
// &redirect_uri={redirect_uri}
// Strings {state} and {redirect_uri}, if present verbatim in the property value, will be
// substituted at runtime with (URL-encoded) navigation target and OAuth response handler URIs,
// respectively. See handleOauth() in LoginAbstractServlet.java for details.
public static final String OAUTH_REDIRECT_URI_KEY = "oauth.redirect_uri"; // how OAuth calls us back, e.g.:
// oauth.redirect_uri=http://localhost:8081/?action=oauth_callback
// By default job props always win over flow override props.
// If this flag is set to true, then flow override props also override existing job props.
public static final String EXECUTOR_PROPS_RESOLVE_OVERRIDE_EXISTING_ENABLED =
"executor.props.resolve.overrideExisting.enabled";
// Executor client TLS properties
public static final String EXECUTOR_CLIENT_TLS_ENABLED = "azkaban.executor.client.tls.enabled";
public static final String EXECUTOR_CLIENT_TRUSTSTORE_PATH = "azkaban.executor.client.truststore";
public static final String EXECUTOR_CLIENT_TRUSTSTORE_PASSWORD = "azkaban.executor.client.trustpassword";
public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_ENABLED =
"azkaban.executor.reverse.proxy.enabled";
public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_HOSTNAME =
"azkaban.executor.reverse.proxy.hostname";
public static final String AZKABAN_EXECUTOR_REVERSE_PROXY_PORT =
"azkaban.executor.reverse.proxy.port";
// Job callback
public static final String AZKABAN_EXECUTOR_JOBCALLBACK_ENABLED =
"azkaban.executor.jobcallback.enabled";
}
public static class FlowProperties {
// Basic properties of flows as set by the executor server
public static final String AZKABAN_FLOW_PROJECT_NAME = "azkaban.flow.projectname";
public static final String AZKABAN_FLOW_FLOW_ID = "azkaban.flow.flowid";
public static final String AZKABAN_FLOW_SUBMIT_USER = "azkaban.flow.submituser";
public static final String AZKABAN_FLOW_EXEC_ID = "azkaban.flow.execid";
public static final String AZKABAN_FLOW_PROJECT_VERSION = "azkaban.flow.projectversion";
}
public static class JobProperties {
// Job property that enables/disables using Kafka logging of user job logs
public static final String AZKABAN_JOB_LOGGING_KAFKA_ENABLE = "azkaban.job.logging.kafka.enable";
/*
* this parameter is used to replace EXTRA_HCAT_LOCATION that could fail when one of the uris is not available.
* EXTRA_HCAT_CLUSTERS has the following format:
* other_hcat_clusters = "thrift://hcat1:port,thrift://hcat2:port;thrift://hcat3:port,thrift://hcat4:port"
* Each semicolon-separated group is regarded as a "cluster", and we will get a delegation token from each cluster.
* The uris (hcat servers) within a "cluster" ensure that HA is provided.
**/
public static final String EXTRA_HCAT_CLUSTERS = "azkaban.job.hive.other_hcat_clusters";
/*
* the setting to be defined by the user, indicating whether there are hcat locations other than the
* default one that the system should pre-fetch hcat tokens from. Note: multiple thrift uris are
* supported; use commas to separate the values. Values are case insensitive.
**/
// Use EXTRA_HCAT_CLUSTERS instead
@Deprecated
public static final String EXTRA_HCAT_LOCATION = "other_hcat_location";
// If true, AZ will fetch the jobs' certificate from a remote Certificate Authority.
public static final String ENABLE_JOB_SSL = "azkaban.job.enable.ssl";
// If true, AZ will fetch OAuth token from credential provider
public static final String ENABLE_OAUTH = "azkaban.enable.oauth";
// Job properties that indicate maximum memory size
public static final String JOB_MAX_XMS = "job.max.Xms";
public static final String MAX_XMS_DEFAULT = "1G";
public static final String JOB_MAX_XMX = "job.max.Xmx";
public static final String MAX_XMX_DEFAULT = "2G";
// The hadoop user the job should run under. If not specified, it will default to submit user.
public static final String USER_TO_PROXY = "user.to.proxy";
/**
* Format string for Log4j's EnhancedPatternLayout
*/
public static final String JOB_LOG_LAYOUT = "azkaban.job.log.layout";
}
public static class JobCallbackProperties {
public static final String JOBCALLBACK_CONNECTION_REQUEST_TIMEOUT = "jobcallback.connection.request.timeout";
public static final String JOBCALLBACK_CONNECTION_TIMEOUT = "jobcallback.connection.timeout";
public static final String JOBCALLBACK_SOCKET_TIMEOUT = "jobcallback.socket.timeout";
public static final String JOBCALLBACK_RESPONSE_WAIT_TIMEOUT = "jobcallback.response.wait.timeout";
public static final String JOBCALLBACK_THREAD_POOL_SIZE = "jobcallback.thread.pool.size";
}
public static class FlowTriggerProps {
// Flow trigger props
public static final String SCHEDULE_TYPE = "type";
public static final String CRON_SCHEDULE_TYPE = "cron";
public static final String SCHEDULE_VALUE = "value";
public static final String DEP_NAME = "name";
// Flow trigger dependency run time props
public static final String START_TIME = "startTime";
public static final String TRIGGER_INSTANCE_ID = "triggerInstanceId";
}
public static class PluginManager {
public static final String JOBTYPE_DEFAULTDIR = "plugins/jobtypes";
public static final String RAMPPOLICY_DEFAULTDIR = "plugins/ramppolicies";
// need jars.to.include property, will be loaded with user property
public static final String CONFFILE = "plugin.properties";
// not exposed to users
public static final String SYSCONFFILE = "private.properties";
// common properties for multiple plugins
public static final String COMMONCONFFILE = "common.properties";
// common private properties for multiple plugins
public static final String COMMONSYSCONFFILE = "commonprivate.properties";
// mapping for the jobType to default proxy user
public static final String DEFAULT_PROXY_USERS_FILE = "default-proxy-users.properties";
// allowed jobType classes for default proxy user
public static final String DEFAULT_PROXY_USERS_JOBTYPE_CLASSES = "default.proxyusers.jobtype"
+ ".classes";
// users not allowed as default proxy user
public static final String DEFAULT_PROXY_USERS_FILTER = "default.proxyusers.filter";
}
public static class ContainerizedDispatchManagerProperties {
public static final String AZKABAN_CONTAINERIZED_PREFIX = "azkaban.containerized.";
public static final String CONTAINERIZED_IMPL_TYPE = AZKABAN_CONTAINERIZED_PREFIX + "impl.type";
public static final String CONTAINERIZED_EXECUTION_BATCH_ENABLED =
AZKABAN_CONTAINERIZED_PREFIX + "execution.batch.enabled";
public static final String CONTAINERIZED_EXECUTION_BATCH_SIZE = AZKABAN_CONTAINERIZED_PREFIX +
"execution.batch.size";
public static final String CONTAINERIZED_EXECUTION_PROCESSING_THREAD_POOL_SIZE =
AZKABAN_CONTAINERIZED_PREFIX + "execution.processing.thread.pool.size";
public static final String CONTAINERIZED_CREATION_RATE_LIMIT =
AZKABAN_CONTAINERIZED_PREFIX + "creation.rate.limit";
public static final String CONTAINERIZED_RAMPUP =
AZKABAN_CONTAINERIZED_PREFIX + "rampup";
public static final String CONTAINERIZED_JOBTYPE_ALLOWLIST =
AZKABAN_CONTAINERIZED_PREFIX + "jobtype.allowlist";
public static final String CONTAINERIZED_PROXY_USER_DENYLIST =
AZKABAN_CONTAINERIZED_PREFIX + "proxy.user.denylist";
// Kubernetes related properties
public static final String AZKABAN_KUBERNETES_PREFIX = "azkaban.kubernetes.";
public static final String KUBERNETES_NAMESPACE = AZKABAN_KUBERNETES_PREFIX + "namespace";
public static final String KUBERNETES_KUBE_CONFIG_PATH = AZKABAN_KUBERNETES_PREFIX +
"kube.config.path";
// Kubernetes pod related properties
public static final String KUBERNETES_POD_PREFIX = AZKABAN_KUBERNETES_PREFIX + "pod.";
public static final String KUBERNETES_POD_NAME_PREFIX = KUBERNETES_POD_PREFIX + "name.prefix";
public static final String KUBERNETES_POD_NSCD_SOCKET_VOLUME_MOUNT_PATH =
KUBERNETES_POD_PREFIX + "nscd.socket.volume.mount.path";
public static final String KUBERNETES_POD_NSCD_SOCKET_HOST_PATH =
KUBERNETES_POD_PREFIX + "nscd.socket.host.path";
public static final String KUBERNETES_POD_NSCD_MOUNT_READ_ONLY =
KUBERNETES_POD_PREFIX + "nscd.mount.read.only";
public static final String KUBERNETES_POD_AZKABAN_BASE_IMAGE_NAME = AZKABAN_KUBERNETES_PREFIX +
"azkaban-base.image.name";
public static final String KUBERNETES_POD_AZKABAN_CONFIG_IMAGE_NAME =
AZKABAN_KUBERNETES_PREFIX + "azkaban-config.image.name";
// Kubernetes flow container related properties
public static final String KUBERNETES_FLOW_CONTAINER_PREFIX = AZKABAN_KUBERNETES_PREFIX +
"flow.container.";
public static final String KUBERNETES_FLOW_CONTAINER_NAME =
KUBERNETES_FLOW_CONTAINER_PREFIX + ".name";
public static final String KUBERNETES_FLOW_CONTAINER_CPU_LIMIT =
KUBERNETES_FLOW_CONTAINER_PREFIX +
"cpu.limit";
public static final String KUBERNETES_FLOW_CONTAINER_CPU_REQUEST =
KUBERNETES_FLOW_CONTAINER_PREFIX +
"cpu.request";
public static final String KUBERNETES_FLOW_CONTAINER_MEMORY_LIMIT =
KUBERNETES_FLOW_CONTAINER_PREFIX +
"memory.limit";
public static final String KUBERNETES_FLOW_CONTAINER_MEMORY_REQUEST =
KUBERNETES_FLOW_CONTAINER_PREFIX + "memory.request";
public static final String KUBERNETES_FLOW_CONTAINER_SECRET_NAME =
KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.name";
public static final String KUBERNETES_FLOW_CONTAINER_SECRET_VOLUME =
KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.volume";
public static final String KUBERNETES_FLOW_CONTAINER_SECRET_MOUNTPATH =
KUBERNETES_FLOW_CONTAINER_PREFIX + "secret.mountpath";
public static final String KUBERNETES_INIT_MOUNT_PATH_FOR_JOBTYPES =
KUBERNETES_FLOW_CONTAINER_PREFIX + "init.jobtypes.mount.path";
public static final String KUBERNETES_MOUNT_PATH_FOR_JOBTYPES =
KUBERNETES_FLOW_CONTAINER_PREFIX + "jobtypes.mount.path";
public static final String KUBERNETES_POD_TEMPLATE_PATH =
KUBERNETES_POD_PREFIX + "template.path";
public static final String KUBERNETES_DEPENDENCY_TYPES =
KUBERNETES_FLOW_CONTAINER_PREFIX + "dependencyTypes";
public static final String KUBERNETES_INIT_MOUNT_PATH_FOR_DEPENDENCIES =
KUBERNETES_FLOW_CONTAINER_PREFIX + "init.dependencies.mount.path";
public static final String KUBERNETES_MOUNT_PATH_FOR_DEPENDENCIES =
KUBERNETES_FLOW_CONTAINER_PREFIX + "dependencies.mount.path";
// Kubernetes service related properties
public static final String KUBERNETES_SERVICE_PREFIX = AZKABAN_KUBERNETES_PREFIX + "service.";
public static final String KUBERNETES_SERVICE_REQUIRED = KUBERNETES_SERVICE_PREFIX +
"required";
public static final String KUBERNETES_SERVICE_NAME_PREFIX = KUBERNETES_SERVICE_PREFIX +
"name.prefix";
public static final String KUBERNETES_SERVICE_PORT = KUBERNETES_SERVICE_PREFIX + "port";
public static final String KUBERNETES_SERVICE_CREATION_TIMEOUT_MS = KUBERNETES_SERVICE_PREFIX +
"creation.timeout.ms";
// Kubernetes Watch related properties
public static final String KUBERNETES_WATCH_PREFIX = AZKABAN_KUBERNETES_PREFIX + "watch.";
public static final String KUBERNETES_WATCH_ENABLED = KUBERNETES_WATCH_PREFIX + "enabled";
public static final String KUBERNETES_WATCH_EVENT_CACHE_MAX_ENTRIES =
KUBERNETES_WATCH_PREFIX + "cache.max.entries";
// Periodicity of lookup and cleanup of stale executions.
public static final String CONTAINERIZED_STALE_EXECUTION_CLEANUP_INTERVAL_MIN =
AZKABAN_CONTAINERIZED_PREFIX + "stale.execution.cleanup.interval.min";
public static final String ENV_VERSION_SET_ID = "VERSION_SET_ID";
public static final String ENV_FLOW_EXECUTION_ID = "FLOW_EXECUTION_ID";
public static final String ENV_JAVA_ENABLE_DEBUG = "JAVA_ENABLE_DEBUG";
public static final String ENV_ENABLE_DEV_POD = "ENABLE_DEV_POD";
}
public static class ImageMgmtConstants {
public static final String IMAGE_TYPE = "imageType";
public static final String IMAGE_VERSION = "imageVersion";
public static final String VERSION_STATE = "versionState";
public static final String ID_KEY = "id";
public static final String IMAGE_RAMPUP_PLAN = "imageRampupPlan";
}
public static class FlowParameters {
// Constants for Flow parameters
public static final String FLOW_PARAM_VERSION_SET_ID = "azkaban.version-set.id";
// Constant to enable java remote debug for Flow Container
public static final String FLOW_PARAM_JAVA_ENABLE_DEBUG = "java.enable.debug";
// Constant to enable pod for developer testing
public static final String FLOW_PARAM_ENABLE_DEV_POD = "enable.dev.pod";
// Constant to disable pod cleanup through the kubernetes watch
public static final String FLOW_PARAM_DISABLE_POD_CLEANUP = "disable.pod.cleanup";
// Constant to dispatch execution to Containerization
public static final String FLOW_PARAM_DISPATCH_EXECUTION_TO_CONTAINER = "dispatch.execution.to"
+ ".container";
// Constant for cpu request for flow container
public static final String FLOW_PARAM_FLOW_CONTAINER_CPU_REQUEST = "flow.container.cpu.request";
// Constant for memory request for flow container
public static final String FLOW_PARAM_FLOW_CONTAINER_MEMORY_REQUEST = "flow.container.memory"
+ ".request";
public static final String FLOW_PARAM_POD_ENV_VAR = "pod.env.var.";
// Constant to allow test version to be passed as flow parameter. Passing test version will be
// allowed for Azkaban ADMIN role only
public static final String FLOW_PARAM_ALLOW_IMAGE_TEST_VERSION = "allow.image.test.version";
}
}
| 1 | 22,457 | Let's move the whole string to the next line for better readability | azkaban-azkaban | java |
@@ -104,7 +104,7 @@ app.controller('CalendarListController', ['$scope', '$rootScope', '$window', 'Ha
$scope.subscription.newSubscriptionLocked = false;
})
.catch(function() {
- OC.Notification.showTemporary(t('calendar', 'Error saving WebCal-calendar'));
+ OC.Notification.showTemporary(t('calendar', 'Could not save WebCal-calendar'));
$scope.subscription.newSubscriptionLocked = false;
});
}).catch(function(reason) { | 1 | /**
* Calendar App
*
* @author Raghu Nayyar
* @author Georg Ehrke
* @copyright 2016 Raghu Nayyar <[email protected]>
* @copyright 2016 Georg Ehrke <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
* License as published by the Free Software Foundation; either
* version 3 of the License, or any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU AFFERO GENERAL PUBLIC LICENSE for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
*/
/**
* Controller: CalendarListController
* Description: Takes care of CalendarList in App Navigation.
*/
app.controller('CalendarListController', ['$scope', '$rootScope', '$window', 'HashService', 'CalendarService', 'WebCalService', 'is', 'CalendarListItem', 'Calendar', 'MailerService', 'ColorUtility', 'isSharingAPI', 'constants',
function ($scope, $rootScope, $window, HashService, CalendarService, WebCalService, is, CalendarListItem, Calendar, MailerService, ColorUtility, isSharingAPI, constants) {
'use strict';
$scope.calendarListItems = [];
$scope.is = is;
$scope.newCalendarInputVal = '';
$scope.newCalendarColorVal = '';
$scope.subscription = {};
$scope.subscription.newSubscriptionUrl = '';
$scope.subscription.newSubscriptionLocked = false;
$scope.publicdav = 'CalDAV';
$scope.publicdavdesc = t('calendar', 'CalDAV address for clients');
$scope.isSharingAPI = isSharingAPI;
$scope.canSharePublicLink = constants.canSharePublicLink;
$scope.$watchCollection('calendars', function(newCalendars, oldCalendars) {
newCalendars = newCalendars || [];
oldCalendars = oldCalendars || [];
newCalendars.filter(function(calendar) {
return oldCalendars.indexOf(calendar) === -1;
}).forEach(function(calendar) {
const item = CalendarListItem(calendar);
if (item) {
$scope.calendarListItems.push(item);
$scope.publicdavurl = $scope.$parent.calendars[0].caldav;
calendar.register(Calendar.hookFinishedRendering, function() {
if (!$scope.$$phase) {
$scope.$apply();
}
});
}
});
oldCalendars.filter(function(calendar) {
return newCalendars.indexOf(calendar) === -1;
}).forEach(function(calendar) {
$scope.calendarListItems = $scope.calendarListItems.filter(function(itemToCheck) {
return itemToCheck.calendar !== calendar;
});
});
});
$scope.create = function (name, color) {
CalendarService.create(name, color).then(function(calendar) {
$scope.calendars.push(calendar);
$rootScope.$broadcast('createdCalendar', calendar);
$rootScope.$broadcast('reloadCalendarList');
});
$scope.newCalendarInputVal = '';
$scope.newCalendarColorVal = '';
angular.element('#new-calendar-button').click();
};
$scope.createSubscription = function(url) {
$scope.subscription.newSubscriptionLocked = true;
WebCalService.get(url).then(function(splittedICal) {
const color = splittedICal.color || ColorUtility.randomColor();
let name = splittedICal.name || url;
if (name.length > 100) {
name = name.substr(0, 100);
}
CalendarService.createWebCal(name, color, url)
.then(function(calendar) {
angular.element('#new-subscription-button').click();
$scope.calendars.push(calendar);
$scope.subscription.newSubscriptionUrl = '';
$scope.$digest();
$scope.$parent.$digest();
$scope.subscription.newSubscriptionLocked = false;
})
.catch(function() {
OC.Notification.showTemporary(t('calendar', 'Error saving WebCal-calendar'));
$scope.subscription.newSubscriptionLocked = false;
});
}).catch(function(reason) {
if (reason.error) {
OC.Notification.showTemporary(reason.message);
$scope.subscription.newSubscriptionLocked = false;
} else if(reason.redirect) {
$scope.createSubscription(reason.new_url);
}
});
};
$scope.download = function (item) {
$window.open(item.calendar.downloadUrl);
};
$scope.integration = function (item) {
return '<iframe width="400" height="215" src="' + item.publicEmbedURL + '"></iframe>';
};
$scope.$watch('publicdav', function (newvalue) {
if ($scope.$parent.calendars[0]) {
if (newvalue === 'CalDAV') { // CalDAV address
$scope.publicdavurl = $scope.$parent.calendars[0].caldav;
$scope.publicdavdesc = t('calendar', 'CalDAV address for clients');
} else { // WebDAV address
var url = $scope.$parent.calendars[0].url;
// cut off last slash to have a fancy name for the ics
if (url.slice(url.length - 1) === '/') {
url = url.slice(0, url.length - 1);
}
url += '?export';
$scope.publicdavurl = $window.location.origin + url;
$scope.publicdavdesc = t('calendar', 'WebDAV address for subscriptions');
}
}
});
$scope.sendMail = function (item) {
item.toggleSendingMail();
MailerService.sendMail(item.email, item.publicSharingURL, item.calendar.displayname).then(function (response) {
if (response.status === 200) {
item.email = '';
OC.Notification.showTemporary(t('calendar', 'Email has been sent.'));
} else {
OC.Notification.showTemporary(t('calendar', 'There was an issue while sending your email.'));
}
});
};
$scope.goPublic = function (item) {
$window.open(item.publicSharingURL);
};
$scope.toggleSharesEditor = function (calendar) {
calendar.toggleSharesEditor();
};
$scope.togglePublish = function(item) {
if (item.calendar.published) {
item.calendar.publish().then(function (response) {
if (response) {
CalendarService.get(item.calendar.url).then(function (calendar) {
item.calendar.publicToken = calendar.publicToken;
item.calendar.published = true;
});
}
$scope.$apply();
});
} else {
item.calendar.unpublish().then(function (response) {
if (response) {
item.calendar.published = false;
}
$scope.$apply();
});
}
};
$scope.prepareUpdate = function (calendar) {
calendar.prepareUpdate();
};
$scope.onSelectSharee = function (item, model, label, calendarItem) {
const calendar = calendarItem.calendar;
// Create a default share with the user/group, read only
calendar.share(item.type, item.identifier, item.displayname, false, false).then(function() {
// Remove content from text box
calendarItem.selectedSharee = '';
$scope.$apply();
});
};
$scope.updateExistingUserShare = function(calendar, userId, displayname, writable) {
calendar.share(constants.SHARE_TYPE_USER, userId, displayname, writable, true).then(function() {
$scope.$apply();
});
};
$scope.updateExistingGroupShare = function(calendar, groupId, displayname, writable) {
calendar.share(constants.SHARE_TYPE_GROUP, groupId, displayname, writable, true).then(function() {
$scope.$apply();
});
};
$scope.unshareFromUser = function(calendar, userId) {
calendar.unshare(constants.SHARE_TYPE_USER, userId).then(function() {
$scope.$apply();
});
};
$scope.unshareFromGroup = function(calendar, groupId) {
calendar.unshare(constants.SHARE_TYPE_GROUP, groupId).then(function() {
$scope.$apply();
});
};
$scope.findSharee = function (val, calendar) {
return $.get(
OC.linkToOCS('apps/files_sharing/api/v1') + 'sharees',
{
format: 'json',
search: val.trim(),
perPage: 200,
itemType: 'principals'
}
).then(function(result) {
var users = result.ocs.data.exact.users.concat(result.ocs.data.users);
var groups = result.ocs.data.exact.groups.concat(result.ocs.data.groups);
var userShares = calendar.shares.users;
var groupShares = calendar.shares.groups;
var userSharesLength = userShares.length;
var groupSharesLength = groupShares.length;
var i, j;
// Filter out current user
var usersLength = users.length;
for (i = 0 ; i < usersLength; i++) {
if (users[i].value.shareWith === OC.currentUser) {
users.splice(i, 1);
break;
}
}
// Now filter out all sharees that are already shared with
for (i = 0; i < userSharesLength; i++) {
var share = userShares[i];
usersLength = users.length;
for (j = 0; j < usersLength; j++) {
if (users[j].value.shareWith === share.id) {
users.splice(j, 1);
break;
}
}
}
// Combine users and groups
users = users.map(function(item){
return {
display: item.label,
displayname: item.label,
type: constants.SHARE_TYPE_USER,
identifier: item.value.shareWith
};
});
groups = groups.map(function(item){
return {
display: item.label + ' (' + t('calendar', 'group') + ')',
displayname: item.label,
type: constants.SHARE_TYPE_GROUP,
identifier: item.value.shareWith
};
});
return groups.concat(users);
});
};
$scope.performUpdate = function (item) {
item.saveEditor();
item.calendar.update().then(function() {
$rootScope.$broadcast('updatedCalendar', item.calendar);
$rootScope.$broadcast('reloadCalendarList');
});
};
/**
* Updates the shares of the calendar
*/
$scope.performUpdateShares = function (calendar) {
calendar.update().then(function() {
calendar.dropPreviousState();
calendar.list.edit = false;
$rootScope.$broadcast('updatedCalendar', calendar);
$rootScope.$broadcast('reloadCalendarList');
});
};
$scope.triggerEnable = function(item) {
item.calendar.toggleEnabled();
item.calendar.update().then(function() {
$rootScope.$broadcast('updatedCalendarsVisibility', item.calendar);
$rootScope.$broadcast('reloadCalendarList');
});
};
$scope.remove = function (item) {
item.calendar.delete().then(function() {
$scope.$parent.calendars = $scope.$parent.calendars.filter(function(elem) {
return elem !== item.calendar;
});
if (!$scope.$$phase) {
$scope.$apply();
}
});
};
$rootScope.$on('reloadCalendarList', function() {
if (!$scope.$$phase) {
$scope.$apply();
}
});
HashService.runIfApplicable('subscribe_to_webcal', (map) => {
if (map.has('url')) {
const url = map.get('url');
$scope.subscription.newSubscriptionUrl = url;
$scope.subscription.newSubscriptionLocked = true;
angular.element('#new-subscription-button').click();
// wait for calendars to be initialized
// needed for creating a proper url
$scope.calendarsPromise.then(() => {
$scope.createSubscription(url);
});
}
});
}
]);
| 1 | 6,268 | Please change this back to `Error saving WebCal-calendar` (and `Error saving WebCal-calendar` only) | nextcloud-calendar | js |
@@ -284,7 +284,6 @@ function getDefaultService() {
Options.prototype.CAPABILITY_KEY = 'goog:chromeOptions'
Options.prototype.BROWSER_NAME_VALUE = Browser.CHROME
Driver.getDefaultService = getDefaultService
-Driver.prototype.VENDOR_COMMAND_PREFIX = 'goog'
// PUBLIC API
exports.Driver = Driver | 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview Defines a {@linkplain Driver WebDriver} client for the Chrome
* web browser. Before using this module, you must download the latest
* [ChromeDriver release] and ensure it can be found on your system [PATH].
*
* There are three primary classes exported by this module:
*
* 1. {@linkplain ServiceBuilder}: configures the
* {@link selenium-webdriver/remote.DriverService remote.DriverService}
* that manages the [ChromeDriver] child process.
*
* 2. {@linkplain Options}: defines configuration options for each new Chrome
* session, such as which {@linkplain Options#setProxy proxy} to use,
* what {@linkplain Options#addExtensions extensions} to install, or
* what {@linkplain Options#addArguments command-line switches} to use when
* starting the browser.
*
* 3. {@linkplain Driver}: the WebDriver client; each new instance will control
* a unique browser session with a clean user profile (unless otherwise
* configured through the {@link Options} class).
*
* __Headless Chrome__ <a id="headless"></a>
*
* To start Chrome in headless mode, simply call
* {@linkplain Options#headless Options.headless()}.
*
* let chrome = require('selenium-webdriver/chrome');
* let {Builder} = require('selenium-webdriver');
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options().headless())
* .build();
*
* __Customizing the ChromeDriver Server__ <a id="custom-server"></a>
*
* By default, every Chrome session will use a single driver service, which is
* started the first time a {@link Driver} instance is created and terminated
* when this process exits. The default service will inherit its environment
* from the current process and direct all output to /dev/null. You may obtain
* a handle to this default service using
* {@link #getDefaultService getDefaultService()} and change its configuration
* with {@link #setDefaultService setDefaultService()}.
*
* You may also create a {@link Driver} with its own driver service. This is
* useful if you need to capture the server's log output for a specific session:
*
* let chrome = require('selenium-webdriver/chrome');
*
* let service = new chrome.ServiceBuilder()
* .loggingTo('/my/log/file.txt')
* .enableVerboseLogging()
* .build();
*
* let options = new chrome.Options();
* // configure browser options ...
*
* let driver = chrome.Driver.createSession(options, service);
*
* Users should only instantiate the {@link Driver} class directly when they
* need a custom driver service configuration (as shown above). For normal
* operation, users should start Chrome using the
* {@link selenium-webdriver.Builder}.
*
* __Working with Android__ <a id="android"></a>
*
* The [ChromeDriver][android] supports running tests on the Chrome browser as
* well as [WebView apps][webview] starting in Android 4.4 (KitKat). In order to
* work with Android, you must first start the adb server:
*
* adb start-server
*
* By default, adb will start on port 5037. You may change this port, but this
* will require configuring a [custom server](#custom-server) that will connect
* to adb on the {@linkplain ServiceBuilder#setAdbPort correct port}:
*
* let service = new chrome.ServiceBuilder()
* .setAdbPort(1234)
* .build();
* // etc.
*
* The ChromeDriver may be configured to launch Chrome on Android using
* {@link Options#androidChrome()}:
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options().androidChrome())
* .build();
*
* Alternatively, you can configure the ChromeDriver to launch an app with a
* Chrome-WebView by setting the {@linkplain Options#androidActivity
* androidActivity} option:
*
* let driver = new Builder()
* .forBrowser('chrome')
* .setChromeOptions(new chrome.Options()
* .androidPackage('com.example')
* .androidActivity('com.example.Activity'))
* .build();
*
* [Refer to the ChromeDriver site] for more information on using the
* [ChromeDriver with Android][android].
*
* [ChromeDriver]: https://chromedriver.chromium.org/
* [ChromeDriver release]: http://chromedriver.storage.googleapis.com/index.html
* [PATH]: http://en.wikipedia.org/wiki/PATH_%28variable%29
* [android]: https://chromedriver.chromium.org/getting-started/getting-started---android
* [webview]: https://developer.chrome.com/multidevice/webview/overview
*/
'use strict'
const io = require('./io')
const { Browser } = require('./lib/capabilities')
const chromium = require('./chromium')
/**
* Name of the ChromeDriver executable.
* @type {string}
* @const
*/
const CHROMEDRIVER_EXE =
process.platform === 'win32' ? 'chromedriver.exe' : 'chromedriver'
/** @type {remote.DriverService} */
let defaultService = null
/**
* Creates {@link selenium-webdriver/remote.DriverService} instances that manage
* a [ChromeDriver](https://chromedriver.chromium.org/)
* server in a child process.
*/
class ServiceBuilder extends chromium.ServiceBuilder {
/**
* @param {string=} opt_exe Path to the server executable to use. If omitted,
* the builder will attempt to locate the chromedriver on the current
* PATH.
* @throws {Error} If provided executable does not exist, or the chromedriver
* cannot be found on the PATH.
*/
constructor(opt_exe) {
let exe = opt_exe || locateSynchronously()
if (!exe) {
throw Error(
`The ChromeDriver could not be found on the current PATH. Please ` +
`download the latest version of the ChromeDriver from ` +
`http://chromedriver.storage.googleapis.com/index.html and ensure ` +
`it can be found on your PATH.`
)
}
super(exe)
}
}
/**
* Class for managing ChromeDriver specific options.
*/
class Options extends chromium.Options {
/**
* Sets the path to the Chrome binary to use. On Mac OS X, this path should
* reference the actual Chrome executable, not just the application binary
* (e.g. "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome").
*
* The binary path may be absolute or relative to the chromedriver server
* executable, but it must exist on the machine that will launch Chrome.
*
* @param {string} path The path to the Chrome binary to use.
* @return {!Options} A self reference.
*/
setChromeBinaryPath(path) {
return this.setBinaryPath(path)
}
/**
* Configures the ChromeDriver to launch Chrome on Android via adb. This
* function is shorthand for
* {@link #androidPackage options.androidPackage('com.android.chrome')}.
* @return {!Options} A self reference.
*/
androidChrome() {
return this.androidPackage('com.android.chrome')
}
/**
* Sets the path to Chrome's log file. This path should exist on the machine
* that will launch Chrome.
* @param {string} path Path to the log file to use.
* @return {!Options} A self reference.
*/
setChromeLogFile(path) {
return this.setBrowserLogFile(path)
}
/**
* Sets the directory to store Chrome minidumps in. This option is only
* supported when ChromeDriver is running on Linux.
* @param {string} path The directory path.
* @return {!Options} A self reference.
*/
setChromeMinidumpPath(path) {
return this.setBrowserMinidumpPath(path)
}
}
/**
* Creates a new WebDriver client for Chrome.
*/
class Driver extends chromium.Driver {
/**
* Creates a new session with the ChromeDriver.
*
* @param {(Capabilities|Options)=} opt_config The configuration options.
* @param {(remote.DriverService|http.Executor)=} opt_serviceExecutor Either
* a DriverService to use for the remote end, or a preconfigured executor
* for an externally managed endpoint. If neither is provided, the
* {@linkplain #getDefaultService default service} will be used by
* default.
* @return {!Driver} A new driver instance.
*/
static createSession(opt_config, opt_serviceExecutor) {
let caps = opt_config || new Options()
return /** @type {!Driver} */ (super.createSession(
caps,
opt_serviceExecutor
))
}
}
/**
* _Synchronously_ attempts to locate the chromedriver executable on the current
* system.
*
* @return {?string} the located executable, or `null`.
*/
function locateSynchronously() {
return io.findInPath(CHROMEDRIVER_EXE, true)
}
/**
* Sets the default service to use for new ChromeDriver instances.
* @param {!remote.DriverService} service The service to use.
* @throws {Error} If the default service is currently running.
*/
function setDefaultService(service) {
if (defaultService && defaultService.isRunning()) {
throw Error(
`The previously configured ChromeDriver service is still running. ` +
`You must shut it down before you may adjust its configuration.`
)
}
defaultService = service
}
/**
* Returns the default ChromeDriver service. If such a service has not been
* configured, one will be constructed using the default configuration for
* a ChromeDriver executable found on the system PATH.
* @return {!remote.DriverService} The default ChromeDriver service.
*/
function getDefaultService() {
if (!defaultService) {
defaultService = new ServiceBuilder().build()
}
return defaultService
}
Options.prototype.CAPABILITY_KEY = 'goog:chromeOptions'
Options.prototype.BROWSER_NAME_VALUE = Browser.CHROME
Driver.getDefaultService = getDefaultService
Driver.prototype.VENDOR_COMMAND_PREFIX = 'goog'
// PUBLIC API
exports.Driver = Driver
exports.Options = Options
exports.ServiceBuilder = ServiceBuilder
exports.getDefaultService = getDefaultService
exports.setDefaultService = setDefaultService
exports.locateSynchronously = locateSynchronously
| 1 | 18,744 | The vendor prefix is still being used on Chromium based browsers like Edge Chromium and Chrome. Did you mean to remove this? | SeleniumHQ-selenium | rb |
@@ -3,14 +3,18 @@ using Datadog.Trace.Logging;
using Datadog.Trace.Logging.LogProviders;
using NLog;
using NLog.Config;
+using NLog.Layouts;
using NLog.Targets;
using Xunit;
namespace Datadog.Trace.Tests.Logging
{
[Collection(nameof(Datadog.Trace.Tests.Logging))]
+ [TestCaseOrderer("Datadog.Trace.TestHelpers.AlphabeticalOrderer", "Datadog.Trace.TestHelpers")]
public class NLogLogProviderTests
{
+ private const string ExpectedStringFormat = "\"{0}\": \"{1}\"";
+
private readonly ILogProvider _logProvider;
private readonly ILog _logger;
private readonly MemoryTarget _target; | 1 | using System.Collections.Generic;
using Datadog.Trace.Logging;
using Datadog.Trace.Logging.LogProviders;
using NLog;
using NLog.Config;
using NLog.Targets;
using Xunit;
namespace Datadog.Trace.Tests.Logging
{
[Collection(nameof(Datadog.Trace.Tests.Logging))]
public class NLogLogProviderTests
{
private readonly ILogProvider _logProvider;
private readonly ILog _logger;
private readonly MemoryTarget _target;
public NLogLogProviderTests()
{
var config = new LoggingConfiguration();
_target = new MemoryTarget
{
Layout = string.Format("${{level:uppercase=true}}|{0}=${{mdc:item={0}}}|{1}=${{mdc:item={1}}}|{2}=${{mdc:item={2}}}|${{message}}", CorrelationIdentifier.SpanIdKey, CorrelationIdentifier.TraceIdKey, LoggingProviderTestHelpers.CustomPropertyName)
};
config.AddTarget("memory", _target);
config.LoggingRules.Add(new LoggingRule("*", NLog.LogLevel.Trace, _target));
LogManager.Configuration = config;
SimpleConfigurator.ConfigureForTargetLogging(_target, NLog.LogLevel.Trace);
_logProvider = new NLogLogProvider();
LogProvider.SetCurrentLogProvider(_logProvider);
_logger = new LoggerExecutionWrapper(_logProvider.GetLogger("test"));
}
[Fact]
public void EnabledLibLogSubscriberAddsTraceData()
{
// Assert that the NLog log provider is correctly being used
Assert.IsType<NLogLogProvider>(LogProvider.CurrentLogProvider);
// Instantiate a tracer for this test with default settings and set LogsInjectionEnabled to TRUE
var tracer = LoggingProviderTestHelpers.InitializeTracer(enableLogsInjection: true);
LoggingProviderTestHelpers.PerformParentChildScopeSequence(tracer, _logger, _logProvider.OpenMappedContext, out var parentScope, out var childScope);
// Filter the logs
List<string> filteredLogs = new List<string>(_target.Logs);
filteredLogs.RemoveAll(log => !log.Contains(LoggingProviderTestHelpers.LogPrefix));
int logIndex = 0;
string logString;
// Scope: Parent scope
// Custom property: N/A
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}={parentScope.Span.SpanId}", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}={parentScope.Span.TraceId}", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}=", logString);
// Scope: Parent scope
// Custom property: SET
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}={parentScope.Span.SpanId}", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}={parentScope.Span.TraceId}", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}={LoggingProviderTestHelpers.CustomPropertyValue}", logString);
// Scope: Child scope
// Custom property: SET
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}={childScope.Span.SpanId}", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}={childScope.Span.TraceId}", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}={LoggingProviderTestHelpers.CustomPropertyValue}", logString);
// Scope: Parent scope
// Custom property: SET
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}={parentScope.Span.SpanId}", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}={parentScope.Span.TraceId}", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}={LoggingProviderTestHelpers.CustomPropertyValue}", logString);
// Scope: Parent scope
// Custom property: N/A
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}={parentScope.Span.SpanId}", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}={parentScope.Span.TraceId}", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}=", logString);
// Scope: Default values of TraceId=0,SpanId=0
// Custom property: N/A
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}=0", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}=0", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}=", logString);
}
[Fact]
public void DisabledLibLogSubscriberDoesNotAddTraceData()
{
// Assert that the NLog log provider is correctly being used
Assert.IsType<NLogLogProvider>(LogProvider.CurrentLogProvider);
// Instantiate a tracer for this test with default settings and set LogsInjectionEnabled to FALSE
var tracer = LoggingProviderTestHelpers.InitializeTracer(enableLogsInjection: false);
LoggingProviderTestHelpers.PerformParentChildScopeSequence(tracer, _logger, _logProvider.OpenMappedContext, out var parentScope, out var childScope);
// Filter the logs
List<string> filteredLogs = new List<string>(_target.Logs);
filteredLogs.RemoveAll(log => !log.Contains(LoggingProviderTestHelpers.LogPrefix));
int logIndex = 0;
string logString;
// Scope: N/A
// Custom property: N/A
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}=", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}=", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}=", logString);
// Scope: N/A
// Custom property: SET
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}=", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}=", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}={LoggingProviderTestHelpers.CustomPropertyValue}", logString);
// Scope: N/A
// Custom property: SET
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}=", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}=", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}={LoggingProviderTestHelpers.CustomPropertyValue}", logString);
// Scope: N/A
// Custom property: SET
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}=", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}=", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}={LoggingProviderTestHelpers.CustomPropertyValue}", logString);
// Scope: N/A
// Custom property: N/A
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}=", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}=", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}=", logString);
// Scope: N/A
// Custom property: N/A
logString = filteredLogs[logIndex++];
Assert.Contains($"{CorrelationIdentifier.SpanIdKey}=", logString);
Assert.Contains($"{CorrelationIdentifier.TraceIdKey}=", logString);
Assert.Contains($"{LoggingProviderTestHelpers.CustomPropertyName}=", logString);
}
}
}
| 1 | 15,839 | Not ideal, but I used a test case orderer so I could avoid a bug that occurs when running two tracer's sequentially with different DD_LOGS_INJECTION settings. | DataDog-dd-trace-dotnet | .cs |
@@ -129,6 +129,13 @@ func (p *Packer) Size() uint {
return p.bytes
}
+// HeaderFull returns true if the pack header is at maximum capacity.
+func (p *Packer) HeaderFull() bool {
+ p.m.Lock()
+ defer p.m.Unlock()
+ return EntrySize+crypto.Extension+(uint(headerLengthSize)*uint(len(p.blobs)-1)) <= uint(maxHeaderSize)
+}
+
// Count returns the number of blobs in this packer.
func (p *Packer) Count() int {
p.m.Lock() | 1 | package pack
import (
"encoding/binary"
"fmt"
"io"
"sync"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/crypto"
)
// Packer is used to create a new Pack.
type Packer struct {
blobs []restic.Blob
bytes uint
k *crypto.Key
wr io.Writer
m sync.Mutex
}
// NewPacker returns a new Packer that can be used to pack blobs together.
func NewPacker(k *crypto.Key, wr io.Writer) *Packer {
return &Packer{k: k, wr: wr}
}
// Add saves the data as a new blob to the packer. Returned is the
// number of bytes written to the pack.
func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) {
p.m.Lock()
defer p.m.Unlock()
c := restic.Blob{BlobHandle: restic.BlobHandle{Type: t, ID: id}}
n, err := p.wr.Write(data)
c.Length = uint(n)
c.Offset = p.bytes
p.bytes += uint(n)
p.blobs = append(p.blobs, c)
return n, errors.Wrap(err, "Write")
}
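// EntrySize is the encoded size of a single pack header entry: one type
// byte, a 4-byte length field and the blob ID.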
var EntrySize = uint(binary.Size(restic.BlobType(0)) + headerLengthSize + len(restic.ID{}))
// headerEntry describes the format of header entries. It serves only as
// documentation.
type headerEntry struct {
Type uint8
Length uint32
ID restic.ID
}
// Finalize writes the header for all added blobs and finalizes the pack.
// Returned are the number of bytes written, including the header.
func (p *Packer) Finalize() (uint, error) {
p.m.Lock()
defer p.m.Unlock()
bytesWritten := p.bytes
header, err := p.makeHeader()
if err != nil {
return 0, err
}
encryptedHeader := make([]byte, 0, len(header)+p.k.Overhead()+p.k.NonceSize())
nonce := crypto.NewRandomNonce()
encryptedHeader = append(encryptedHeader, nonce...)
encryptedHeader = p.k.Seal(encryptedHeader, nonce, header, nil)
// append the header
n, err := p.wr.Write(encryptedHeader)
if err != nil {
return 0, errors.Wrap(err, "Write")
}
hdrBytes := restic.CiphertextLength(len(header))
if n != hdrBytes {
return 0, errors.New("wrong number of bytes written")
}
bytesWritten += uint(hdrBytes)
// write length
err = binary.Write(p.wr, binary.LittleEndian, uint32(restic.CiphertextLength(len(p.blobs)*int(EntrySize))))
if err != nil {
return 0, errors.Wrap(err, "binary.Write")
}
bytesWritten += uint(binary.Size(uint32(0)))
p.bytes = uint(bytesWritten)
return bytesWritten, nil
}
// makeHeader constructs the header for p.
func (p *Packer) makeHeader() ([]byte, error) {
buf := make([]byte, 0, len(p.blobs)*int(EntrySize))
for _, b := range p.blobs {
switch b.Type {
case restic.DataBlob:
buf = append(buf, 0)
case restic.TreeBlob:
buf = append(buf, 1)
default:
return nil, errors.Errorf("invalid blob type %v", b.Type)
}
var lenLE [4]byte
binary.LittleEndian.PutUint32(lenLE[:], uint32(b.Length))
buf = append(buf, lenLE[:]...)
buf = append(buf, b.ID[:]...)
}
return buf, nil
}
// Size returns the number of bytes written so far.
func (p *Packer) Size() uint {
p.m.Lock()
defer p.m.Unlock()
return p.bytes
}
// Count returns the number of blobs in this packer.
func (p *Packer) Count() int {
p.m.Lock()
defer p.m.Unlock()
return len(p.blobs)
}
// Blobs returns the slice of blobs that have been written.
func (p *Packer) Blobs() []restic.Blob {
p.m.Lock()
defer p.m.Unlock()
return p.blobs
}
func (p *Packer) String() string {
return fmt.Sprintf("<Packer %d blobs, %d bytes>", len(p.blobs), p.bytes)
}
var (
// we require at least one entry in the header, and one blob for a pack file
minFileSize = EntrySize + crypto.Extension + uint(headerLengthSize)
)
const (
// size of the header-length field at the end of the file; it is a uint32
headerLengthSize = 4
// HeaderSize is the header's constant overhead (independent of #entries)
HeaderSize = headerLengthSize + crypto.Extension
maxHeaderSize = 16 * 1024 * 1024
// number of header entries to download as part of header-length request
eagerEntries = 15
)
// readRecords reads up to max records from the underlying ReaderAt, returning
// the raw header, the total number of records in the header, and any error.
// If the header contains fewer than max entries, the header is truncated to
// the appropriate size.
func readRecords(rd io.ReaderAt, size int64, max int) ([]byte, int, error) {
var bufsize int
bufsize += max * int(EntrySize)
bufsize += crypto.Extension
bufsize += headerLengthSize
if bufsize > int(size) {
bufsize = int(size)
}
b := make([]byte, bufsize)
off := size - int64(bufsize)
if _, err := rd.ReadAt(b, off); err != nil {
return nil, 0, err
}
hlen := binary.LittleEndian.Uint32(b[len(b)-headerLengthSize:])
b = b[:len(b)-headerLengthSize]
debug.Log("header length: %v", hlen)
var err error
switch {
case hlen == 0:
err = InvalidFileError{Message: "header length is zero"}
case hlen < crypto.Extension:
err = InvalidFileError{Message: "header length is too small"}
case (hlen-crypto.Extension)%uint32(EntrySize) != 0:
err = InvalidFileError{Message: "header length is invalid"}
case int64(hlen) > size-int64(headerLengthSize):
err = InvalidFileError{Message: "header is larger than file"}
case int64(hlen) > maxHeaderSize:
err = InvalidFileError{Message: "header is larger than maxHeaderSize"}
}
if err != nil {
return nil, 0, errors.Wrap(err, "readHeader")
}
total := (int(hlen) - crypto.Extension) / int(EntrySize)
if total < max {
// truncate to the beginning of the pack header
b = b[len(b)-int(hlen):]
}
return b, total, nil
}
// readHeader reads the header at the end of rd. size is the length of the
// whole data accessible in rd.
func readHeader(rd io.ReaderAt, size int64) ([]byte, error) {
debug.Log("size: %v", size)
if size < int64(minFileSize) {
err := InvalidFileError{Message: "file is too small"}
return nil, errors.Wrap(err, "readHeader")
}
// assuming an extra request is significantly slower than downloading extra bytes,
// eagerly download eagerEntries header entries as part of the header-length request.
// only make a second request if the actual number of entries is greater than eagerEntries
b, c, err := readRecords(rd, size, eagerEntries)
if err != nil {
return nil, err
}
if c <= eagerEntries {
// eager read sufficed, return what we got
return b, nil
}
b, _, err = readRecords(rd, size, c)
if err != nil {
return nil, err
}
return b, nil
}
// InvalidFileError is returned when a file is found that is not a pack file.
type InvalidFileError struct {
Message string
}
func (e InvalidFileError) Error() string {
return e.Message
}
// List returns the list of entries found in a pack file and the length of the
// header (including header size and crypto overhead)
func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdrSize uint32, err error) {
buf, err := readHeader(rd, size)
if err != nil {
return nil, 0, err
}
if len(buf) < k.NonceSize()+k.Overhead() {
return nil, 0, errors.New("invalid header, too small")
}
hdrSize = headerLengthSize + uint32(len(buf))
nonce, buf := buf[:k.NonceSize()], buf[k.NonceSize():]
buf, err = k.Open(buf[:0], nonce, buf, nil)
if err != nil {
return nil, 0, err
}
entries = make([]restic.Blob, 0, uint(len(buf))/EntrySize)
pos := uint(0)
for len(buf) > 0 {
entry, err := parseHeaderEntry(buf)
if err != nil {
return nil, 0, err
}
entry.Offset = pos
entries = append(entries, entry)
pos += entry.Length
buf = buf[EntrySize:]
}
return entries, hdrSize, nil
}
// PackedSizeOfBlob returns the size a blob actually uses when saved in a pack
func PackedSizeOfBlob(blobLength uint) uint {
return blobLength + EntrySize
}
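// parseHeaderEntry decodes a single header entry (type byte, little-endian
// length field, blob ID) from the beginning of p.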
func parseHeaderEntry(p []byte) (b restic.Blob, err error) {
if uint(len(p)) < EntrySize {
err = errors.Errorf("parseHeaderEntry: buffer of size %d too short", len(p))
return b, err
}
p = p[:EntrySize]
switch p[0] {
case 0:
b.Type = restic.DataBlob
case 1:
b.Type = restic.TreeBlob
default:
return b, errors.Errorf("invalid type %d", p[0])
}
b.Length = uint(binary.LittleEndian.Uint32(p[1:5]))
copy(b.ID[:], p[5:])
return b, nil
}
| 1 | 12,922 | I think the condition should be ` uint(headerLengthSize) + crypto.Extension + (len(p.blobs)+1)*uint(EntrySize) > uint(maxHeaderSize)` `headerLengthSize` is only the length field. But the pack header also includes the blob type and its ID. And the result of the check, currently seems to be inverted. | restic-restic | go |
@@ -19,7 +19,6 @@ namespace Nethermind.Mev.Test
{
public enum SelectorType
{
- V1,
V2,
}
} | 1 | // Copyright (c) 2021 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
//
namespace Nethermind.Mev.Test
{
public enum SelectorType
{
V1,
V2,
}
}
| 1 | 25,457 | Remove selector if only 1 version is supported | NethermindEth-nethermind | .cs |
@@ -1188,7 +1188,7 @@ class Application extends BaseApplication {
const selectedNoteIds = state.selectedNoteIds;
const note = selectedNoteIds.length === 1 ? await Note.load(selectedNoteIds[0]) : null;
- for (const itemId of ['copy', 'paste', 'cut', 'selectAll', 'bold', 'italic', 'link', 'code', 'insertDateTime', 'commandStartExternalEditing', 'showLocalSearch']) {
+ for (const itemId of ['paste', 'cut', 'bold', 'italic', 'link', 'code', 'insertDateTime']) {
const menuItem = Menu.getApplicationMenu().getMenuItemById(`edit:${itemId}`);
if (!menuItem) continue;
const isHtmlNote = !!note && note.markup_language === MarkupToHtml.MARKUP_LANGUAGE_HTML; | 1 | require('app-module-path').addPath(__dirname);
const { BaseApplication } = require('lib/BaseApplication');
const { FoldersScreenUtils } = require('lib/folders-screen-utils.js');
const Setting = require('lib/models/Setting.js');
const { shim } = require('lib/shim.js');
const MasterKey = require('lib/models/MasterKey');
const Note = require('lib/models/Note');
const { MarkupToHtml } = require('lib/joplin-renderer');
const { _, setLocale } = require('lib/locale.js');
const { Logger } = require('lib/logger.js');
const fs = require('fs-extra');
const Tag = require('lib/models/Tag.js');
const { reg } = require('lib/registry.js');
const { defaultState } = require('lib/reducer.js');
const packageInfo = require('./packageInfo.js');
const AlarmService = require('lib/services/AlarmService.js');
const AlarmServiceDriverNode = require('lib/services/AlarmServiceDriverNode');
const DecryptionWorker = require('lib/services/DecryptionWorker');
const InteropService = require('lib/services/InteropService');
const InteropServiceHelper = require('./InteropServiceHelper.js');
const ResourceService = require('lib/services/ResourceService');
const ClipperServer = require('lib/ClipperServer');
const ExternalEditWatcher = require('lib/services/ExternalEditWatcher');
const { bridge } = require('electron').remote.require('./bridge');
const { shell, webFrame, clipboard } = require('electron');
const Menu = bridge().Menu;
const PluginManager = require('lib/services/PluginManager');
const RevisionService = require('lib/services/RevisionService');
const MigrationService = require('lib/services/MigrationService');
const TemplateUtils = require('lib/TemplateUtils');
const CssUtils = require('lib/CssUtils');
const pluginClasses = [
require('./plugins/GotoAnything.min'),
];
const appDefaultState = Object.assign({}, defaultState, {
route: {
type: 'NAV_GO',
routeName: 'Main',
props: {},
},
navHistory: [],
fileToImport: null,
windowCommand: null,
noteVisiblePanes: ['editor', 'viewer'],
sidebarVisibility: true,
noteListVisibility: true,
windowContentSize: bridge().windowContentSize(),
watchedNoteFiles: [],
lastEditorScrollPercents: {},
devToolsVisible: false,
});
class Application extends BaseApplication {
constructor() {
super();
this.lastMenuScreen_ = null;
}
hasGui() {
return true;
}
checkForUpdateLoggerPath() {
return `${Setting.value('profileDir')}/log-autoupdater.txt`;
}
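// reducer handles the desktop-specific actions (navigation, window commands,
// pane and sidebar visibility, note file watching, dev tools) and then
// delegates to the base application reducer.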
reducer(state = appDefaultState, action) {
let newState = state;
try {
switch (action.type) {
case 'NAV_BACK':
case 'NAV_GO':
{
const goingBack = action.type === 'NAV_BACK';
if (goingBack && !state.navHistory.length) break;
const currentRoute = state.route;
newState = Object.assign({}, state);
const newNavHistory = state.navHistory.slice();
if (goingBack) {
let newAction = null;
while (newNavHistory.length) {
newAction = newNavHistory.pop();
if (newAction.routeName !== state.route.routeName) break;
}
if (!newAction) break;
action = newAction;
}
if (!goingBack) newNavHistory.push(currentRoute);
newState.navHistory = newNavHistory;
newState.route = action;
}
break;
case 'WINDOW_CONTENT_SIZE_SET':
newState = Object.assign({}, state);
newState.windowContentSize = action.size;
break;
case 'WINDOW_COMMAND':
{
newState = Object.assign({}, state);
const command = Object.assign({}, action);
delete command.type;
newState.windowCommand = command.name ? command : null;
}
break;
case 'NOTE_VISIBLE_PANES_TOGGLE':
{
const getNextLayout = (currentLayout) => {
currentLayout = panes.length === 2 ? 'both' : currentLayout[0];
let paneOptions;
if (state.settings.layoutButtonSequence === Setting.LAYOUT_EDITOR_VIEWER) {
paneOptions = ['editor', 'viewer'];
} else if (state.settings.layoutButtonSequence === Setting.LAYOUT_EDITOR_SPLIT) {
paneOptions = ['editor', 'both'];
} else if (state.settings.layoutButtonSequence === Setting.LAYOUT_VIEWER_SPLIT) {
paneOptions = ['viewer', 'both'];
} else if (state.settings.layoutButtonSequence === Setting.LAYOUT_SPLIT_WYSIWYG) {
paneOptions = ['both', 'wysiwyg'];
} else {
paneOptions = ['editor', 'viewer', 'both'];
}
const currentLayoutIndex = paneOptions.indexOf(currentLayout);
const nextLayoutIndex = currentLayoutIndex === paneOptions.length - 1 ? 0 : currentLayoutIndex + 1;
const nextLayout = paneOptions[nextLayoutIndex];
return nextLayout === 'both' ? ['editor', 'viewer'] : [nextLayout];
};
newState = Object.assign({}, state);
const panes = state.noteVisiblePanes.slice();
newState.noteVisiblePanes = getNextLayout(panes);
}
break;
case 'NOTE_VISIBLE_PANES_SET':
newState = Object.assign({}, state);
newState.noteVisiblePanes = action.panes;
break;
case 'SIDEBAR_VISIBILITY_TOGGLE':
newState = Object.assign({}, state);
newState.sidebarVisibility = !state.sidebarVisibility;
break;
case 'SIDEBAR_VISIBILITY_SET':
newState = Object.assign({}, state);
newState.sidebarVisibility = action.visibility;
break;
case 'NOTELIST_VISIBILITY_TOGGLE':
newState = Object.assign({}, state);
newState.noteListVisibility = !state.noteListVisibility;
break;
case 'NOTELIST_VISIBILITY_SET':
newState = Object.assign({}, state);
newState.noteListVisibility = action.visibility;
break;
case 'NOTE_FILE_WATCHER_ADD':
if (newState.watchedNoteFiles.indexOf(action.id) < 0) {
newState = Object.assign({}, state);
const watchedNoteFiles = newState.watchedNoteFiles.slice();
watchedNoteFiles.push(action.id);
newState.watchedNoteFiles = watchedNoteFiles;
}
break;
case 'NOTE_FILE_WATCHER_REMOVE':
{
newState = Object.assign({}, state);
const idx = newState.watchedNoteFiles.indexOf(action.id);
if (idx >= 0) {
const watchedNoteFiles = newState.watchedNoteFiles.slice();
watchedNoteFiles.splice(idx, 1);
newState.watchedNoteFiles = watchedNoteFiles;
}
}
break;
case 'NOTE_FILE_WATCHER_CLEAR':
if (state.watchedNoteFiles.length) {
newState = Object.assign({}, state);
newState.watchedNoteFiles = [];
}
break;
case 'EDITOR_SCROLL_PERCENT_SET':
{
newState = Object.assign({}, state);
const newPercents = Object.assign({}, newState.lastEditorScrollPercents);
newPercents[action.noteId] = action.percent;
newState.lastEditorScrollPercents = newPercents;
}
break;
case 'NOTE_DEVTOOLS_TOGGLE':
newState = Object.assign({}, state);
newState.devToolsVisible = !newState.devToolsVisible;
break;
case 'NOTE_DEVTOOLS_SET':
newState = Object.assign({}, state);
newState.devToolsVisible = action.value;
break;
}
} catch (error) {
error.message = `In reducer: ${error.message} Action: ${JSON.stringify(action)}`;
throw error;
}
return super.reducer(newState, action);
}
toggleDevTools(visible) {
if (visible) {
bridge().openDevTools();
} else {
bridge().closeDevTools();
}
}
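// generalMiddleware performs the side effects associated with dispatched
// actions (locale and tray updates, menu refreshes, alarm updates, persisting
// the layout settings) around a call to the base middleware.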
async generalMiddleware(store, next, action) {
if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'locale' || action.type == 'SETTING_UPDATE_ALL') {
setLocale(Setting.value('locale'));
// The bridge runs within the main process, with its own instance of locale.js
// so it needs to be set too here.
bridge().setLocale(Setting.value('locale'));
await this.refreshMenu();
}
if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'showTrayIcon' || action.type == 'SETTING_UPDATE_ALL') {
this.updateTray();
}
if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'style.editor.fontFamily' || action.type == 'SETTING_UPDATE_ALL') {
this.updateEditorFont();
}
if (action.type == 'SETTING_UPDATE_ONE' && action.key == 'windowContentZoomFactor' || action.type == 'SETTING_UPDATE_ALL') {
webFrame.setZoomFactor(Setting.value('windowContentZoomFactor') / 100);
}
if (['EVENT_NOTE_ALARM_FIELD_CHANGE', 'NOTE_DELETE'].indexOf(action.type) >= 0) {
await AlarmService.updateNoteNotification(action.id, action.type === 'NOTE_DELETE');
}
const result = await super.generalMiddleware(store, next, action);
const newState = store.getState();
if (action.type === 'NAV_GO' || action.type === 'NAV_BACK') {
app().updateMenu(newState.route.routeName);
}
if (['NOTE_VISIBLE_PANES_TOGGLE', 'NOTE_VISIBLE_PANES_SET'].indexOf(action.type) >= 0) {
Setting.setValue('noteVisiblePanes', newState.noteVisiblePanes);
const layout = newState.noteVisiblePanes[0];
this.updateMenuItemStates(layout);
}
if (['SIDEBAR_VISIBILITY_TOGGLE', 'SIDEBAR_VISIBILITY_SET'].indexOf(action.type) >= 0) {
Setting.setValue('sidebarVisibility', newState.sidebarVisibility);
}
if (['NOTELIST_VISIBILITY_TOGGLE', 'NOTELIST_VISIBILITY_SET'].indexOf(action.type) >= 0) {
Setting.setValue('noteListVisibility', newState.noteListVisibility);
}
if (action.type.indexOf('NOTE_SELECT') === 0 || action.type.indexOf('FOLDER_SELECT') === 0) {
const layout = newState.noteVisiblePanes[0];
this.updateMenuItemStates(layout, newState);
}
if (['NOTE_DEVTOOLS_TOGGLE', 'NOTE_DEVTOOLS_SET'].indexOf(action.type) >= 0) {
this.toggleDevTools(newState.devToolsVisible);
this.updateMenuItemStates(newState);
}
return result;
}
async refreshMenu() {
const screen = this.lastMenuScreen_;
this.lastMenuScreen_ = null;
await this.updateMenu(screen);
}
focusElement_(target) {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'focusElement',
target: target,
});
}
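// updateMenu rebuilds the full application menu for the given screen,
// removing items that do not apply to the current screen or platform.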
async updateMenu(screen) {
if (this.lastMenuScreen_ === screen) return;
const sortNoteFolderItems = (type) => {
const sortItems = [];
const sortOptions = Setting.enumOptions(`${type}.sortOrder.field`);
for (const field in sortOptions) {
if (!sortOptions.hasOwnProperty(field)) continue;
sortItems.push({
label: sortOptions[field],
screens: ['Main'],
type: 'checkbox',
checked: Setting.value(`${type}.sortOrder.field`) === field,
click: () => {
Setting.setValue(`${type}.sortOrder.field`, field);
this.refreshMenu();
},
});
}
sortItems.push({ type: 'separator' });
sortItems.push({
label: Setting.settingMetadata(`${type}.sortOrder.reverse`).label(),
type: 'checkbox',
checked: Setting.value(`${type}.sortOrder.reverse`),
screens: ['Main'],
click: () => {
Setting.setValue(`${type}.sortOrder.reverse`, !Setting.value(`${type}.sortOrder.reverse`));
},
});
return sortItems;
};
const sortNoteItems = sortNoteFolderItems('notes');
const sortFolderItems = sortNoteFolderItems('folders');
const focusItems = [];
focusItems.push({
label: _('Sidebar'),
click: () => { this.focusElement_('sideBar'); },
accelerator: 'CommandOrControl+Shift+S',
});
focusItems.push({
label: _('Note list'),
click: () => { this.focusElement_('noteList'); },
accelerator: 'CommandOrControl+Shift+L',
});
focusItems.push({
label: _('Note title'),
click: () => { this.focusElement_('noteTitle'); },
accelerator: 'CommandOrControl+Shift+N',
});
focusItems.push({
label: _('Note body'),
click: () => { this.focusElement_('noteBody'); },
accelerator: 'CommandOrControl+Shift+B',
});
let toolsItems = [];
const importItems = [];
const exportItems = [];
const toolsItemsFirst = [];
const templateItems = [];
const ioService = new InteropService();
const ioModules = ioService.modules();
for (let i = 0; i < ioModules.length; i++) {
const module = ioModules[i];
if (module.type === 'exporter') {
if (module.canDoMultiExport !== false) {
exportItems.push({
label: module.fullLabel(),
screens: ['Main'],
click: async () => {
await InteropServiceHelper.export(this.dispatch.bind(this), module);
},
});
}
} else {
for (let j = 0; j < module.sources.length; j++) {
const moduleSource = module.sources[j];
importItems.push({
label: module.fullLabel(moduleSource),
screens: ['Main'],
click: async () => {
let path = null;
const selectedFolderId = this.store().getState().selectedFolderId;
if (moduleSource === 'file') {
path = bridge().showOpenDialog({
filters: [{ name: module.description, extensions: module.fileExtensions }],
});
} else {
path = bridge().showOpenDialog({
properties: ['openDirectory', 'createDirectory'],
});
}
if (!path || (Array.isArray(path) && !path.length)) return;
if (Array.isArray(path)) path = path[0];
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'showModalMessage',
message: _('Importing from "%s" as "%s" format. Please wait...', path, module.format),
});
const importOptions = {
path,
format: module.format,
modulePath: module.path,
onError: console.warn,
destinationFolderId:
!module.isNoteArchive && moduleSource === 'file'
? selectedFolderId
: null,
};
const service = new InteropService();
try {
const result = await service.import(importOptions);
console.info('Import result: ', result);
} catch (error) {
bridge().showErrorMessageBox(error.message);
}
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'hideModalMessage',
});
},
});
}
}
}
exportItems.push({
label: `PDF - ${_('PDF File')}`,
screens: ['Main'],
click: async () => {
const selectedNoteIds = this.store().getState().selectedNoteIds;
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'exportPdf',
noteIds: selectedNoteIds,
});
},
});
// We need a dummy entry, otherwise the ternary operator to show a
// menu item only on a specific OS does not work.
const noItem = {
type: 'separator',
visible: false,
};
const syncStatusItem = {
label: _('Synchronisation Status'),
click: () => {
this.dispatch({
type: 'NAV_GO',
routeName: 'Status',
});
},
};
const newNoteItem = {
label: _('New note'),
accelerator: 'CommandOrControl+N',
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'newNote',
});
},
};
const newTodoItem = {
label: _('New to-do'),
accelerator: 'CommandOrControl+T',
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'newTodo',
});
},
};
const newNotebookItem = {
label: _('New notebook'),
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'newNotebook',
});
},
};
const newSubNotebookItem = {
label: _('New sub-notebook'),
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'newSubNotebook',
activeFolderId: Setting.value('activeFolderId'),
});
},
};
const printItem = {
label: _('Print'),
accelerator: 'CommandOrControl+P',
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'print',
});
},
};
toolsItemsFirst.push(syncStatusItem, {
type: 'separator',
screens: ['Main'],
});
const templateDirExists = await shim.fsDriver().exists(Setting.value('templateDir'));
templateItems.push({
label: _('Create note from template'),
visible: templateDirExists,
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'selectTemplate',
noteType: 'note',
});
},
}, {
label: _('Create to-do from template'),
visible: templateDirExists,
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'selectTemplate',
noteType: 'todo',
});
},
}, {
label: _('Insert template'),
visible: templateDirExists,
accelerator: 'CommandOrControl+Alt+I',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'selectTemplate',
});
},
}, {
label: _('Open template directory'),
click: () => {
const templateDir = Setting.value('templateDir');
if (!templateDirExists) shim.fsDriver().mkdir(templateDir);
shell.openItem(templateDir);
},
}, {
label: _('Refresh templates'),
click: async () => {
const templates = await TemplateUtils.loadTemplates(Setting.value('templateDir'));
this.store().dispatch({
type: 'TEMPLATE_UPDATE_ALL',
templates: templates,
});
},
});
// we need this workaround, because on macOS the menu is different
const toolsItemsWindowsLinux = toolsItemsFirst.concat([{
label: _('Options'),
visible: !shim.isMac(),
accelerator: 'CommandOrControl+,',
click: () => {
this.dispatch({
type: 'NAV_GO',
routeName: 'Config',
});
},
}]);
// the following menu items will be available for all OS under Tools
const toolsItemsAll = [{
label: _('Resources'),
click: () => {
this.dispatch({
type: 'NAV_GO',
routeName: 'Resources',
});
},
}];
if (!shim.isMac()) {
toolsItems = toolsItems.concat(toolsItemsWindowsLinux);
}
toolsItems = toolsItems.concat(toolsItemsAll);
function _checkForUpdates(ctx) {
bridge().checkForUpdates(false, bridge().window(), ctx.checkForUpdateLoggerPath(), { includePreReleases: Setting.value('autoUpdate.includePreReleases') });
}
function _showAbout() {
const p = packageInfo;
let gitInfo = '';
if ('git' in p) {
gitInfo = _('Revision: %s (%s)', p.git.hash, p.git.branch);
}
const copyrightText = 'Copyright © 2016-YYYY Laurent Cozic';
const message = [
p.description,
'',
copyrightText.replace('YYYY', new Date().getFullYear()),
_('%s %s (%s, %s)', p.name, p.version, Setting.value('env'), process.platform),
'',
_('Client ID: %s', Setting.value('clientId')),
_('Sync Version: %s', Setting.value('syncVersion')),
_('Profile Version: %s', reg.db().version()),
];
if (gitInfo) {
message.push(`\n${gitInfo}`);
console.info(gitInfo);
}
const text = message.join('\n');
const copyToClipboard = bridge().showMessageBox(text, {
icon: `${bridge().electronApp().buildDir()}/icons/128x128.png`,
buttons: [_('Copy'), _('OK')],
cancelId: 1,
defaultId: 1,
});
if (copyToClipboard === 0) {
clipboard.writeText(message.splice(3).join('\n'));
}
}
const rootMenuFile = {
// Using a dummy entry for macOS here, because the first menu
// becomes 'Joplin' and we need a menu called 'File' later.
label: shim.isMac() ? '&JoplinMainMenu' : _('&File'),
// A `&` before one of the chars in the label name means that
// <Alt + F> will open this menu. It's needed because Electron
// opens the first menu on Alt press if no hotkey is assigned.
// Issue: https://github.com/laurent22/joplin/issues/934
submenu: [{
label: _('About Joplin'),
visible: shim.isMac() ? true : false,
click: () => _showAbout(),
}, {
type: 'separator',
visible: shim.isMac() ? true : false,
}, {
label: _('Preferences...'),
visible: shim.isMac() ? true : false,
accelerator: 'CommandOrControl+,',
click: () => {
this.dispatch({
type: 'NAV_GO',
routeName: 'Config',
});
},
}, {
label: _('Check for updates...'),
visible: shim.isMac() ? true : false,
click: () => _checkForUpdates(this),
}, {
type: 'separator',
visible: shim.isMac() ? true : false,
},
shim.isMac() ? noItem : newNoteItem,
shim.isMac() ? noItem : newTodoItem,
shim.isMac() ? noItem : newNotebookItem,
shim.isMac() ? noItem : newSubNotebookItem, {
type: 'separator',
visible: shim.isMac() ? false : true,
}, {
label: _('Templates'),
visible: shim.isMac() ? false : true,
submenu: templateItems,
}, {
type: 'separator',
visible: shim.isMac() ? false : true,
}, {
label: _('Import'),
visible: shim.isMac() ? false : true,
submenu: importItems,
}, {
label: _('Export'),
visible: shim.isMac() ? false : true,
submenu: exportItems,
}, {
type: 'separator',
}, {
label: _('Synchronise'),
accelerator: 'CommandOrControl+S',
screens: ['Main'],
click: async () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'synchronize',
});
},
}, shim.isMac() ? syncStatusItem : noItem, {
type: 'separator',
}, shim.isMac() ? noItem : printItem, {
type: 'separator',
platforms: ['darwin'],
}, {
label: _('Hide %s', 'Joplin'),
platforms: ['darwin'],
accelerator: 'CommandOrControl+H',
click: () => { bridge().electronApp().hide(); },
}, {
type: 'separator',
}, {
label: _('Quit'),
accelerator: 'CommandOrControl+Q',
click: () => { bridge().electronApp().quit(); },
}],
};
const rootMenuFileMacOs = {
label: _('&File'),
visible: shim.isMac() ? true : false,
submenu: [
newNoteItem,
newTodoItem,
newNotebookItem,
newSubNotebookItem, {
label: _('Close Window'),
platforms: ['darwin'],
accelerator: 'Command+W',
selector: 'performClose:',
}, {
type: 'separator',
}, {
label: _('Templates'),
submenu: templateItems,
}, {
type: 'separator',
}, {
label: _('Import'),
submenu: importItems,
}, {
label: _('Export'),
submenu: exportItems,
}, {
type: 'separator',
},
printItem,
],
};
const layoutButtonSequenceOptions = Object.entries(Setting.enumOptions('layoutButtonSequence')).map(([layoutKey, layout]) => ({
label: layout,
screens: ['Main'],
type: 'checkbox',
checked: Setting.value('layoutButtonSequence') == layoutKey,
click: () => {
Setting.setValue('layoutButtonSequence', layoutKey);
this.refreshMenu();
},
}));
const rootMenus = {
edit: {
id: 'edit',
label: _('&Edit'),
submenu: [{
id: 'edit:copy',
label: _('Copy'),
role: 'copy',
accelerator: 'CommandOrControl+C',
}, {
id: 'edit:cut',
label: _('Cut'),
role: 'cut',
accelerator: 'CommandOrControl+X',
}, {
id: 'edit:paste',
label: _('Paste'),
role: 'paste',
accelerator: 'CommandOrControl+V',
}, {
id: 'edit:selectAll',
label: _('Select all'),
role: 'selectall',
accelerator: 'CommandOrControl+A',
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'edit:bold',
label: _('Bold'),
screens: ['Main'],
accelerator: 'CommandOrControl+B',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'textBold',
});
},
}, {
id: 'edit:italic',
label: _('Italic'),
screens: ['Main'],
accelerator: 'CommandOrControl+I',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'textItalic',
});
},
}, {
id: 'edit:link',
label: _('Link'),
screens: ['Main'],
accelerator: 'CommandOrControl+K',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'textLink',
});
},
}, {
id: 'edit:code',
label: _('Code'),
screens: ['Main'],
accelerator: 'CommandOrControl+`',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'textCode',
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'edit:insertDateTime',
label: _('Insert Date Time'),
screens: ['Main'],
accelerator: 'CommandOrControl+Shift+T',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'insertDateTime',
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'edit:commandStartExternalEditing',
label: _('Edit in external editor'),
screens: ['Main'],
accelerator: 'CommandOrControl+E',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'commandStartExternalEditing',
});
},
}, {
id: 'edit:setTags',
label: _('Tags'),
screens: ['Main'],
accelerator: 'CommandOrControl+Alt+T',
click: () => {
const selectedNoteIds = this.store().getState().selectedNoteIds;
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'setTags',
noteIds: selectedNoteIds,
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'edit:focusSearch',
label: _('Search in all the notes'),
screens: ['Main'],
accelerator: shim.isMac() ? 'Shift+Command+F' : 'F6',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'focusSearch',
});
},
}, {
id: 'edit:showLocalSearch',
label: _('Search in current note'),
screens: ['Main'],
accelerator: 'CommandOrControl+F',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'showLocalSearch',
});
},
}],
},
view: {
label: _('&View'),
submenu: [{
label: _('Toggle sidebar'),
screens: ['Main'],
accelerator: shim.isMac() ? 'Option+Command+S' : 'F10',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'toggleSidebar',
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
label: _('Layout button sequence'),
screens: ['Main'],
submenu: layoutButtonSequenceOptions,
}, {
label: _('Toggle note list'),
screens: ['Main'],
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'toggleNoteList',
});
},
}, {
label: _('Toggle editor layout'),
screens: ['Main'],
accelerator: 'CommandOrControl+L',
click: () => {
this.dispatch({
type: 'WINDOW_COMMAND',
name: 'toggleVisiblePanes',
});
},
}, {
type: 'separator',
screens: ['Main'],
}, {
label: Setting.settingMetadata('notes.sortOrder.field').label(),
screens: ['Main'],
submenu: sortNoteItems,
}, {
label: Setting.settingMetadata('folders.sortOrder.field').label(),
screens: ['Main'],
submenu: sortFolderItems,
}, {
label: Setting.settingMetadata('showNoteCounts').label(),
type: 'checkbox',
checked: Setting.value('showNoteCounts'),
screens: ['Main'],
click: () => {
Setting.setValue('showNoteCounts', !Setting.value('showNoteCounts'));
},
}, {
label: Setting.settingMetadata('uncompletedTodosOnTop').label(),
type: 'checkbox',
checked: Setting.value('uncompletedTodosOnTop'),
screens: ['Main'],
click: () => {
Setting.setValue('uncompletedTodosOnTop', !Setting.value('uncompletedTodosOnTop'));
},
}, {
label: Setting.settingMetadata('showCompletedTodos').label(),
type: 'checkbox',
checked: Setting.value('showCompletedTodos'),
screens: ['Main'],
click: () => {
Setting.setValue('showCompletedTodos', !Setting.value('showCompletedTodos'));
},
}, {
type: 'separator',
screens: ['Main'],
}, {
label: _('Focus'),
screens: ['Main'],
submenu: focusItems,
}, {
type: 'separator',
screens: ['Main'],
}, {
label: _('Actual Size'),
click: () => {
Setting.setValue('windowContentZoomFactor', 100);
},
accelerator: 'CommandOrControl+0',
}, {
label: _('Zoom In'),
click: () => {
Setting.incValue('windowContentZoomFactor', 10);
},
accelerator: 'CommandOrControl+=',
}, {
label: _('Zoom Out'),
click: () => {
Setting.incValue('windowContentZoomFactor', -10);
},
accelerator: 'CommandOrControl+-',
}],
},
tools: {
label: _('&Tools'),
submenu: toolsItems,
},
help: {
label: _('&Help'),
submenu: [{
label: _('Website and documentation'),
accelerator: 'F1',
click() { bridge().openExternal('https://joplinapp.org'); },
}, {
label: _('Joplin Forum'),
click() { bridge().openExternal('https://discourse.joplinapp.org'); },
}, {
label: _('Make a donation'),
click() { bridge().openExternal('https://joplinapp.org/donate/'); },
}, {
label: _('Check for updates...'),
visible: shim.isMac() ? false : true,
click: () => _checkForUpdates(this),
}, {
type: 'separator',
screens: ['Main'],
}, {
id: 'help:toggleDevTools',
type: 'checkbox',
label: _('Toggle development tools'),
visible: true,
click: () => {
this.dispatch({
type: 'NOTE_DEVTOOLS_TOGGLE',
});
},
}, {
type: 'separator',
visible: shim.isMac() ? false : true,
screens: ['Main'],
}, {
label: _('About Joplin'),
visible: shim.isMac() ? false : true,
click: () => _showAbout(),
}],
},
};
if (shim.isMac()) {
rootMenus.macOsApp = rootMenuFile;
rootMenus.file = rootMenuFileMacOs;
} else {
rootMenus.file = rootMenuFile;
}
// It seems the "visible" property of separators is ignored by Electron, making
// it display separators that we want hidden. So this function iterates through
// them and remove them completely.
const cleanUpSeparators = items => {
const output = [];
for (const item of items) {
if ('visible' in item && item.type === 'separator' && !item.visible) continue;
output.push(item);
}
return output;
};
for (const key in rootMenus) {
if (!rootMenus.hasOwnProperty(key)) continue;
if (!rootMenus[key].submenu) continue;
rootMenus[key].submenu = cleanUpSeparators(rootMenus[key].submenu);
}
const pluginMenuItems = PluginManager.instance().menuItems();
for (const item of pluginMenuItems) {
const itemParent = rootMenus[item.parent] ? rootMenus[item.parent] : 'tools';
itemParent.submenu.push(item);
}
const template = [
rootMenus.file,
rootMenus.edit,
rootMenus.view,
rootMenus.tools,
rootMenus.help,
];
if (shim.isMac()) template.splice(0, 0, rootMenus.macOsApp);
function isEmptyMenu(template) {
for (let i = 0; i < template.length; i++) {
const t = template[i];
if (t.type !== 'separator') return false;
}
return true;
}
function removeUnwantedItems(template, screen) {
const platform = shim.platformName();
let output = [];
for (let i = 0; i < template.length; i++) {
const t = Object.assign({}, template[i]);
if (t.screens && t.screens.indexOf(screen) < 0) continue;
if (t.platforms && t.platforms.indexOf(platform) < 0) continue;
if (t.submenu) t.submenu = removeUnwantedItems(t.submenu, screen);
if (('submenu' in t) && isEmptyMenu(t.submenu)) continue;
output.push(t);
}
// Remove leftover separators from now-empty sections
const temp = [];
let previous = null;
for (let i = 0; i < output.length; i++) {
const t = Object.assign({}, output[i]);
if (t.type === 'separator') {
if (!previous) continue;
if (previous.type === 'separator') continue;
}
temp.push(t);
previous = t;
}
output = temp;
return output;
}
const screenTemplate = removeUnwantedItems(template, screen);
const menu = Menu.buildFromTemplate(screenTemplate);
Menu.setApplicationMenu(menu);
this.lastMenuScreen_ = screen;
}
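// updateMenuItemStates enables or disables the edit menu entries depending on
// the current layout and the selected note, and syncs the dev tools checkbox.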
async updateMenuItemStates(layout, state = null) {
if (!this.lastMenuScreen_) return;
if (!this.store() && !state) return;
if (!state) state = this.store().getState();
const selectedNoteIds = state.selectedNoteIds;
const note = selectedNoteIds.length === 1 ? await Note.load(selectedNoteIds[0]) : null;
for (const itemId of ['copy', 'paste', 'cut', 'selectAll', 'bold', 'italic', 'link', 'code', 'insertDateTime', 'commandStartExternalEditing', 'showLocalSearch']) {
const menuItem = Menu.getApplicationMenu().getMenuItemById(`edit:${itemId}`);
if (!menuItem) continue;
const isHtmlNote = !!note && note.markup_language === MarkupToHtml.MARKUP_LANGUAGE_HTML;
menuItem.enabled = !isHtmlNote && layout !== 'viewer' && !!note;
}
const menuItem = Menu.getApplicationMenu().getMenuItemById('help:toggleDevTools');
menuItem.checked = state.devToolsVisible;
}
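// updateTray creates or destroys the tray icon so that it matches the
// "showTrayIcon" setting.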
updateTray() {
const app = bridge().electronApp();
if (app.trayShown() === Setting.value('showTrayIcon')) return;
if (!Setting.value('showTrayIcon')) {
app.destroyTray();
} else {
const contextMenu = Menu.buildFromTemplate([
{ label: _('Open %s', app.electronApp().name), click: () => { app.window().show(); } },
{ type: 'separator' },
{ label: _('Exit'), click: () => { app.quit(); } },
]);
app.createTray(contextMenu);
}
}
updateEditorFont() {
const fontFamilies = [];
if (Setting.value('style.editor.fontFamily')) fontFamilies.push(`"${Setting.value('style.editor.fontFamily')}"`);
fontFamilies.push('monospace');
// The '*' and '!important' parts are necessary to make sure Russian text is displayed properly
// https://github.com/laurent22/joplin/issues/155
const css = `.ace_editor * { font-family: ${fontFamilies.join(', ')} !important; }`;
const styleTag = document.createElement('style');
styleTag.type = 'text/css';
styleTag.appendChild(document.createTextNode(css));
document.head.appendChild(styleTag);
}
async loadCustomCss(filePath) {
let cssString = '';
if (await fs.pathExists(filePath)) {
try {
cssString = await fs.readFile(filePath, 'utf-8');
} catch (error) {
let msg = error.message ? error.message : '';
msg = `Could not load custom css from ${filePath}\n${msg}`;
error.message = msg;
throw error;
}
}
return cssString;
}
// async createManyNotes() {
// return;
// const folderIds = [];
// const randomFolderId = (folderIds) => {
// if (!folderIds.length) return '';
// const idx = Math.floor(Math.random() * folderIds.length);
// if (idx > folderIds.length - 1) throw new Error('Invalid index ' + idx + ' / ' + folderIds.length);
// return folderIds[idx];
// }
// let rootFolderCount = 0;
// let folderCount = 100;
// for (let i = 0; i < folderCount; i++) {
// let parentId = '';
// if (Math.random() >= 0.9 || rootFolderCount >= folderCount / 10) {
// parentId = randomFolderId(folderIds);
// } else {
// rootFolderCount++;
// }
// const folder = await Folder.save({ title: 'folder' + i, parent_id: parentId });
// folderIds.push(folder.id);
// }
// for (let i = 0; i < 10000; i++) {
// const parentId = randomFolderId(folderIds);
// Note.save({ title: 'note' + i, parent_id: parentId });
// }
// }
async start(argv) {
const electronIsDev = require('electron-is-dev');
// If running inside a package, the command line, instead of being "node.exe <path> <flags>" is "joplin.exe <flags>" so
// insert an extra argument so that they can be processed in a consistent way everywhere.
if (!electronIsDev) argv.splice(1, 0, '.');
argv = await super.start(argv);
// Loads app-wide styles. (Markdown preview-specific styles loaded in app.js)
const dir = Setting.value('profileDir');
const filename = Setting.custom_css_files.JOPLIN_APP;
await CssUtils.injectCustomStyles(`${dir}/${filename}`);
AlarmService.setDriver(new AlarmServiceDriverNode({ appName: packageInfo.build.appId }));
AlarmService.setLogger(reg.logger());
reg.setShowErrorMessageBoxHandler((message) => { bridge().showErrorMessageBox(message); });
if (Setting.value('flagOpenDevTools')) {
bridge().openDevTools();
}
PluginManager.instance().dispatch_ = this.dispatch.bind(this);
PluginManager.instance().setLogger(reg.logger());
PluginManager.instance().register(pluginClasses);
this.updateMenu('Main');
this.initRedux();
// Since the settings need to be loaded before the store is created, it will never
// receive the SETTING_UPDATE_ALL event, which means state.settings will not be
// initialised. So we manually call dispatchUpdateAll() to force an update.
Setting.dispatchUpdateAll();
await FoldersScreenUtils.refreshFolders();
const tags = await Tag.allWithNotes();
this.dispatch({
type: 'TAG_UPDATE_ALL',
items: tags,
});
const masterKeys = await MasterKey.all();
this.dispatch({
type: 'MASTERKEY_UPDATE_ALL',
items: masterKeys,
});
this.store().dispatch({
type: 'FOLDER_SELECT',
id: Setting.value('activeFolderId'),
});
this.store().dispatch({
type: 'FOLDER_SET_COLLAPSED_ALL',
ids: Setting.value('collapsedFolderIds'),
});
// Loads custom Markdown preview styles
const cssString = await CssUtils.loadCustomCss(`${Setting.value('profileDir')}/userstyle.css`);
this.store().dispatch({
type: 'LOAD_CUSTOM_CSS',
css: cssString,
});
const templates = await TemplateUtils.loadTemplates(Setting.value('templateDir'));
this.store().dispatch({
type: 'TEMPLATE_UPDATE_ALL',
templates: templates,
});
this.store().dispatch({
type: 'NOTE_DEVTOOLS_SET',
value: Setting.value('flagOpenDevTools'),
});
// Note: Auto-update currently doesn't work in Linux: it downloads the update
// but then doesn't install it on exit.
if (shim.isWindows() || shim.isMac()) {
const runAutoUpdateCheck = () => {
if (Setting.value('autoUpdateEnabled')) {
bridge().checkForUpdates(true, bridge().window(), this.checkForUpdateLoggerPath(), { includePreReleases: Setting.value('autoUpdate.includePreReleases') });
}
};
// Initial check on startup
setTimeout(() => { runAutoUpdateCheck(); }, 5000);
// Then every x hours
setInterval(() => { runAutoUpdateCheck(); }, 12 * 60 * 60 * 1000);
}
this.updateTray();
setTimeout(() => {
AlarmService.garbageCollect();
}, 1000 * 60 * 60);
if (Setting.value('startMinimized') && Setting.value('showTrayIcon')) {
// Keep it hidden
} else {
bridge().window().show();
}
ResourceService.runInBackground();
if (Setting.value('env') === 'dev') {
AlarmService.updateAllNotifications();
} else {
reg.scheduleSync().then(() => {
// Wait for the first sync before updating the notifications, since synchronisation
// might change the notifications.
AlarmService.updateAllNotifications();
DecryptionWorker.instance().scheduleStart();
});
}
const clipperLogger = new Logger();
clipperLogger.addTarget('file', { path: `${Setting.value('profileDir')}/log-clipper.txt` });
clipperLogger.addTarget('console');
ClipperServer.instance().setLogger(clipperLogger);
ClipperServer.instance().setDispatch(this.store().dispatch);
if (Setting.value('clipperServer.autoStart')) {
ClipperServer.instance().start();
}
ExternalEditWatcher.instance().setLogger(reg.logger());
ExternalEditWatcher.instance().dispatch = this.store().dispatch;
RevisionService.instance().runInBackground();
this.updateMenuItemStates();
// Make it available to the console window - useful to call revisionService.collectRevisions()
window.revisionService = RevisionService.instance();
window.migrationService = MigrationService.instance();
window.decryptionWorker = DecryptionWorker.instance();
}
}
let application_ = null;
function app() {
if (!application_) application_ = new Application();
return application_;
}
module.exports = { app };
| 1 | 13,088 | Why did you remove more than the asked command? | laurent22-joplin | js |
@@ -320,11 +320,15 @@ func (s *session) handlePacketImpl(p *receivedPacket) error {
s.largestRcvdPacketNumber,
hdr.PacketNumber,
)
- if utils.Debug() {
- utils.Debugf("<- Reading packet 0x%x (%d bytes) for connection %x @ %s", hdr.PacketNumber, len(data)+len(hdr.Raw), hdr.ConnectionID, time.Now().Format("15:04:05.000"))
- }
packet, err := s.unpacker.Unpack(hdr.Raw, hdr, data)
+ if utils.Debug() {
+ if err != nil {
+ utils.Debugf("<- Reading packet 0x%x (%d bytes) for connection %x @ %s", hdr.PacketNumber, len(data)+len(hdr.Raw), hdr.ConnectionID, time.Now().Format("15:04:05.000"))
+ } else {
+ utils.Debugf("<- Reading packet 0x%x (%d bytes) for connection %x, %s @ %s", hdr.PacketNumber, len(data)+len(hdr.Raw), hdr.ConnectionID, packet.encryptionLevel, time.Now().Format("15:04:05.000"))
+ }
+ }
// if the decryption failed, this might be a packet sent by an attacker
// don't update the remote address
if quicErr, ok := err.(*qerr.QuicError); ok && quicErr.ErrorCode == qerr.DecryptionFailure { | 1 | package quic
import (
"crypto/tls"
"errors"
"fmt"
"net"
"sync/atomic"
"time"
"github.com/lucas-clemente/quic-go/ackhandler"
"github.com/lucas-clemente/quic-go/congestion"
"github.com/lucas-clemente/quic-go/flowcontrol"
"github.com/lucas-clemente/quic-go/frames"
"github.com/lucas-clemente/quic-go/handshake"
"github.com/lucas-clemente/quic-go/protocol"
"github.com/lucas-clemente/quic-go/qerr"
"github.com/lucas-clemente/quic-go/utils"
)
type unpacker interface {
Unpack(publicHeaderBinary []byte, hdr *PublicHeader, data []byte) (*unpackedPacket, error)
}
type receivedPacket struct {
remoteAddr net.Addr
publicHeader *PublicHeader
data []byte
rcvTime time.Time
}
var (
errRstStreamOnInvalidStream = errors.New("RST_STREAM received for unknown stream")
errWindowUpdateOnClosedStream = errors.New("WINDOW_UPDATE received for an already closed stream")
errSessionAlreadyClosed = errors.New("Cannot close session. It was already closed before.")
)
// cryptoChangeCallback is called every time the encryption level changes
// Once the callback has been called with isForwardSecure = true, it is guarantueed to not be called with isForwardSecure = false after that
type cryptoChangeCallback func(session Session, isForwardSecure bool)
// closeCallback is called when a session is closed
type closeCallback func(id protocol.ConnectionID)
// A Session is a QUIC session
type session struct {
connectionID protocol.ConnectionID
perspective protocol.Perspective
version protocol.VersionNumber
closeCallback closeCallback
cryptoChangeCallback cryptoChangeCallback
conn connection
streamsMap *streamsMap
rttStats *congestion.RTTStats
sentPacketHandler ackhandler.SentPacketHandler
receivedPacketHandler ackhandler.ReceivedPacketHandler
streamFramer *streamFramer
flowControlManager flowcontrol.FlowControlManager
unpacker unpacker
packer *packetPacker
cryptoSetup handshake.CryptoSetup
receivedPackets chan *receivedPacket
sendingScheduled chan struct{}
// closeChan is used to notify the run loop that it should terminate.
// If the value is not nil, the error is sent as a CONNECTION_CLOSE.
closeChan chan *qerr.QuicError
runClosed chan struct{}
closed uint32 // atomic bool
undecryptablePackets []*receivedPacket
aeadChanged chan struct{}
nextAckScheduledTime time.Time
connectionParameters handshake.ConnectionParametersManager
lastRcvdPacketNumber protocol.PacketNumber
// Used to calculate the next packet number from the truncated wire
// representation, and sent back in public reset packets
largestRcvdPacketNumber protocol.PacketNumber
sessionCreationTime time.Time
lastNetworkActivityTime time.Time
timer *time.Timer
currentDeadline time.Time
timerRead bool
}
var _ Session = &session{}
// newSession makes a new session
func newSession(conn connection, v protocol.VersionNumber, connectionID protocol.ConnectionID, sCfg *handshake.ServerConfig, closeCallback closeCallback, cryptoChangeCallback cryptoChangeCallback) (packetHandler, error) {
s := &session{
conn: conn,
connectionID: connectionID,
perspective: protocol.PerspectiveServer,
version: v,
closeCallback: closeCallback,
cryptoChangeCallback: cryptoChangeCallback,
connectionParameters: handshake.NewConnectionParamatersManager(protocol.PerspectiveServer, v),
}
s.setup()
cryptoStream, _ := s.GetOrOpenStream(1)
_, _ = s.AcceptStream() // don't expose the crypto stream
var sourceAddr []byte
if udpAddr, ok := conn.RemoteAddr().(*net.UDPAddr); ok {
sourceAddr = udpAddr.IP
} else {
sourceAddr = []byte(conn.RemoteAddr().String())
}
var err error
s.cryptoSetup, err = handshake.NewCryptoSetup(connectionID, sourceAddr, v, sCfg, cryptoStream, s.connectionParameters, s.aeadChanged)
if err != nil {
return nil, err
}
s.packer = newPacketPacker(connectionID, s.cryptoSetup, s.connectionParameters, s.streamFramer, s.perspective, s.version)
s.unpacker = &packetUnpacker{aead: s.cryptoSetup, version: s.version}
return s, err
}
func newClientSession(conn connection, hostname string, v protocol.VersionNumber, connectionID protocol.ConnectionID, tlsConfig *tls.Config, closeCallback closeCallback, cryptoChangeCallback cryptoChangeCallback, negotiatedVersions []protocol.VersionNumber) (*session, error) {
s := &session{
conn: conn,
connectionID: connectionID,
perspective: protocol.PerspectiveClient,
version: v,
closeCallback: closeCallback,
cryptoChangeCallback: cryptoChangeCallback,
connectionParameters: handshake.NewConnectionParamatersManager(protocol.PerspectiveClient, v),
}
s.receivedPacketHandler = ackhandler.NewReceivedPacketHandler(s.ackAlarmChanged)
s.setup()
cryptoStream, _ := s.OpenStream()
var err error
s.cryptoSetup, err = handshake.NewCryptoSetupClient(hostname, connectionID, v, cryptoStream, tlsConfig, s.connectionParameters, s.aeadChanged, negotiatedVersions)
if err != nil {
return nil, err
}
s.packer = newPacketPacker(connectionID, s.cryptoSetup, s.connectionParameters, s.streamFramer, s.perspective, s.version)
s.unpacker = &packetUnpacker{aead: s.cryptoSetup, version: s.version}
return s, err
}
// setup is called from newSession and newClientSession and initializes values that are independent of the perspective
func (s *session) setup() {
s.rttStats = &congestion.RTTStats{}
flowControlManager := flowcontrol.NewFlowControlManager(s.connectionParameters, s.rttStats)
var sentPacketHandler ackhandler.SentPacketHandler
sentPacketHandler = ackhandler.NewSentPacketHandler(s.rttStats)
now := time.Now()
s.sentPacketHandler = sentPacketHandler
s.flowControlManager = flowControlManager
s.receivedPacketHandler = ackhandler.NewReceivedPacketHandler(s.ackAlarmChanged)
s.receivedPackets = make(chan *receivedPacket, protocol.MaxSessionUnprocessedPackets)
s.closeChan = make(chan *qerr.QuicError, 1)
s.sendingScheduled = make(chan struct{}, 1)
s.undecryptablePackets = make([]*receivedPacket, 0, protocol.MaxUndecryptablePackets)
s.aeadChanged = make(chan struct{}, 1)
s.runClosed = make(chan struct{}, 1)
s.timer = time.NewTimer(0)
s.lastNetworkActivityTime = now
s.sessionCreationTime = now
s.streamsMap = newStreamsMap(s.newStream, s.perspective, s.connectionParameters)
s.streamFramer = newStreamFramer(s.streamsMap, s.flowControlManager)
}
// run the session main loop
func (s *session) run() {
// Start the crypto stream handler
go func() {
if err := s.cryptoSetup.HandleCryptoStream(); err != nil {
s.Close(err)
}
}()
runLoop:
for {
// Close immediately if requested
select {
case errForConnClose := <-s.closeChan:
if errForConnClose != nil {
s.sendConnectionClose(errForConnClose)
}
break runLoop
default:
}
s.maybeResetTimer()
var err error
select {
case errForConnClose := <-s.closeChan:
if errForConnClose != nil {
s.sendConnectionClose(errForConnClose)
}
break runLoop
case <-s.timer.C:
s.timerRead = true
// We do all the interesting stuff after the switch statement, so
// nothing to see here.
case <-s.sendingScheduled:
// We do all the interesting stuff after the switch statement, so
// nothing to see here.
case p := <-s.receivedPackets:
err = s.handlePacketImpl(p)
if qErr, ok := err.(*qerr.QuicError); ok && qErr.ErrorCode == qerr.DecryptionFailure {
s.tryQueueingUndecryptablePacket(p)
continue
}
// This is a bit unclean, but works properly, since the packet always
// begins with the public header and we never copy it.
putPacketBuffer(p.publicHeader.Raw)
case <-s.aeadChanged:
s.tryDecryptingQueuedPackets()
s.cryptoChangeCallback(s, s.cryptoSetup.HandshakeComplete())
}
if err != nil {
s.close(err)
}
if err := s.sendPacket(); err != nil {
s.close(err)
}
if time.Now().Sub(s.lastNetworkActivityTime) >= s.idleTimeout() {
s.close(qerr.Error(qerr.NetworkIdleTimeout, "No recent network activity."))
}
if !s.cryptoSetup.HandshakeComplete() && time.Now().Sub(s.sessionCreationTime) >= protocol.MaxTimeForCryptoHandshake {
s.close(qerr.Error(qerr.NetworkIdleTimeout, "Crypto handshake did not complete in time."))
}
s.garbageCollectStreams()
}
s.closeCallback(s.connectionID)
s.runClosed <- struct{}{}
}
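// maybeResetTimer resets the run loop timer to the earliest of the idle
// timeout, the next scheduled ACK, the first RTO and the handshake deadline.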
func (s *session) maybeResetTimer() {
nextDeadline := s.lastNetworkActivityTime.Add(s.idleTimeout())
if !s.nextAckScheduledTime.IsZero() {
nextDeadline = utils.MinTime(nextDeadline, s.nextAckScheduledTime)
}
if rtoTime := s.sentPacketHandler.TimeOfFirstRTO(); !rtoTime.IsZero() {
nextDeadline = utils.MinTime(nextDeadline, rtoTime)
}
if !s.cryptoSetup.HandshakeComplete() {
handshakeDeadline := s.sessionCreationTime.Add(protocol.MaxTimeForCryptoHandshake)
nextDeadline = utils.MinTime(nextDeadline, handshakeDeadline)
}
if nextDeadline.Equal(s.currentDeadline) {
// No need to reset the timer
return
}
// We need to drain the timer if the value from its channel was not read yet.
// See https://groups.google.com/forum/#!topic/golang-dev/c9UUfASVPoU
if !s.timer.Stop() && !s.timerRead {
<-s.timer.C
}
s.timer.Reset(nextDeadline.Sub(time.Now()))
s.timerRead = false
s.currentDeadline = nextDeadline
}
func (s *session) idleTimeout() time.Duration {
if s.cryptoSetup.HandshakeComplete() {
return s.connectionParameters.GetIdleConnectionStateLifetime()
}
return protocol.InitialIdleTimeout
}
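// handlePacketImpl unpacks a single received packet, updates the packet
// number state and passes the contained frames on for handling.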
func (s *session) handlePacketImpl(p *receivedPacket) error {
if s.perspective == protocol.PerspectiveClient {
diversificationNonce := p.publicHeader.DiversificationNonce
if len(diversificationNonce) > 0 {
s.cryptoSetup.SetDiversificationNonce(diversificationNonce)
}
}
if p.rcvTime.IsZero() {
// To simplify testing
p.rcvTime = time.Now()
}
s.lastNetworkActivityTime = p.rcvTime
hdr := p.publicHeader
data := p.data
// Calculate packet number
hdr.PacketNumber = protocol.InferPacketNumber(
hdr.PacketNumberLen,
s.largestRcvdPacketNumber,
hdr.PacketNumber,
)
if utils.Debug() {
utils.Debugf("<- Reading packet 0x%x (%d bytes) for connection %x @ %s", hdr.PacketNumber, len(data)+len(hdr.Raw), hdr.ConnectionID, time.Now().Format("15:04:05.000"))
}
packet, err := s.unpacker.Unpack(hdr.Raw, hdr, data)
// if the decryption failed, this might be a packet sent by an attacker
// don't update the remote address
if quicErr, ok := err.(*qerr.QuicError); ok && quicErr.ErrorCode == qerr.DecryptionFailure {
return err
}
if s.perspective == protocol.PerspectiveServer {
// update the remote address, even if unpacking failed for any other reason than a decryption error
s.conn.SetCurrentRemoteAddr(p.remoteAddr)
}
if err != nil {
return err
}
s.lastRcvdPacketNumber = hdr.PacketNumber
// Only do this after decrypting, so we are sure the packet is not attacker-controlled
s.largestRcvdPacketNumber = utils.MaxPacketNumber(s.largestRcvdPacketNumber, hdr.PacketNumber)
err = s.receivedPacketHandler.ReceivedPacket(hdr.PacketNumber, packet.IsRetransmittable())
// ignore duplicate packets
if err == ackhandler.ErrDuplicatePacket {
utils.Infof("Ignoring packet 0x%x due to ErrDuplicatePacket", hdr.PacketNumber)
return nil
}
// ignore packets with packet numbers smaller than the LeastUnacked of a StopWaiting
if err == ackhandler.ErrPacketSmallerThanLastStopWaiting {
utils.Infof("Ignoring packet 0x%x due to ErrPacketSmallerThanLastStopWaiting", hdr.PacketNumber)
return nil
}
if err != nil {
return err
}
return s.handleFrames(packet.frames)
}
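// handleFrames dispatches every frame of an unpacked packet to its specific
// handler and ignores the errors that are expected during normal operation.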
func (s *session) handleFrames(fs []frames.Frame) error {
for _, ff := range fs {
var err error
frames.LogFrame(ff, false)
switch frame := ff.(type) {
case *frames.StreamFrame:
err = s.handleStreamFrame(frame)
case *frames.AckFrame:
err = s.handleAckFrame(frame)
case *frames.ConnectionCloseFrame:
s.closeImpl(qerr.Error(frame.ErrorCode, frame.ReasonPhrase), true)
case *frames.GoawayFrame:
err = errors.New("unimplemented: handling GOAWAY frames")
case *frames.StopWaitingFrame:
err = s.receivedPacketHandler.ReceivedStopWaiting(frame)
case *frames.RstStreamFrame:
err = s.handleRstStreamFrame(frame)
case *frames.WindowUpdateFrame:
err = s.handleWindowUpdateFrame(frame)
case *frames.BlockedFrame:
case *frames.PingFrame:
default:
return errors.New("Session BUG: unexpected frame type")
}
if err != nil {
switch err {
case ackhandler.ErrDuplicateOrOutOfOrderAck:
// Can happen e.g. when packets thought missing arrive late
case errRstStreamOnInvalidStream:
// Can happen when RST_STREAMs arrive early or late (?)
utils.Errorf("Ignoring error in session: %s", err.Error())
case errWindowUpdateOnClosedStream:
// Can happen when we already sent the last StreamFrame with the FinBit, but the client already sent a WindowUpdate for this Stream
default:
return err
}
}
}
return nil
}
// handlePacket is called by the server with a new packet
func (s *session) handlePacket(p *receivedPacket) {
// Discard packets once the amount of queued packets is larger than
// the channel size, protocol.MaxSessionUnprocessedPackets
select {
case s.receivedPackets <- p:
default:
}
}
func (s *session) handleStreamFrame(frame *frames.StreamFrame) error {
str, err := s.streamsMap.GetOrOpenStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
// Stream is closed and already garbage collected
// ignore this StreamFrame
return nil
}
err = str.AddStreamFrame(frame)
if err != nil {
return err
}
return nil
}
func (s *session) handleWindowUpdateFrame(frame *frames.WindowUpdateFrame) error {
if frame.StreamID != 0 {
str, err := s.streamsMap.GetOrOpenStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
return errWindowUpdateOnClosedStream
}
}
_, err := s.flowControlManager.UpdateWindow(frame.StreamID, frame.ByteOffset)
return err
}
func (s *session) handleRstStreamFrame(frame *frames.RstStreamFrame) error {
str, err := s.streamsMap.GetOrOpenStream(frame.StreamID)
if err != nil {
return err
}
if str == nil {
return errRstStreamOnInvalidStream
}
str.RegisterRemoteError(fmt.Errorf("RST_STREAM received with code %d", frame.ErrorCode))
return s.flowControlManager.ResetStream(frame.StreamID, frame.ByteOffset)
}
func (s *session) handleAckFrame(frame *frames.AckFrame) error {
if err := s.sentPacketHandler.ReceivedAck(frame, s.lastRcvdPacketNumber, s.lastNetworkActivityTime); err != nil {
return err
}
return nil
}
// Close the connection. If err is nil it will be set to qerr.PeerGoingAway.
// It waits until the run loop has stopped before returning
func (s *session) Close(e error) error {
err := s.closeImpl(e, false)
if err == errSessionAlreadyClosed {
return nil
}
// wait for the run loop to finish
<-s.runClosed
return err
}
// close the connection. Use this when called from the run loop
func (s *session) close(e error) error {
err := s.closeImpl(e, false)
if err == errSessionAlreadyClosed {
return nil
}
return err
}
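// closeImpl performs the actual teardown. It runs at most once, cancels all
// streams, sends a public reset on decryption failures and otherwise tells the
// run loop whether a CONNECTION_CLOSE still has to be sent.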
func (s *session) closeImpl(e error, remoteClose bool) error {
// Only close once
if !atomic.CompareAndSwapUint32(&s.closed, 0, 1) {
return errSessionAlreadyClosed
}
if e == errCloseSessionForNewVersion {
s.streamsMap.CloseWithError(e)
s.closeStreamsWithError(e)
// when the run loop exits, it will call the closeCallback
// replace it with a no-op function to make sure this doesn't have any effect
s.closeCallback = func(protocol.ConnectionID) {}
s.closeChan <- nil
return nil
}
if e == nil {
e = qerr.PeerGoingAway
}
quicErr := qerr.ToQuicError(e)
// Don't log 'normal' reasons
if quicErr.ErrorCode == qerr.PeerGoingAway || quicErr.ErrorCode == qerr.NetworkIdleTimeout {
utils.Infof("Closing connection %x", s.connectionID)
} else {
utils.Errorf("Closing session with error: %s", e.Error())
}
s.streamsMap.CloseWithError(quicErr)
s.closeStreamsWithError(quicErr)
if remoteClose {
// If this is a remote close we don't need to send a CONNECTION_CLOSE
s.closeChan <- nil
return nil
}
if quicErr.ErrorCode == qerr.DecryptionFailure {
// If we send a public reset, don't send a CONNECTION_CLOSE
s.closeChan <- nil
return s.sendPublicReset(s.lastRcvdPacketNumber)
}
s.closeChan <- quicErr
return nil
}
func (s *session) closeStreamsWithError(err error) {
s.streamsMap.Iterate(func(str *stream) (bool, error) {
str.Cancel(err)
return true, nil
})
}
func (s *session) sendPacket() error {
// Repeatedly try sending until we don't have any more data, or run out of the congestion window
for {
err := s.sentPacketHandler.CheckForError()
if err != nil {
return err
}
// Do this before checking the congestion, since we might de-congestionize here :)
s.sentPacketHandler.MaybeQueueRTOs()
if !s.sentPacketHandler.SendingAllowed() {
return nil
}
var controlFrames []frames.Frame
// get WindowUpdate frames
// this call triggers the flow controller to increase the flow control windows, if necessary
windowUpdateFrames, err := s.getWindowUpdateFrames()
if err != nil {
return err
}
for _, wuf := range windowUpdateFrames {
controlFrames = append(controlFrames, wuf)
}
// check for retransmissions first
for {
retransmitPacket := s.sentPacketHandler.DequeuePacketForRetransmission()
if retransmitPacket == nil {
break
}
utils.Debugf("\tDequeueing retransmission for packet 0x%x", retransmitPacket.PacketNumber)
// resend the frames that were in the packet
for _, frame := range retransmitPacket.GetFramesForRetransmission() {
switch frame.(type) {
case *frames.StreamFrame:
s.streamFramer.AddFrameForRetransmission(frame.(*frames.StreamFrame))
case *frames.WindowUpdateFrame:
// only retransmit WindowUpdates if the stream is not yet closed and we haven't sent another WindowUpdate with a higher ByteOffset for that stream
var currentOffset protocol.ByteCount
f := frame.(*frames.WindowUpdateFrame)
currentOffset, err = s.flowControlManager.GetReceiveWindow(f.StreamID)
if err == nil && f.ByteOffset >= currentOffset {
controlFrames = append(controlFrames, frame)
}
default:
controlFrames = append(controlFrames, frame)
}
}
}
ack := s.receivedPacketHandler.GetAckFrame()
if ack != nil {
controlFrames = append(controlFrames, ack)
}
hasRetransmission := s.streamFramer.HasFramesForRetransmission()
var stopWaitingFrame *frames.StopWaitingFrame
if ack != nil || hasRetransmission {
stopWaitingFrame = s.sentPacketHandler.GetStopWaitingFrame(hasRetransmission)
}
packet, err := s.packer.PackPacket(stopWaitingFrame, controlFrames, s.sentPacketHandler.GetLeastUnacked())
if err != nil {
return err
}
if packet == nil {
return nil
}
// send every window update twice
for _, f := range windowUpdateFrames {
s.packer.QueueControlFrameForNextPacket(f)
}
err = s.sentPacketHandler.SentPacket(&ackhandler.Packet{
PacketNumber: packet.number,
Frames: packet.frames,
Length: protocol.ByteCount(len(packet.raw)),
})
if err != nil {
return err
}
s.logPacket(packet)
err = s.conn.Write(packet.raw)
putPacketBuffer(packet.raw)
if err != nil {
return err
}
s.nextAckScheduledTime = time.Time{}
}
}
func (s *session) sendConnectionClose(quicErr *qerr.QuicError) error {
packet, err := s.packer.PackConnectionClose(&frames.ConnectionCloseFrame{ErrorCode: quicErr.ErrorCode, ReasonPhrase: quicErr.ErrorMessage}, s.sentPacketHandler.GetLeastUnacked())
if err != nil {
return err
}
if packet == nil {
return errors.New("Session BUG: expected packet not to be nil")
}
s.logPacket(packet)
return s.conn.Write(packet.raw)
}
func (s *session) logPacket(packet *packedPacket) {
if !utils.Debug() {
// We don't need to allocate the slices for calling the format functions
return
}
if utils.Debug() {
utils.Debugf("-> Sending packet 0x%x (%d bytes) @ %s", packet.number, len(packet.raw), time.Now().Format("15:04:05.000"))
for _, frame := range packet.frames {
frames.LogFrame(frame, true)
}
}
}
// GetOrOpenStream either returns an existing stream, a newly opened stream, or nil if a stream with the provided ID is already closed.
// Newly opened streams should only originate from the client. To open a stream from the server, OpenStream should be used.
func (s *session) GetOrOpenStream(id protocol.StreamID) (Stream, error) {
str, err := s.streamsMap.GetOrOpenStream(id)
if str != nil {
return str, err
}
// make sure to return an actual nil value here, not a Stream with value nil
return nil, err
}
// AcceptStream returns the next stream opened by the peer
func (s *session) AcceptStream() (Stream, error) {
return s.streamsMap.AcceptStream()
}
// OpenStream opens a stream
func (s *session) OpenStream() (Stream, error) {
return s.streamsMap.OpenStream()
}
func (s *session) OpenStreamSync() (Stream, error) {
return s.streamsMap.OpenStreamSync()
}
func (s *session) queueResetStreamFrame(id protocol.StreamID, offset protocol.ByteCount) {
s.packer.QueueControlFrameForNextPacket(&frames.RstStreamFrame{
StreamID: id,
ByteOffset: offset,
})
s.scheduleSending()
}
func (s *session) newStream(id protocol.StreamID) (*stream, error) {
stream, err := newStream(id, s.scheduleSending, s.queueResetStreamFrame, s.flowControlManager)
if err != nil {
return nil, err
}
// TODO: find a better solution for determining which streams contribute to connection level flow control
if id == 1 || id == 3 {
s.flowControlManager.NewStream(id, false)
} else {
s.flowControlManager.NewStream(id, true)
}
return stream, nil
}
// garbageCollectStreams goes through all streams and removes EOF'ed streams
// from the streams map.
func (s *session) garbageCollectStreams() {
s.streamsMap.Iterate(func(str *stream) (bool, error) {
id := str.StreamID()
if str.finished() {
err := s.streamsMap.RemoveStream(id)
if err != nil {
return false, err
}
s.flowControlManager.RemoveStream(id)
}
return true, nil
})
}
func (s *session) sendPublicReset(rejectedPacketNumber protocol.PacketNumber) error {
utils.Infof("Sending public reset for connection %x, packet number %d", s.connectionID, rejectedPacketNumber)
return s.conn.Write(writePublicReset(s.connectionID, rejectedPacketNumber, 0))
}
// scheduleSending signals that we have data for sending
func (s *session) scheduleSending() {
select {
case s.sendingScheduled <- struct{}{}:
default:
}
}
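// tryQueueingUndecryptablePacket buffers a packet that could not be decrypted
// yet (only while the handshake is still running) so it can be retried once the
// AEAD changes; the session is closed if too many such packets accumulate.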
func (s *session) tryQueueingUndecryptablePacket(p *receivedPacket) {
if s.cryptoSetup.HandshakeComplete() {
return
}
utils.Infof("Queueing packet 0x%x for later decryption", p.publicHeader.PacketNumber)
if len(s.undecryptablePackets)+1 >= protocol.MaxUndecryptablePackets {
s.close(qerr.Error(qerr.DecryptionFailure, "too many undecryptable packets received"))
}
s.undecryptablePackets = append(s.undecryptablePackets, p)
}
func (s *session) tryDecryptingQueuedPackets() {
for _, p := range s.undecryptablePackets {
s.handlePacket(p)
}
s.undecryptablePackets = s.undecryptablePackets[:0]
}
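// getWindowUpdateFrames asks the flow control manager for pending window
// updates and converts them into WindowUpdate frames.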
func (s *session) getWindowUpdateFrames() ([]*frames.WindowUpdateFrame, error) {
updates := s.flowControlManager.GetWindowUpdates()
res := make([]*frames.WindowUpdateFrame, len(updates))
for i, u := range updates {
res[i] = &frames.WindowUpdateFrame{StreamID: u.StreamID, ByteOffset: u.Offset}
}
return res, nil
}
func (s *session) ackAlarmChanged(t time.Time) {
s.nextAckScheduledTime = t
s.maybeResetTimer()
}
// RemoteAddr returns the net.Addr of the client
func (s *session) RemoteAddr() net.Addr {
return s.conn.RemoteAddr()
}
| 1 | 5,696 | I just realized that we are logging time now - we shouldn't do that here, that's the logger's job. I've opened a new issue for that #444. | lucas-clemente-quic-go | go |
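The review comment above argues that call sites should not format time.Now() into every Debugf line; timestamps are the logger's job. As a minimal, hypothetical sketch of that idea (using the standard library log package rather than the project's utils logger, and made-up packet values), the call site reports only the event and the logger prepends the time:

package main

import (
	"log"
	"os"
)

func main() {
	// The Ltime|Lmicroseconds flags make the logger prefix every line with the
	// current wall-clock time, so callers never format time.Now() themselves.
	logger := log.New(os.Stderr, "", log.Ltime|log.Lmicroseconds)

	// A call site then logs only the event-specific fields.
	packetNumber := uint64(0x2a) // hypothetical packet number
	size := 1350                 // hypothetical packet size in bytes
	logger.Printf("-> Sending packet 0x%x (%d bytes)", packetNumber, size)
}

The same effect could be achieved by configuring whichever logger the project uses to emit timestamps once, centrally, instead of at each Debugf call.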
@@ -44,13 +44,13 @@ const char *MolHolderBaseDoc =
"The API is quite simple: \n"
" AddMol(mol) -> adds a molecule to the molecule holder, returns index of "
"molecule\n"
- " GetMol(idx) -> return the molecule at index idx\n";
+ " GetMol(idx,sanitize=True) -> return the molecule at index idx\n";
const char *MolHolderDoc =
"Holds raw in-memory molecules\n"
" AddMol(mol) -> adds a molecule to the molecule holder, returns index of "
"molecule\n"
- " GetMol(idx) -> return the molecule at index idx\n";
+ " GetMol(idx,sanitize=True) -> return the molecule at index idx\n";
const char *CachedMolHolderDoc =
"Holds molecules in their binary representation.\n" | 1 | // Copyright (c) 2017-2019, Novartis Institutes for BioMedical Research Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Novartis Institutes for BioMedical Research Inc.
// nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written
// permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
#include <RDBoost/python.h>
#include <RDBoost/Wrap.h>
#include <GraphMol/RDKitBase.h>
#include <GraphMol/SubstructLibrary/SubstructLibrary.h>
namespace python = boost::python;
namespace RDKit {
const char *MolHolderBaseDoc =
"Base class for holding molecules used in the Substructure Library.\n"
"Instantiations of this class are passed into the SubstructureLibrary.\n"
"The API is quite simple: \n"
" AddMol(mol) -> adds a molecule to the molecule holder, returns index of "
"molecule\n"
" GetMol(idx) -> return the molecule at index idx\n";
const char *MolHolderDoc =
"Holds raw in-memory molecules\n"
" AddMol(mol) -> adds a molecule to the molecule holder, returns index of "
"molecule\n"
" GetMol(idx) -> return the molecule at index idx\n";
const char *CachedMolHolderDoc =
"Holds molecules in their binary representation.\n"
"This allows more molecules to be held in memory at a time\n"
" AddMol(mol) -> adds a molecule to the molecule holder, returns index of "
"molecule\n"
" AddBinary(data) -> adds a picked molecule molecule to the molecule "
"holder, returns index of molecule\n"
" The data is stored as-is, no checking is done for "
"validity.\n"
" GetMol(idx) -> return the molecule at index idx\n";
const char *CachedSmilesMolHolderDoc =
"Holds molecules as smiles string\n"
"This allows more molecules to be held in memory at a time\n"
" AddMol(mol) -> adds a molecule to the molecule holder, returns index of "
"molecule\n"
" AddSmiles(smiles) -> adds a smiles string to the molecule holder, "
"returns index of molecule\n"
" The smiles is stored as-is, no checking is done "
"for validity.\n"
" GetMol(idx) -> return the molecule at index idx\n";
const char *CachedTrustedSmilesMolHolderDoc =
"Holds molecules as trusted smiles string\n"
"This allows more molecules to be held in memory at a time and avoids "
"RDKit sanitization\n"
"overhead\n"
"See: "
"http://rdkit.blogspot.com/2016/09/avoiding-unnecessary-work-and.html\n"
" AddMol(mol) -> adds a molecule to the molecule holder, returns index of "
"molecule\n"
" AddSmiles(smiles) -> adds a smiles string to the molecule holder, "
"returns index of molecule\n"
" The smiles is stored as-is, no checking is done "
"for validity.\n"
" GetMol(idx) -> return the molecule at index idx\n";
const char *PatternHolderDoc =
"Holds fingerprints used for filtering of molecules.";
const char *SubstructLibraryDoc =
"SubstructLibrary: This provides a simple API for substructure searching "
"large datasets\n"
"The SubstructLibrary takes full advantage of available threads during the "
"search operation.\n"
"Basic operation is simple\n"
">>> from __future__ import print_function\n"
">>> import os\n"
">>> from rdkit import Chem, RDConfig\n"
">>> from rdkit.Chem import rdSubstructLibrary\n"
">>> library = rdSubstructLibrary.SubstructLibrary()\n"
">>> for mol in Chem.SDMolSupplier(os.path.join(RDConfig.RDDataDir, \n"
"... 'NCI', 'first_200.props.sdf')):\n"
"... idx = library.AddMol(mol)\n"
">>> core = Chem.MolFromSmarts('CCCCOC')\n"
">>> indices = library.GetMatches(core)\n"
">>> len(indices)\n"
"11\n"
"\n"
"\n"
"Substructure matching options can be sent into GetMatches:\n"
">>> indices = library.GetMatches(core, useChirality=False) \n"
">>> len(indices)\n"
"11\n"
"\n"
"Controlling the number of threads or the maximum number of matches "
"returned:\n"
"is also available (the default is to run on all cores)\n"
">>> indices = library.GetMatches(core, numThreads=2, maxResults=10) \n"
">>> len(indices)\n"
"10\n"
"\n"
"Working on larger datasets:\n"
"\n"
"Molecules are fairly large objects and will limit the number that can be "
"kept in memory.\n"
"To assist this we supply three other molecule holders:\n"
" CachedMolHolder - stores molecules as their pickled representation\n"
" CachedSmilesMolHolder - stores molecules internally as smiles strings\n"
" CachedTrustedSmilesMolHolder = excepts (and stores) molecules as "
"trusted smiles strings\n"
"\n"
"Using Pattern fingerprints as a pre-filter:"
"\n"
"Pattern fingerprints provide an easy way to indicate whether the "
"substructure search should be\n"
"be done at all. This is particulary useful with the Binary and Smiles "
"based molecule holders\n"
"as they have an expensive molecule creation step in addition to the "
"substructure searching step\n "
"\n"
">>> library = "
"rdSubstructLibrary.SubstructLibrary(rdSubstructLibrary."
"CachedSmilesMolHolder(), \n"
"... "
"rdSubstructLibrary.PatternHolder())\n"
">>> for mol in Chem.SDMolSupplier(os.path.join(RDConfig.RDDataDir, \n"
"... 'NCI', 'first_200.props.sdf')):\n"
"... idx = library.AddMol(mol)\n"
">>> indices = library.GetMatches(core)\n"
">>> len(indices)\n"
"11\n"
"\n"
"This (obviously) takes longer to initialize. However, both the molecule "
"and pattern\n"
"holders can be populated with raw data, a simple example is below:\n"
">>> import csv\n"
">>> molholder = rdSubstructLibrary.CachedSmilesMolHolder()\n"
">>> pattern_holder = rdSubstructLibrary.PatternHolder()\n"
">>> for i, row in "
"enumerate(csv.reader(open(os.path.join(RDConfig.RDDataDir, \n"
"... 'NCI', 'first_200.tpsa.csv')))):\n"
"... if i:\n"
"... idx = molholder.AddSmiles(row[0])\n"
"... idx2 = pattern_holder.AddFingerprint(\n"
"... pattern_holder.MakeFingerprint(Chem.MolFromSmiles(row[0])))\n"
"... assert idx==idx2\n"
">>> library = "
"rdSubstructLibrary.SubstructLibrary(molholder,pattern_holder)\n"
">>> indices = library.GetMatches(core)\n"
">>> len(indices)\n"
"11\n"
"";
python::object SubstructLibrary_Serialize(const SubstructLibrary &cat) {
std::string res = cat.Serialize();
python::object retval = python::object(
python::handle<>(PyBytes_FromStringAndSize(res.c_str(), res.length())));
return retval;
}
struct substructlibrary_pickle_suite : python::pickle_suite {
static python::tuple getinitargs(const SubstructLibrary &self) {
std::string res;
if (!SubstructLibraryCanSerialize()) {
throw_runtime_error("Pickling of FilterCatalog instances is not enabled");
}
res = self.Serialize();
return python::make_tuple(python::object(python::handle<>(
PyBytes_FromStringAndSize(res.c_str(), res.length()))));
};
};
struct substructlibrary_wrapper {
static void wrap() {
// n.b. there can only be one of these in all wrappings
// python::class_<std::vector<unsigned int> >("UIntVect").def(
// python::vector_indexing_suite<std::vector<unsigned int>, true>());
python::class_<MolHolderBase, boost::noncopyable>("MolHolderBase", "",
python::no_init)
.def("AddMol", &MolHolderBase::addMol,
"Adds molecle to the molecule holder")
.def("GetMol", &MolHolderBase::getMol,
"Returns a particular molecule in the molecule holder\n\n"
" ARGUMENTS:\n"
" - idx: which molecule to return\n\n"
" NOTE: molecule indices start at 0\n")
.def("__len__", &MolHolderBase::size);
python::class_<MolHolder, boost::shared_ptr<MolHolder>,
python::bases<MolHolderBase>>("MolHolder", MolHolderDoc,
python::init<>());
python::class_<CachedMolHolder, boost::shared_ptr<CachedMolHolder>,
python::bases<MolHolderBase>>(
"CachedMolHolder", CachedMolHolderDoc, python::init<>())
.def("AddBinary", &CachedMolHolder::addBinary, (python::args("pickle")),
"Add a binary pickle to the molecule holder, no checking is done "
"on the input data");
python::class_<CachedSmilesMolHolder,
boost::shared_ptr<CachedSmilesMolHolder>,
python::bases<MolHolderBase>>(
"CachedSmilesMolHolder", CachedSmilesMolHolderDoc, python::init<>())
.def("AddSmiles", &CachedSmilesMolHolder::addSmiles,
(python::args("smiles")),
"Add a trusted smiles string to the molecule holder, no checking "
"is done on the input data");
python::class_<CachedTrustedSmilesMolHolder,
boost::shared_ptr<CachedTrustedSmilesMolHolder>,
python::bases<MolHolderBase>>(
"CachedTrustedSmilesMolHolder", CachedTrustedSmilesMolHolderDoc,
python::init<>())
.def("AddSmiles", &CachedTrustedSmilesMolHolder::addSmiles,
(python::args("smiles")),
"Add a trusted smiles string to the molecule holder, no checking "
"is done on the input data");
python::class_<FPHolderBase, boost::shared_ptr<FPHolderBase>,
boost::noncopyable>("FPHolderBase", "", python::no_init)
.def("AddMol", &FPHolderBase::addMol,
"Adds a molecule to the fingerprint database, returns the index "
"of the new pattern")
.def("AddFingerprint", &FPHolderBase::addFingerprint,
"Adds a raw bit vector to the fingerprint database, returns the "
"index of the supplied pattern")
.def("GetFingerprint", &FPHolderBase::getFingerprint,
python::return_value_policy<python::reference_existing_object>(),
"Return the bit vector at the specified index")
.def("PassesFilter", &FPHolderBase::passesFilter,
(python::args("idx"), python::args("query")),
"Returns True if the specified index passes the filter supplied "
"by the query bit vector")
.def("MakeFingerprint", &FPHolderBase::makeFingerprint,
(python::arg("mol")),
python::return_value_policy<python::manage_new_object>(),
"Compute the query bits for the holder");
python::class_<PatternHolder, boost::shared_ptr<PatternHolder>,
python::bases<FPHolderBase>>(
"PatternHolder", PatternHolderDoc, python::init<>());
python::class_<SubstructLibrary, SubstructLibrary *,
const SubstructLibrary *>(
"SubstructLibrary", SubstructLibraryDoc, python::init<>())
.def(python::init<boost::shared_ptr<MolHolderBase>>())
.def(python::init<boost::shared_ptr<MolHolderBase>,
boost::shared_ptr<FPHolderBase>>())
.def(python::init<std::string>())
.def("AddMol", &SubstructLibrary::addMol, (python::arg("mol")),
"Adds a molecule to the substruct library")
.def("GetMatches", (std::vector<unsigned int>(SubstructLibrary::*)(
const ROMol &, bool, bool, bool, int, int)) &
SubstructLibrary::getMatches,
(python::arg("query"), python::arg("recursionPossible") = true,
python::arg("useChirality") = true,
python::arg("useQueryQueryMatches") = false,
python::arg("numThreads") = -1, python::arg("maxResults") = 1000),
"Get the matches for the query.\n\n"
" Arguments:\n"
" - query: substructure query\n"
" - numThreads: number of threads to use, -1 means all threads\n"
" - maxResults: maximum number of results to return")
.def("GetMatches", (std::vector<unsigned int>(SubstructLibrary::*)(
const ROMol &, unsigned int, unsigned int, bool,
bool, bool, int, int)) &
SubstructLibrary::getMatches,
(python::arg("query"), python::arg("startIdx"),
python::arg("endIdx"), python::arg("recursionPossible") = true,
python::arg("useChirality") = true,
python::arg("useQueryQueryMatches") = false,
python::arg("numThreads") = -1, python::arg("maxResults") = 1000),
"Get the matches for the query.\n\n"
" Arguments:\n"
" - query: substructure query\n"
" - startIdx: index to search from\n"
" - endIdx: index (non-inclusize) to search to\n"
" - numThreads: number of threads to use, -1 means all threads\n"
" - maxResults: maximum number of results to return")
.def("CountMatches", (unsigned int (SubstructLibrary::*)(
const ROMol &, bool, bool, bool, int)) &
SubstructLibrary::countMatches,
(python::arg("query"), python::arg("recursionPossible") = true,
python::arg("useChirality") = true,
python::arg("useQueryQueryMatches") = false,
python::arg("numThreads") = -1, python::arg("maxResults") = 1000),
"Get the matches for the query.\n\n"
" Arguments:\n"
" - query: substructure query\n"
" - numThreads: number of threads to use, -1 means all threads\n")
.def("CountMatches", (unsigned int (SubstructLibrary::*)(
const ROMol &, unsigned int, unsigned int,
bool, bool, bool, int)) &
SubstructLibrary::countMatches,
(python::arg("query"), python::arg("startIdx"),
python::arg("endIdx"), python::arg("recursionPossible") = true,
python::arg("useChirality") = true,
python::arg("useQueryQueryMatches") = false,
python::arg("numThreads") = -1),
"Get the matches for the query.\n\n"
" Arguments:\n"
" - query: substructure query\n"
" - startIdx: index to search from\n"
" - endIdx: index (non-inclusize) to search to\n"
" - numThreads: number of threads to use, -1 means all threads\n")
.def("HasMatch", (bool (SubstructLibrary::*)(const ROMol &, bool, bool,
bool, int)) &
SubstructLibrary::hasMatch,
(python::arg("query"), python::arg("recursionPossible") = true,
python::arg("useChirality") = true,
python::arg("useQueryQueryMatches") = false,
python::arg("numThreads") = -1),
"Get the matches for the query.\n\n"
" Arguments:\n"
" - query: substructure query\n"
" - numThreads: number of threads to use, -1 means all threads\n")
.def("HasMatch",
(bool (SubstructLibrary::*)(const ROMol &, unsigned int,
unsigned int, bool, bool, bool, int)) &
SubstructLibrary::hasMatch,
(python::arg("query"), python::arg("startIdx"),
python::arg("endIdx"), python::arg("recursionPossible") = true,
python::arg("useChirality") = true,
python::arg("useQueryQueryMatches") = false,
python::arg("numThreads") = -1),
"Get the matches for the query.\n\n"
" Arguments:\n"
" - query: substructure query\n"
" - startIdx: index to search from\n"
" - endIdx: index (non-inclusize) to search to\n"
" - numThreads: number of threads to use, -1 means all threads\n")
.def("GetMol", &SubstructLibrary::getMol,
"Returns a particular molecule in the molecule holder\n\n"
" ARGUMENTS:\n"
" - idx: which molecule to return\n\n"
" NOTE: molecule indices start at 0\n")
.def("__len__", &SubstructLibrary::size)
.def("Serialize", &SubstructLibrary_Serialize)
// enable pickle support
.def_pickle(substructlibrary_pickle_suite())
;
python::def("SubstructLibraryCanSerialize", SubstructLibraryCanSerialize,
"Returns True if the SubstructLibrary is serializable "
"(requires boost serialization");
}
};
}
void wrap_substructlibrary() { RDKit::substructlibrary_wrapper::wrap(); }
| 1 | 20,104 | why is the `sanitize=True` here in the docs? | rdkit-rdkit | cpp |
@@ -9,6 +9,9 @@ class Approval < ActiveRecord::Base
end
belongs_to :proposal
+ belongs_to :user
+ has_many :delegations, through: :user, source: :outgoing_delegates
+ has_many :delegates, through: :delegations, source: :assignee
acts_as_list scope: :proposal
belongs_to :parent, class_name: 'Approval' | 1 | class Approval < ActiveRecord::Base
include WorkflowModel
has_paper_trail
workflow do # overwritten in child classes
state :pending
state :actionable
state :approved
end
belongs_to :proposal
acts_as_list scope: :proposal
belongs_to :parent, class_name: 'Approval'
has_many :child_approvals, class_name: 'Approval', foreign_key: 'parent_id'
scope :individual, -> { where(type: 'Approvals::Individual') }
self.statuses.each do |status|
scope status, -> { where(status: status) }
end
default_scope { order('position ASC') }
def notify_parent_approved
if self.parent
self.parent.child_approved!(self)
else
self.proposal.partial_approve!
end
end
def children_approved?
self.child_approvals.where.not(status: "approved").empty?
end
end
| 1 | 13,749 | Didn't end up using this, but I think it's useful anyway. | 18F-C2 | rb |
@@ -121,7 +121,16 @@ namespace Microsoft.DotNet.Build.Tasks
// Every test project comes with 4 of them, so not producing a warning here.
else if (!string.IsNullOrEmpty(relativePath))
{
- copyCommands.AppendLine($"call :copyandcheck \"%PACKAGE_DIR%\\{relativePath}\" \"%EXECUTION_DIR%\\{Path.GetFileName(relativePath)}\" || exit /b -1");
+ bool? preserveSubDirectories = dependency.GetMetadata("PreserveSubDirectories")?.Equals("true", StringComparison.OrdinalIgnoreCase);
+ string filePath = Path.GetFileName(relativePath);
+ if (preserveSubDirectories == true)
+ {
+ // This removes the first two directories from the path; they will always be the dependency name followed by the version used
+ int indexOfSubDirectories = relativePath.IndexOf("\\", relativePath.IndexOf("\\")+1);
+ filePath = relativePath.Substring(indexOfSubDirectories);
+ copyCommands.AppendLine($"call :makedir \"%EXECUTION_DIR%{Path.GetDirectoryName(filePath)}\" || exit /b -1");
+ }
+ copyCommands.AppendLine($"call :copyandcheck \"%PACKAGE_DIR%\\{relativePath}\" \"%EXECUTION_DIR%\\{filePath}\" || exit /b -1");
}
}
cmdExecutionTemplate = cmdExecutionTemplate.Replace("[[CopyFilesCommands]]", copyCommands.ToString()); | 1 | using Microsoft.Build.Framework;
using Microsoft.Build.Utilities;
using System;
using System.IO;
using System.Text;
namespace Microsoft.DotNet.Build.Tasks
{
public class GenerateTestExecutionScripts : Task
{
[Required]
public string[] TestCommands { get; set; }
[Required]
public ITaskItem[] TestDependencies { get; set; }
[Required]
public string RunnerScriptTemplate { get; set; }
[Required]
public string ScriptOutputPath { get; set; }
public override bool Execute()
{
if (TestCommands.Length == 0)
{
throw new InvalidOperationException("Please provide at least one test command To execute via the TestCommands property.");
}
if (!File.Exists(RunnerScriptTemplate))
{
throw new FileNotFoundException($"Runner script template {RunnerScriptTemplate} was not found.");
}
string executionScriptTemplate = File.ReadAllText(RunnerScriptTemplate);
Directory.CreateDirectory(Path.GetDirectoryName(ScriptOutputPath));
Log.LogMessage($"Test Command lines = {string.Join(Environment.NewLine, TestCommands)}");
string extension = Path.GetExtension(Path.GetFileName(ScriptOutputPath)).ToLowerInvariant();
switch (extension)
{
case ".sh":
WriteShExecutionScript(executionScriptTemplate, ScriptOutputPath);
break;
case ".cmd":
case ".bat":
WriteCmdExecutionScript(executionScriptTemplate, ScriptOutputPath);
break;
default:
throw new System.NotSupportedException($"Generating runner scripts with extension '{extension}' is not yet supported");
}
return true;
}
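// Builds the Unix shell (.sh) runner script: expands the file copy commands and the test run commands into the template.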
private void WriteShExecutionScript(string shExecutionTemplate, string outputPath)
{
// Build up the copy commands...
StringBuilder copyCommands = new StringBuilder();
foreach (ITaskItem dependency in TestDependencies)
{
string relativePath = dependency.GetMetadata("PackageRelativePath");
bool? useAbsolutePath = dependency.GetMetadata("UseAbsolutePath")?.Equals("true", StringComparison.OrdinalIgnoreCase);
if (useAbsolutePath == true)
{
string filePath = dependency.GetMetadata("SourcePath");
string fileName = Path.GetFileName(filePath);
copyCommands.Append($"copy_and_check {filePath} $EXECUTION_DIR/{fileName}\n");
}
// Generally anything without the relative path is just the test DLL and its directly referenced dependencies.
// Every test project comes with 4 of them, so not producing a warning here.
else if (!string.IsNullOrEmpty(relativePath))
{
string normalizedDependency = relativePath.Replace('\\', '/');
if (normalizedDependency.StartsWith("/"))
{
normalizedDependency = normalizedDependency.Substring(1);
}
string fileName = Path.GetFileName(normalizedDependency);
copyCommands.Append($"copy_and_check $PACKAGE_DIR/{normalizedDependency} $EXECUTION_DIR/{fileName}\n");
}
}
shExecutionTemplate = shExecutionTemplate.Replace("[[CopyFilesCommands]]", copyCommands.ToString());
StringBuilder testRunEchoes = new StringBuilder();
StringBuilder testRunCommands = new StringBuilder();
foreach (string runCommand in TestCommands)
{
testRunCommands.Append($"{runCommand}\n");
testRunEchoes.Append($"echo {runCommand}\n");
}
shExecutionTemplate = shExecutionTemplate.Replace("[[TestRunCommands]]", testRunCommands.ToString());
shExecutionTemplate = shExecutionTemplate.Replace("[[TestRunCommandsEcho]]", testRunEchoes.ToString());
// Just in case any Windows EOLs have made it in by here, clean any up.
shExecutionTemplate = shExecutionTemplate.Replace("\r\n", "\n");
using (StreamWriter sw = new StreamWriter(new FileStream(outputPath, FileMode.Create)))
{
sw.NewLine = "\n";
sw.Write(shExecutionTemplate);
sw.WriteLine();
}
Log.LogMessage($"Wrote .sh test execution script to {outputPath}");
}
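// Builds the Windows (.cmd/.bat) runner script; mirrors WriteShExecutionScript but emits cmd-style copy and call commands.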
private void WriteCmdExecutionScript(string cmdExecutionTemplate, string outputPath)
{
// Build up the copy commands...
StringBuilder copyCommands = new StringBuilder();
foreach (ITaskItem dependency in TestDependencies)
{
string relativePath = dependency.GetMetadata("PackageRelativePath");
bool? useAbsolutePath = dependency.GetMetadata("UseAbsolutePath")?.Equals("true", StringComparison.OrdinalIgnoreCase);
if (useAbsolutePath == true)
{
string fullPath = dependency.GetMetadata("SourcePath");
fullPath = fullPath.Replace('/', '\\');
copyCommands.AppendLine($"call :copyandcheck \"{fullPath}\" \"%EXECUTION_DIR%/{Path.GetFileName(fullPath)}\" || exit /b -1");
}
// Generally anything without the relative path is just the test DLL and its directly referenced dependencies.
// Every test project comes with 4 of them, so not producing a warning here.
else if (!string.IsNullOrEmpty(relativePath))
{
copyCommands.AppendLine($"call :copyandcheck \"%PACKAGE_DIR%\\{relativePath}\" \"%EXECUTION_DIR%\\{Path.GetFileName(relativePath)}\" || exit /b -1");
}
}
cmdExecutionTemplate = cmdExecutionTemplate.Replace("[[CopyFilesCommands]]", copyCommands.ToString());
// Same thing with execution commands
StringBuilder testRunEchoes = new StringBuilder();
StringBuilder testRunCommands = new StringBuilder();
foreach (string runCommand in TestCommands)
{
testRunCommands.AppendLine($"call {runCommand}");
testRunEchoes.AppendLine($"echo {runCommand}");
}
cmdExecutionTemplate = cmdExecutionTemplate.Replace("[[TestRunCommands]]", testRunCommands.ToString());
cmdExecutionTemplate = cmdExecutionTemplate.Replace("[[TestRunCommandsEcho]]", testRunEchoes.ToString());
using (StreamWriter sw = new StreamWriter(new FileStream(outputPath, FileMode.Create)))
{
sw.Write(cmdExecutionTemplate);
sw.WriteLine();
}
Log.LogMessage($"Wrote Windows-compatible test execution script to {outputPath}");
}
}
} | 1 | 9,805 | I'm pretty sure there's already a metadata item that contains what you're calculating here (PackageRelativePath)? | dotnet-buildtools | .cs |
@@ -79,6 +79,15 @@ module ProjectsHelper
end
end
+ def browse_security_button(project)
+ css_class = project.uuid.blank? ? 'disabled' : 'btn-primary'
+ project_name = CGI.escape(project.name)
+ url = ENV['OH_SECURITY_URL'] + "/#{project_name}/#{project.uuid}?project_id=#{project.id}"
+ haml_tag :a, href: url, class: "btn #{css_class}", target: '_blank' do
+ concat t('projects.browse_security')
+ end
+ end
+
private
def project_twitter_description_analysis(project, analysis) | 1 | module ProjectsHelper
def project_activity_level_class(project, image_size)
haml_tag :a, href: 'http://blog.openhub.net/about-project-activity-icons/', target: '_blank',
class: project_activity_css_class(project, image_size),
title: project_activity_text(project, true)
end
def project_activity_level_text(project, image_size)
haml_tag :div, project_activity_text(project, true), class: project_activity_level_text_class(image_size)
end
def project_iusethis_button(project)
haml_tag :a, href: '#', data: { project_id: project.to_param },
class: "#{needs_login_or_verification_or_default('new-stack-entry')} btn btn-primary btn-mini" do
concat t('projects.i_use_this')
end
end
def project_description(project)
text1 = description(project.description.truncate(340), t('projects.more'), style: 'display: inline',
id: "proj_desc_#{project.id}_sm",
link_id: "proj_more_desc_#{project.id}",
css_class: 'proj_desc_toggle')
text2 = description(project.description, t('projects.less'), style: 'display: none',
id: "proj_desc_#{project.id}_lg",
link_id: "proj_less_desc_#{project.id}",
css_class: 'proj_desc_toggle')
"#{text1.html_safe}#{text2.html_safe}".html_safe
end
def project_compare_button(project, label = project.name)
selected = (@session_projects || []).include?(project)
haml_tag :form, class: "sp_form styled form-inline #{'selected' if selected}",
style: 'min-width: 94px;', id: "sp_form_#{project.to_param}" do
haml_tag :span, class: 'sp_label', title: label do
concat label.truncate(35)
end
haml_tag :input, style: 'margin-top: 2px;', type: 'checkbox', id: "sp_chk_#{project.to_param}",
checked: selected, project_id: project.to_param, class: 'sp_input'
haml_tag :div, class: 'clear_both'
end
end
def project_twitter_description(project, analysis)
return project_twitter_description_analysis(project, analysis) unless analysis.blank?
project.description.to_s.length > 0 ? project.description : ''
end
def truncate_project_name(name, link = false, len = 25)
if name.length > len && link == false
content_tag(:abbr, name.truncate(len), title: name)
elsif name.length > len && link == true
name.truncate(len)
else
name
end
end
def project_managers_list
@project.active_managers.map { |m| link_to(html_escape(m.name), account_path(m)) }.to_sentence
end
def stack_name(account)
stacks ||= account.stacks.joins(:projects).where(projects: { id: @project })
stacks.map do |stack|
name = stack.decorate.name(account, @project)
link_to "#{name}#{' Stack' unless name =~ /stack/i}", stack_path(stack)
end.join(', ')
end
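# Maps the project's activity level to the user-facing label, e.g. 'New Project', 'Inactive' or '<level> Activity'.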
def project_activity_text(project, append_activity)
activity_level = project_activity_level(project)
case activity_level
when :na then "#{I18n.t('projects.activity') if append_activity} #{I18n.t('projects.not_available')}"
when :new then I18n.t('projects.new_project')
when :inactive then I18n.t('projects.inactive')
else
"#{I18n.t("projects.#{activity_level}")} #{I18n.t('projects.activity') if append_activity}"
end
end
private
def project_twitter_description_analysis(project, analysis)
content = ''
content += project.description.to_s.truncate(80).concat(', ')
content += "#{number_with_delimiter analysis.code_total} lines of code"
content += " from #{number_with_delimiter analysis.committers_all_time} contributors"
content + ", #{project_activity_text(project, true)}, #{project.user_count} users"
end
def project_activity_css_class(project, size)
"#{size}_project_activity_level_#{project_activity_level(project)}"
end
def project_activity_level_text_class(image_size)
"#{image_size}_project_activity_text"
end
def project_activity_level(project)
project.best_analysis.activity_level
end
end
| 1 | 8,429 | We want to show this button only when there is a page for us to connect to. There is no reason to put the Browse Security Info button on the page at all unless we've identified the UUID from the KB. | blackducksoftware-ohloh-ui | rb |
@@ -42,10 +42,14 @@
#include "res/windowsicon.xpm"
#include "res/macosicon.xpm"
#include "res/linuxicon.xpm"
+#include "res/androidicon.xpm"
#include "res/freebsdicon.xpm"
+#include "res/linuxarmicon2.xpm"
#include "res/atiicon.xpm"
+#include "res/amdicon2.xpm"
#include "res/nvidiaicon.xpm"
-#include "res/androidicon.xpm"
+#include "res/nvidiaicon2.xpm"
+#include "res/intelgpuicon2.xpm"
#include "res/virtualboxicon.xpm"
#include "res/blankicon.xpm"
| 1 | // This file is part of BOINC.
// http://boinc.berkeley.edu
// Copyright (C) 2008 University of California
//
// BOINC is free software; you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License
// as published by the Free Software Foundation,
// either version 3 of the License, or (at your option) any later version.
//
// BOINC is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
// See the GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with BOINC. If not, see <http://www.gnu.org/licenses/>.
//
#if defined(__GNUG__) && !defined(__APPLE__)
#pragma implementation "ProjectInfoPage.h"
#endif
#include "stdwx.h"
#include "diagnostics.h"
#include "util.h"
#include "mfile.h"
#include "miofile.h"
#include "parse.h"
#include "url.h"
#include "error_numbers.h"
#include "wizardex.h"
#include "error_numbers.h"
#include "BOINCGUIApp.h"
#include "SkinManager.h"
#include "MainDocument.h"
#include "ValidateURL.h"
#include "BOINCBaseWizard.h"
#include "WizardAttach.h"
#include "ProjectInfoPage.h"
#include "res/windowsicon.xpm"
#include "res/macosicon.xpm"
#include "res/linuxicon.xpm"
#include "res/freebsdicon.xpm"
#include "res/atiicon.xpm"
#include "res/nvidiaicon.xpm"
#include "res/androidicon.xpm"
#include "res/virtualboxicon.xpm"
#include "res/blankicon.xpm"
/*!
* CProject type
*/
class CProjectInfo : public wxObject
{
DECLARE_DYNAMIC_CLASS( CProjectInfo )
CProjectInfo() {
m_bSupportedPlatformFound = false;
m_bProjectSupportsWindows = false;
m_bProjectSupportsMac = false;
m_bProjectSupportsLinux = false;
m_bProjectSupportsFreeBSD = false;
m_bProjectSupportsCUDA = false;
m_bProjectSupportsCAL = false;
m_bProjectSupportsAndroid = false;
m_bProjectSupportsVirtualBox = false;
}
public:
wxString m_strURL;
wxString m_strName;
wxString m_strDescription;
wxString m_strGeneralArea;
wxString m_strSpecificArea;
wxString m_strOrganization;
bool m_bSupportedPlatformFound;
bool m_bProjectSupportsWindows;
bool m_bProjectSupportsMac;
bool m_bProjectSupportsLinux;
bool m_bProjectSupportsFreeBSD;
bool m_bProjectSupportsCUDA;
bool m_bProjectSupportsCAL;
bool m_bProjectSupportsAndroid;
bool m_bProjectSupportsVirtualBox;
};
IMPLEMENT_DYNAMIC_CLASS( CProjectInfo, wxObject )
/*!
* CProjectInfoPage type definition
*/
IMPLEMENT_DYNAMIC_CLASS( CProjectInfoPage, wxWizardPageEx )
/*!
* CProjectInfoPage event table definition
*/
BEGIN_EVENT_TABLE( CProjectInfoPage, wxWizardPageEx )
////@begin CProjectInfoPage event table entries
EVT_COMBOBOX( ID_CATEGORIES, CProjectInfoPage::OnProjectCategorySelected )
EVT_LISTBOX( ID_PROJECTS, CProjectInfoPage::OnProjectSelected )
EVT_WIZARDEX_PAGE_CHANGED( wxID_ANY, CProjectInfoPage::OnPageChanged )
EVT_WIZARDEX_PAGE_CHANGING( wxID_ANY, CProjectInfoPage::OnPageChanging )
EVT_WIZARDEX_CANCEL( wxID_ANY, CProjectInfoPage::OnCancel )
////@end CProjectInfoPage event table entries
END_EVENT_TABLE()
/*!
* CProjectInfoPage constructors
*/
CProjectInfoPage::CProjectInfoPage( )
{
}
CProjectInfoPage::CProjectInfoPage( CBOINCBaseWizard* parent )
{
Create( parent );
}
/*!
* CProjectInfoPage destructor
*/
CProjectInfoPage::~CProjectInfoPage( )
{
for (std::vector<CProjectInfo*>::iterator iter = m_Projects.begin(); iter != m_Projects.end(); ++iter)
{
CProjectInfo* pEntry = (CProjectInfo*)*iter;
delete pEntry;
}
m_Projects.clear();
delete m_apl;
}
/*!
* CProjectInfoPage creator
*/
bool CProjectInfoPage::Create( CBOINCBaseWizard* parent )
{
////@begin CProjectInfoPage member initialisation
m_pTitleStaticCtrl = NULL;
m_pDescriptionStaticCtrl = NULL;
m_pProjectCategoriesStaticCtrl = NULL;
m_pProjectCategoriesCtrl = NULL;
m_pProjectsStaticCtrl = NULL;
m_pProjectsCtrl = NULL;
m_pProjectDetailsStaticCtrl = NULL;
m_pProjectDetailsDescriptionCtrl = NULL;
m_pProjectDetailsResearchAreaStaticCtrl = NULL;
m_pProjectDetailsResearchAreaCtrl = NULL;
m_pProjectDetailsOrganizationStaticCtrl = NULL;
m_pProjectDetailsOrganizationCtrl = NULL;
m_pProjectDetailsURLStaticCtrl = NULL;
m_pProjectDetailsURLCtrl = NULL;
m_pProjectDetailsSupportedPlatformsStaticCtrl = NULL;
m_pProjectDetailsSupportedPlatformWindowsCtrl = NULL;
m_pProjectDetailsSupportedPlatformMacCtrl = NULL;
m_pProjectDetailsSupportedPlatformLinuxCtrl = NULL;
m_pProjectDetailsSupportedPlatformFreeBSDCtrl = NULL;
m_pProjectDetailsSupportedPlatformATICtrl = NULL;
m_pProjectDetailsSupportedPlatformNvidiaCtrl = NULL;
m_pProjectDetailsSupportedPlatformAndroidCtrl = NULL;
m_pProjectDetailsSupportedPlatformVirtualBoxCtrl = NULL;
m_pProjectDetailsSupportedPlatformBlankCtrl = NULL;
m_pProjectURLStaticCtrl = NULL;
m_pProjectURLCtrl = NULL;
////@end CProjectInfoPage member initialisation
m_Projects.clear();
m_bProjectSupported = false;
m_bProjectListPopulated = false;
////@begin CProjectInfoPage creation
wxWizardPageEx::Create( parent, ID_PROJECTINFOPAGE );
CreateControls();
GetSizer()->Fit(this);
////@end CProjectInfoPage creation
return TRUE;
}
/*!
* Control creation for WizardPage
*/
void CProjectInfoPage::CreateControls()
{
////@begin CProjectInfoPage content construction
#ifdef __WXMAC__
#define DESCRIPTIONSWIDTH 350
#else
#define DESCRIPTIONSWIDTH ADJUSTFORXDPI(310)
#endif
wxArrayString aCategories;
CMainDocument* pDoc = wxGetApp().GetDocument();
wxASSERT(pDoc);
wxASSERT(wxDynamicCast(pDoc, CMainDocument));
CProjectInfoPage* itemWizardPage23 = this;
wxBoxSizer* itemBoxSizer24 = new wxBoxSizer(wxVERTICAL);
itemWizardPage23->SetSizer(itemBoxSizer24);
m_pTitleStaticCtrl = new wxStaticText;
m_pTitleStaticCtrl->Create( itemWizardPage23, wxID_STATIC, _("Choose a project"), wxDefaultPosition, wxDefaultSize, 0 );
m_pTitleStaticCtrl->SetFont(wxFont(10, wxSWISS, wxNORMAL, wxBOLD, FALSE, _T("Verdana")));
itemBoxSizer24->Add(m_pTitleStaticCtrl, 0, wxALIGN_LEFT|wxALL, 5);
m_pDescriptionStaticCtrl = new wxStaticText;
m_pDescriptionStaticCtrl->Create( itemWizardPage23, wxID_STATIC, _("To choose a project, click its name or type its URL below."), wxDefaultPosition, wxDefaultSize, 0 );
itemBoxSizer24->Add(m_pDescriptionStaticCtrl, 0, wxALIGN_LEFT|wxALL, 5);
itemBoxSizer24->Add(5, 5, 0, wxALIGN_LEFT|wxALL, 5);
wxFlexGridSizer* itemFlexGridSizer4 = new wxFlexGridSizer(1, 0, 0);
itemFlexGridSizer4->AddGrowableRow(0);
itemFlexGridSizer4->AddGrowableCol(0);
itemBoxSizer24->Add(itemFlexGridSizer4, 0, wxGROW|wxALL, 0);
wxFlexGridSizer* itemFlexGridSizer6 = new wxFlexGridSizer(2, 0, 0);
itemFlexGridSizer4->Add(itemFlexGridSizer6, 0, wxGROW|wxALL, 0);
wxBoxSizer* itemBoxSizer7 = new wxBoxSizer(wxVERTICAL);
itemFlexGridSizer6->Add(itemBoxSizer7, 0, wxALIGN_LEFT|wxALIGN_TOP, 0);
m_pProjectCategoriesStaticCtrl = new wxStaticText( itemWizardPage23, wxID_STATIC, _("Categories:"), wxDefaultPosition, wxDefaultSize, 0 );
itemBoxSizer7->Add(m_pProjectCategoriesStaticCtrl, 0, wxALIGN_LEFT|wxRIGHT|wxBOTTOM, 5);
// We must populate the combo box before our sizers can calculate its width.
// The combo box will be repopulated in CProjectInfoPage::OnPageChanged(),
// so we don't need to worry about duplicate entries here.
// Get the project list
m_apl = new ALL_PROJECTS_LIST;
pDoc->rpc.get_all_projects_list(*m_apl);
for (unsigned int i=0; i<m_apl->projects.size(); i++) {
wxString strGeneralArea = wxGetTranslation(wxString(m_apl->projects[i]->general_area.c_str(), wxConvUTF8));
aCategories.Add(strGeneralArea);
}
m_pProjectCategoriesCtrl = new wxComboBox( itemWizardPage23, ID_CATEGORIES, wxT(""), wxDefaultPosition, wxDefaultSize, aCategories, wxCB_READONLY
#ifndef __WXMAC__ // wxCB_SORT is not available in wxCocoa 3.0
|wxCB_SORT
#endif
);
itemBoxSizer7->Add(m_pProjectCategoriesCtrl, 0, wxGROW|wxLEFT|wxRIGHT, 5);
m_pProjectsStaticCtrl = new wxStaticText( itemWizardPage23, wxID_STATIC, _("Projects:"), wxDefaultPosition, wxDefaultSize, 0 );
itemBoxSizer7->Add(m_pProjectsStaticCtrl, 0, wxALIGN_LEFT|wxTOP|wxRIGHT|wxBOTTOM, 5);
wxFlexGridSizer* itemFlexGridSizer11 = new wxFlexGridSizer(1, 0, 0);
itemFlexGridSizer11->AddGrowableRow(0);
itemFlexGridSizer11->AddGrowableCol(0);
itemBoxSizer7->Add(itemFlexGridSizer11, 0, wxGROW|wxALL, 0);
wxArrayString m_pProjectsCtrlStrings;
m_pProjectsCtrl = new wxListBox( itemWizardPage23, ID_PROJECTS, wxDefaultPosition, wxSize(-1, ADJUSTFORYDPI(175)), m_pProjectsCtrlStrings, wxLB_SINGLE|wxLB_SORT );
itemFlexGridSizer11->Add(m_pProjectsCtrl, 0, wxGROW|wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT, 0);
m_pProjectDetailsStaticCtrl = new wxStaticBox(itemWizardPage23, wxID_ANY, _("Project details"));
wxStaticBoxSizer* itemStaticBoxSizer13 = new wxStaticBoxSizer(m_pProjectDetailsStaticCtrl, wxVERTICAL);
itemFlexGridSizer6->Add(itemStaticBoxSizer13, 0, wxGROW|wxALIGN_CENTER_VERTICAL|wxALL, 5);
m_pProjectDetailsDescriptionCtrl = new wxTextCtrl( itemWizardPage23, ID_PROJECTDESCRIPTION, wxT(""), wxDefaultPosition, wxSize(DESCRIPTIONSWIDTH, ADJUSTFORYDPI(100)), wxTE_MULTILINE|wxTE_READONLY );
itemStaticBoxSizer13->Add(m_pProjectDetailsDescriptionCtrl, 0, wxGROW|wxLEFT|wxTOP|wxBOTTOM, 5);
wxFlexGridSizer* itemFlexGridSizer16 = new wxFlexGridSizer(2, 0, 0);
itemFlexGridSizer16->AddGrowableCol(1);
itemStaticBoxSizer13->Add(itemFlexGridSizer16, 0, wxGROW|wxALL, 0);
m_pProjectDetailsResearchAreaStaticCtrl = new wxStaticText( itemWizardPage23, wxID_STATIC, _("Research area:"), wxDefaultPosition, wxDefaultSize, 0 );
itemFlexGridSizer16->Add(m_pProjectDetailsResearchAreaStaticCtrl, 0, wxALIGN_CENTER_HORIZONTAL|wxALIGN_CENTER_VERTICAL|wxRIGHT|wxBOTTOM, 2);
m_pProjectDetailsResearchAreaCtrl = new wxStaticText( itemWizardPage23, wxID_STATIC, wxEmptyString, wxDefaultPosition, wxDefaultSize, 0 );
itemFlexGridSizer16->Add(m_pProjectDetailsResearchAreaCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT, 5);
wxFlexGridSizer* itemFlexGridSizer19 = new wxFlexGridSizer(2, 0, 0);
itemFlexGridSizer19->AddGrowableCol(1);
itemStaticBoxSizer13->Add(itemFlexGridSizer19, 0, wxGROW|wxALL, 0);
m_pProjectDetailsOrganizationStaticCtrl = new wxStaticText( itemWizardPage23, wxID_STATIC, _("Organization:"), wxDefaultPosition, wxDefaultSize, 0 );
itemFlexGridSizer19->Add(m_pProjectDetailsOrganizationStaticCtrl, 0, wxALIGN_CENTER_HORIZONTAL|wxALIGN_CENTER_VERTICAL|wxRIGHT|wxBOTTOM, 2);
m_pProjectDetailsOrganizationCtrl = new wxStaticText( itemWizardPage23, wxID_STATIC, wxEmptyString, wxDefaultPosition, wxDefaultSize, 0 );
itemFlexGridSizer19->Add(m_pProjectDetailsOrganizationCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT, 5);
wxFlexGridSizer* itemFlexGridSizer20 = new wxFlexGridSizer(2, 0, 0);
itemFlexGridSizer20->AddGrowableCol(1);
itemStaticBoxSizer13->Add(itemFlexGridSizer20, 0, wxGROW|wxALL, 0);
m_pProjectDetailsURLStaticCtrl = new wxStaticText( itemWizardPage23, wxID_STATIC, _("Web site:"), wxDefaultPosition, wxDefaultSize, 0 );
itemFlexGridSizer20->Add(m_pProjectDetailsURLStaticCtrl, 0, wxALIGN_LEFT|wxRIGHT|wxBOTTOM, 2);
m_pProjectDetailsURLCtrl = new wxHyperlinkCtrl( itemWizardPage23, wxID_STATIC, wxT("BOINC"), wxT("https://boinc.berkeley.edu/"), wxDefaultPosition, wxDefaultSize, wxNO_BORDER|wxHL_CONTEXTMENU|wxHL_ALIGN_LEFT);
itemFlexGridSizer20->Add(m_pProjectDetailsURLCtrl, 0, wxALIGN_LEFT|wxLEFT|wxRIGHT|wxBOTTOM, 5);
wxFlexGridSizer* itemFlexGridSizer24 = new wxFlexGridSizer(2, 1, 0, 0);
itemFlexGridSizer24->AddGrowableRow(1);
itemFlexGridSizer24->AddGrowableCol(0);
itemStaticBoxSizer13->Add(itemFlexGridSizer24, 0, wxGROW|wxALL, 0);
m_pProjectDetailsSupportedPlatformsStaticCtrl = new wxStaticText( itemWizardPage23, wxID_STATIC, _("Supported systems:"), wxDefaultPosition, wxDefaultSize, 0 );
itemFlexGridSizer24->Add(m_pProjectDetailsSupportedPlatformsStaticCtrl, 0, wxALIGN_LEFT|wxALIGN_CENTER_VERTICAL|wxRIGHT|wxBOTTOM, 5);
wxBoxSizer* itemBoxSizer26 = new wxBoxSizer(wxHORIZONTAL);
itemFlexGridSizer24->Add(itemBoxSizer26, 0, wxGROW|wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT, 0);
m_pProjectDetailsSupportedPlatformWindowsCtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("windowsicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformWindowsCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
m_pProjectDetailsSupportedPlatformMacCtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("macosicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformMacCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
m_pProjectDetailsSupportedPlatformLinuxCtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("linuxicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformLinuxCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
m_pProjectDetailsSupportedPlatformFreeBSDCtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("freebsdicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformFreeBSDCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
m_pProjectDetailsSupportedPlatformATICtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("atiicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformATICtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
m_pProjectDetailsSupportedPlatformNvidiaCtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("nvidiaicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformNvidiaCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
m_pProjectDetailsSupportedPlatformAndroidCtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("androidicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformAndroidCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
m_pProjectDetailsSupportedPlatformVirtualBoxCtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("virtualboxicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformVirtualBoxCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
m_pProjectDetailsSupportedPlatformBlankCtrl = new wxStaticBitmap( itemWizardPage23, wxID_STATIC, GetBitmapResource(wxT("blankicon.xpm")), wxDefaultPosition, wxSize(16,16), 0 );
itemBoxSizer26->Add(m_pProjectDetailsSupportedPlatformBlankCtrl, 0, wxALIGN_CENTER_VERTICAL|wxLEFT|wxRIGHT|wxTOP, 5);
wxFlexGridSizer* itemFlexGridSizer33 = new wxFlexGridSizer(2, 0, 0);
itemFlexGridSizer33->AddGrowableCol(1);
itemFlexGridSizer4->Add(itemFlexGridSizer33, 0, wxGROW|wxALIGN_CENTER_VERTICAL|wxALL, 0);
m_pProjectURLStaticCtrl = new wxStaticText( itemWizardPage23, ID_PROJECTURLSTATICCTRL, _("Project URL:"), wxDefaultPosition, wxDefaultSize, 0 );
itemFlexGridSizer33->Add(m_pProjectURLStaticCtrl, 0, wxALIGN_LEFT|wxALIGN_CENTER_VERTICAL|wxALL, 5);
m_pProjectURLCtrl = new wxTextCtrl( itemWizardPage23, ID_PROJECTURLCTRL, wxEmptyString, wxDefaultPosition, wxDefaultSize, 0 );
itemFlexGridSizer33->Add(m_pProjectURLCtrl, 0, wxGROW|wxALIGN_CENTER_VERTICAL|wxALL, 5);
itemFlexGridSizer33->Add(0, 10, 0);
// Set validators
m_pProjectURLCtrl->SetValidator(CValidateURL(&m_strProjectURL));
////@end CProjectInfoPage content construction
}
/*!
* Gets the previous page.
*/
wxWizardPageEx* CProjectInfoPage::GetPrev() const
{
return PAGE_TRANSITION_BACK;
}
/*!
* Gets the next page.
*/
wxWizardPageEx* CProjectInfoPage::GetNext() const
{
if (CHECK_CLOSINGINPROGRESS()) {
// Cancel Event Detected
return PAGE_TRANSITION_NEXT(ID_COMPLETIONERRORPAGE);
} else {
return PAGE_TRANSITION_NEXT(ID_PROJECTPROPERTIESPAGE);
}
return NULL;
}
/*!
* Should we show tooltips?
*/
bool CProjectInfoPage::ShowToolTips()
{
return TRUE;
}
/*!
* Get bitmap resources
*/
wxBitmap CProjectInfoPage::GetBitmapResource( const wxString& name )
{
// Bitmap retrieval
////@begin CProjectInfoPage bitmap retrieval
wxUnusedVar(name);
if (name == wxT("windowsicon.xpm"))
{
wxBitmap bitmap(Win32_xpm);
return bitmap;
}
else if (name == wxT("macosicon.xpm"))
{
wxBitmap bitmap(MacOS_xpm);
return bitmap;
}
else if (name == wxT("linuxicon.xpm"))
{
wxBitmap bitmap(Linux_xpm);
return bitmap;
}
else if (name == wxT("freebsdicon.xpm"))
{
wxBitmap bitmap(FreeBSD_xpm);
return bitmap;
}
else if (name == wxT("atiicon.xpm"))
{
wxBitmap bitmap(atiicon_xpm);
return bitmap;
}
else if (name == wxT("nvidiaicon.xpm"))
{
wxBitmap bitmap(nvidiaicon_xpm);
return bitmap;
}
else if (name == wxT("androidicon.xpm"))
{
wxBitmap bitmap(androidicon_xpm);
return bitmap;
}
else if (name == wxT("virtualboxicon.xpm"))
{
wxBitmap bitmap(virtualboxicon_xpm);
return bitmap;
}
else if (name == wxT("blankicon.xpm"))
{
wxBitmap bitmap(blankicon_xpm);
return bitmap;
}
return wxNullBitmap;
////@end CProjectInfoPage bitmap retrieval
}
/*!
* Get icon resources
*/
wxIcon CProjectInfoPage::GetIconResource( const wxString& WXUNUSED(name) )
{
// Icon retrieval
////@begin CProjectInfoPage icon retrieval
return wxNullIcon;
////@end CProjectInfoPage icon retrieval
}
/*
* wxEVT_COMMAND_COMBOBOX_SELECTED event handler for ID_CATEGORIES
*/
void CProjectInfoPage::OnProjectCategorySelected( wxCommandEvent& WXUNUSED(event) ) {
wxLogTrace(wxT("Function Start/End"), wxT("CProjectInfoPage::OnProjectCategorySelected - Function Begin"));
m_pProjectsCtrl->Clear();
    // Populate the list box with the list of project names that belong to either the specific
    // category or all of them.
for (unsigned int i=0; i<m_Projects.size(); i++) {
if ((m_pProjectCategoriesCtrl->GetValue() == _("All")) ||
(m_pProjectCategoriesCtrl->GetValue() == m_Projects[i]->m_strGeneralArea)
) {
m_pProjectsCtrl->Append(m_Projects[i]->m_strName, m_Projects[i]);
}
}
// Set the first item to be the selected item and then pop the next event.
if (m_pProjectsCtrl->GetCount() > 0) {
m_pProjectsCtrl->SetSelection(0);
wxCommandEvent evtEvent(wxEVT_COMMAND_LISTBOX_SELECTED, ID_PROJECTS);
ProcessEvent(evtEvent);
}
wxLogTrace(wxT("Function Start/End"), wxT("CProjectInfoPage::OnProjectCategorySelected - Function End"));
}
/*
* wxEVT_COMMAND_LISTBOX_SELECTED event handler for ID_PROJECTS
*/
void CProjectInfoPage::OnProjectSelected( wxCommandEvent& WXUNUSED(event) ) {
wxLogTrace(wxT("Function Start/End"), wxT("CProjectInfoPage::OnProjectSelected - Function Begin"));
if (m_pProjectsCtrl->GetSelection() != wxNOT_FOUND) {
CProjectInfo* pProjectInfo = (CProjectInfo*)m_pProjectsCtrl->GetClientData(m_pProjectsCtrl->GetSelection());
wxString strURL = pProjectInfo->m_strURL;
EllipseStringIfNeeded(strURL, m_pProjectDetailsURLCtrl);
// Populate the project details area
wxString desc = pProjectInfo->m_strDescription;
// Change all occurrences of "<sup>n</sup>" to "^n"
desc.Replace(wxT("<sup>"), wxT("^"), true);
desc.Replace(wxT("</sup>"), wxT(""), true);
        desc.Replace(wxT("&lt;"), wxT("<"), true);
m_pProjectDetailsURLCtrl->SetLabel(strURL);
m_pProjectDetailsURLCtrl->SetURL(pProjectInfo->m_strURL);
m_pProjectDetailsURLCtrl->SetToolTip(pProjectInfo->m_strURL);
m_pProjectDetailsDescriptionCtrl->SetValue(desc);
m_pProjectDetailsSupportedPlatformWindowsCtrl->Hide();
m_pProjectDetailsSupportedPlatformMacCtrl->Hide();
m_pProjectDetailsSupportedPlatformLinuxCtrl->Hide();
m_pProjectDetailsSupportedPlatformFreeBSDCtrl->Hide();
m_pProjectDetailsSupportedPlatformATICtrl->Hide();
m_pProjectDetailsSupportedPlatformNvidiaCtrl->Hide();
m_pProjectDetailsSupportedPlatformAndroidCtrl->Hide();
m_pProjectDetailsSupportedPlatformVirtualBoxCtrl->Hide();
if (pProjectInfo->m_bProjectSupportsWindows) m_pProjectDetailsSupportedPlatformWindowsCtrl->Show();
if (pProjectInfo->m_bProjectSupportsMac) m_pProjectDetailsSupportedPlatformMacCtrl->Show();
if (pProjectInfo->m_bProjectSupportsLinux) m_pProjectDetailsSupportedPlatformLinuxCtrl->Show();
if (pProjectInfo->m_bProjectSupportsFreeBSD) m_pProjectDetailsSupportedPlatformFreeBSDCtrl->Show();
if (pProjectInfo->m_bProjectSupportsCAL) m_pProjectDetailsSupportedPlatformATICtrl->Show();
if (pProjectInfo->m_bProjectSupportsCUDA) m_pProjectDetailsSupportedPlatformNvidiaCtrl->Show();
if (pProjectInfo->m_bProjectSupportsAndroid) m_pProjectDetailsSupportedPlatformAndroidCtrl->Show();
if (pProjectInfo->m_bProjectSupportsVirtualBox) m_pProjectDetailsSupportedPlatformVirtualBoxCtrl->Show();
// Populate non-control data for use in other places of the wizard
m_strProjectURL = pProjectInfo->m_strURL;
m_bProjectSupported = pProjectInfo->m_bSupportedPlatformFound;
Layout();
TransferDataToWindow();
wxString strResearchArea = pProjectInfo->m_strSpecificArea;
EllipseStringIfNeeded(strResearchArea, m_pProjectDetailsResearchAreaCtrl);
wxString strOrganization = pProjectInfo->m_strOrganization;
EllipseStringIfNeeded(strOrganization, m_pProjectDetailsOrganizationCtrl);
m_pProjectDetailsResearchAreaCtrl->SetLabel(strResearchArea);
m_pProjectDetailsResearchAreaCtrl->SetToolTip(pProjectInfo->m_strSpecificArea);
m_pProjectDetailsOrganizationCtrl->SetLabel(strOrganization);
m_pProjectDetailsOrganizationCtrl->SetToolTip(pProjectInfo->m_strOrganization);
}
wxLogTrace(wxT("Function Start/End"), wxT("CProjectInfoPage::OnProjectSelected - Function End"));
}
/*!
* wxEVT_WIZARD_PAGE_CHANGED event handler for ID_PROJECTINFOPAGE
*/
void CProjectInfoPage::OnPageChanged( wxWizardExEvent& event ) {
if (event.GetDirection() == false) return;
wxLogTrace(wxT("Function Start/End"), wxT("CProjectInfoPage::OnPageChanged - Function Begin"));
CMainDocument* pDoc = wxGetApp().GetDocument();
unsigned int i = 0, j = 0, k = 0;
wxArrayString aClientPlatforms;
wxArrayString aProjectPlatforms;
wxArrayString aCategories;
bool bCategoryFound = false;
CProjectInfo* pProjectInfo = NULL;
// Populate the ProjectInfo data structure with the list of projects we want to show and
// any other activity we need to prep the page.
if (!m_bProjectListPopulated) {
// Convert the supported client platforms into something useful
for (i=0; i<pDoc->state.platforms.size(); i++) {
aClientPlatforms.Add(wxString(pDoc->state.platforms[i].c_str(), wxConvUTF8));
}
// Iterate through the project list and add them to the ProjectInfo data structure
for (i=0; i<m_apl->projects.size(); i++) {
pProjectInfo = new CProjectInfo();
m_Projects.push_back(pProjectInfo);
// Convert the easy stuff
pProjectInfo->m_strURL = wxGetTranslation(wxString(m_apl->projects[i]->url.c_str(), wxConvUTF8));
pProjectInfo->m_strName = wxGetTranslation(wxString(m_apl->projects[i]->name.c_str(), wxConvUTF8));
pProjectInfo->m_strDescription = wxGetTranslation(wxString(m_apl->projects[i]->description.c_str(), wxConvUTF8));
pProjectInfo->m_strGeneralArea = wxGetTranslation(wxString(m_apl->projects[i]->general_area.c_str(), wxConvUTF8));
pProjectInfo->m_strSpecificArea = wxGetTranslation(wxString(m_apl->projects[i]->specific_area.c_str(), wxConvUTF8));
pProjectInfo->m_strOrganization = wxGetTranslation(wxString(m_apl->projects[i]->home.c_str(), wxConvUTF8));
// Add the category if it isn't already in the category list
bCategoryFound = false;
for (j=0; j<aCategories.size(); j++) {
if (aCategories[j] == pProjectInfo->m_strGeneralArea) {
bCategoryFound = true;
}
}
if (!bCategoryFound) {
aCategories.Add(pProjectInfo->m_strGeneralArea);
}
// Convert the supported project platforms into something useful
aProjectPlatforms.Clear();
for (j=0; j<m_apl->projects[i]->platforms.size(); j++) {
aProjectPlatforms.Add(wxString(m_apl->projects[i]->platforms[j].c_str(), wxConvUTF8));
}
// Can the core client support a platform that this project supports?
//
for (j = 0;j < aClientPlatforms.size(); j++) {
wxString strClientPlatform = aClientPlatforms[j];
for (k = 0;k < aProjectPlatforms.size(); k++) {
wxString strProjectPlatform = aProjectPlatforms[k];
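                    // The project platform string may carry a plan class suffix such as "[cuda]";
                    // only the root platform name is compared against the client platform below.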
wxString strRootProjectPlatform = strProjectPlatform.SubString(0, strProjectPlatform.Find(_T("[")) - 1);
if (strProjectPlatform.Find(_T("windows")) != wxNOT_FOUND) {
pProjectInfo->m_bProjectSupportsWindows = true;
}
if (strProjectPlatform.Find(_T("apple")) != wxNOT_FOUND) {
pProjectInfo->m_bProjectSupportsMac = true;
}
if (strProjectPlatform.Find(_T("linux")) != wxNOT_FOUND) {
pProjectInfo->m_bProjectSupportsLinux = true;
}
if (strProjectPlatform.Find(_T("freebsd")) != wxNOT_FOUND) {
pProjectInfo->m_bProjectSupportsFreeBSD = true;
}
if (strProjectPlatform.Find(_T("android")) != wxNOT_FOUND) {
pProjectInfo->m_bProjectSupportsAndroid = true;
}
if (strProjectPlatform.Find(_T("[cuda")) != wxNOT_FOUND) {
pProjectInfo->m_bProjectSupportsCUDA = true;
if (!pDoc->state.host_info.coprocs.have_nvidia()) continue;
}
if (strProjectPlatform.Find(_T("[ati")) != wxNOT_FOUND) {
pProjectInfo->m_bProjectSupportsCAL = true;
if (!pDoc->state.host_info.coprocs.have_ati()) continue;
}
if (strProjectPlatform.Find(_T("[vbox")) != wxNOT_FOUND) {
pProjectInfo->m_bProjectSupportsVirtualBox = true;
}
if (strClientPlatform == strRootProjectPlatform) {
pProjectInfo->m_bSupportedPlatformFound = true;
}
}
}
// If project doesn't export its platforms, assume we're supported
//
if (aProjectPlatforms.size() == 0) {
pProjectInfo->m_bSupportedPlatformFound = true;
}
}
// Populate the category combo box
if (!m_pProjectCategoriesCtrl->IsListEmpty()) {
m_pProjectCategoriesCtrl->Clear();
}
m_pProjectCategoriesCtrl->Append(_("All"));
for (i=0; i<aCategories.size(); i++) {
m_pProjectCategoriesCtrl->Append(aCategories[i]);
}
m_pProjectCategoriesCtrl->SetValue(_("All"));
// Trigger initial event to populate the list control
wxCommandEvent evtEvent(wxEVT_COMMAND_COMBOBOX_SELECTED, ID_CATEGORIES);
ProcessEvent(evtEvent);
m_bProjectListPopulated = true;
}
m_pProjectsCtrl->SetFocus();
wxLogTrace(wxT("Function Start/End"), wxT("CProjectInfoPage::OnPageChanged - Function End"));
}
/*!
* wxEVT_WIZARD_PAGE_CHANGING event handler for ID_PROJECTINFOPAGE
*/
void CProjectInfoPage::OnPageChanging( wxWizardExEvent& event ) {
if (event.GetDirection() == false) return;
CWizardAttach* pWA = ((CWizardAttach*)GetParent());
CMainDocument* pDoc = wxGetApp().GetDocument();
CSkinAdvanced* pSkinAdvanced = wxGetApp().GetSkinManager()->GetAdvanced();
wxString strTitle;
int iAnswer;
bool bAlreadyAttached = false;
wxASSERT(pDoc);
wxASSERT(wxDynamicCast(pDoc, CMainDocument));
wxASSERT(pSkinAdvanced);
wxASSERT(wxDynamicCast(pSkinAdvanced, CSkinAdvanced));
strTitle.Printf(
wxT("%s"),
pSkinAdvanced->GetApplicationName().c_str()
);
// Check to see if the project is supported:
if (!m_bProjectSupported) {
iAnswer = wxGetApp().SafeMessageBox(
_("This project may not have work for your type of computer. Do you want to add it anyway?"),
strTitle,
wxCENTER | wxYES_NO | wxICON_INFORMATION
);
// Project is not supported
if (wxNO == iAnswer) {
event.Veto();
}
}
// Check if we are already attached to that project:
for (int i = 0; i < pDoc->GetProjectCount(); ++i) {
PROJECT* project = pDoc->project(i);
if (project) {
std::string project_url = project->master_url;
std::string new_project_url = (const char*)m_strProjectURL.mb_str();
canonicalize_master_url(project_url);
canonicalize_master_url(new_project_url);
if (project_url == new_project_url) {
bAlreadyAttached = true;
break;
}
}
}
if (bAlreadyAttached) {
wxGetApp().SafeMessageBox(
_("You already added this project. Please choose a different project."),
strTitle,
wxCENTER | wxOK | wxICON_INFORMATION
);
event.Veto();
} else {
// Update authoritative data in CWizardAttach
pWA->SetProjectURL(m_strProjectURL);
}
}
/*!
* wxEVT_WIZARD_CANCEL event handler for ID_PROJECTINFOPAGE
*/
void CProjectInfoPage::OnCancel( wxWizardExEvent& event ) {
PROCESS_CANCELEVENT(event);
}
void CProjectInfoPage::EllipseStringIfNeeded(wxString& s, wxWindow *win) {
int x, y;
int w, h;
wxSize sz = win->GetParent()->GetSize();
win->GetPosition(&x, &y);
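    // Leave a small right margin so the (possibly ellipsized) text does not run to the parent's edge.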
int maxWidth = sz.GetWidth() - x - 10;
win->GetTextExtent(s, &w, &h);
    // Adapted from ellipsis code in wxRendererGeneric::DrawHeaderButtonContents()
if (w > maxWidth) {
int ellipsisWidth;
win->GetTextExtent( wxT("..."), &ellipsisWidth, NULL);
if (ellipsisWidth > maxWidth) {
s.Clear();
w = 0;
} else {
do {
s.Truncate( s.length() - 1 );
win->GetTextExtent( s, &w, &h);
} while (((w + ellipsisWidth) > maxWidth) && s.length() );
s.append( wxT("...") );
w += ellipsisWidth;
}
}
}
void CProjectInfoPage::RefreshPage() {
// Trigger initial event to populate the list control
wxCommandEvent evtEvent(wxEVT_COMMAND_COMBOBOX_SELECTED, ID_CATEGORIES);
ProcessEvent(evtEvent);
}
| 1 | 11,361 | Why `2` in this and others? | BOINC-boinc | php |
@@ -118,6 +118,7 @@ namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Hosting
var testHostProcessName = (this.architecture == Architecture.X86) ? X86TestHostProcessName : X64TestHostProcessName;
var currentWorkingDirectory = Path.GetDirectoryName(typeof(DefaultTestHostManager).GetTypeInfo().Assembly.Location);
var argumentsString = " " + Constants.PortOption + " " + connectionInfo.Port;
+ argumentsString += " " + Constants.ParentProcessIdOption + " " + processHelper.GetCurrentProcessId();
var testhostProcessPath = Path.Combine(currentWorkingDirectory, testHostProcessName);
| 1 | // Copyright (c) Microsoft. All rights reserved.
namespace Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Hosting
{
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Reflection;
using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers;
using Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Helpers.Interfaces;
using Microsoft.VisualStudio.TestPlatform.ObjectModel;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Client.Interfaces;
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Engine;
using Constants = Microsoft.VisualStudio.TestPlatform.CrossPlatEngine.Constants;
/// <summary>
/// The default test host launcher for the engine.
/// This works for Desktop local scenarios
/// </summary>
public class DefaultTestHostManager : ITestHostManager
{
private const string X64TestHostProcessName = "testhost.exe";
private const string X86TestHostProcessName = "testhost.x86.exe";
private const string DotnetProcessName = "dotnet.exe";
private const string DotnetProcessNameXPlat = "dotnet";
private const string NetCoreDirectoryName = "NetCore";
private readonly Architecture architecture;
private readonly Framework framework;
private ITestHostLauncher customTestHostLauncher;
private Process testHostProcess;
private readonly IProcessHelper processHelper;
private EventHandler registeredExitHandler;
/// <summary>
/// Initializes a new instance of the <see cref="DefaultTestHostManager"/> class.
/// </summary>
/// <param name="architecture">Platform architecture of the host process.</param>
/// <param name="framework">Runtime framework for the host process.</param>
public DefaultTestHostManager(Architecture architecture, Framework framework)
: this(architecture, framework, new ProcessHelper())
{
}
/// <summary>
/// Initializes a new instance of the <see cref="DefaultTestHostManager"/> class.
/// </summary>
/// <param name="architecture">Platform architecture of the host process.</param>
/// <param name="framework">Runtime framework for the host process.</param>
/// <param name="processHelper">Process helper instance.</param>
internal DefaultTestHostManager(Architecture architecture, Framework framework, IProcessHelper processHelper)
{
this.architecture = architecture;
this.framework = framework;
this.processHelper = processHelper;
this.testHostProcess = null;
}
/// <inheritdoc/>
public bool Shared => true;
/// <summary>
/// Gets the properties of the test executor launcher. These could be the targetID for emulator/phone specific scenarios.
/// </summary>
public IDictionary<string, string> Properties
{
get
{
return new Dictionary<string, string>();
}
}
/// <summary>
/// Sets a custom launcher.
/// </summary>
/// <param name="customLauncher">Custom launcher to set</param>
public void SetCustomLauncher(ITestHostLauncher customLauncher)
{
this.customTestHostLauncher = customLauncher;
}
/// <summary>
/// Launches the test host for discovery/execution.
/// </summary>
/// <param name="testHostStartInfo"></param>
/// <returns>ProcessId of launched Process. 0 means not launched.</returns>
public int LaunchTestHost(TestProcessStartInfo testHostStartInfo)
{
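            // Detach the exit handler that may still be registered for a previously launched test host.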
this.DeregisterForExitNotification();
EqtTrace.Verbose("Launching default test Host Process {0} with arguments {1}", testHostStartInfo.FileName, testHostStartInfo.Arguments);
if (this.customTestHostLauncher == null)
{
this.testHostProcess = this.processHelper.LaunchProcess(testHostStartInfo.FileName, testHostStartInfo.Arguments, testHostStartInfo.WorkingDirectory);
}
else
{
int processId = this.customTestHostLauncher.LaunchTestHost(testHostStartInfo);
this.testHostProcess = Process.GetProcessById(processId);
}
return this.testHostProcess.Id;
}
/// <inheritdoc/>
public virtual TestProcessStartInfo GetTestHostProcessStartInfo(
IEnumerable<string> sources,
IDictionary<string, string> environmentVariables,
TestRunnerConnectionInfo connectionInfo)
{
// Default test host manager supports shared test sources
var testHostProcessName = (this.architecture == Architecture.X86) ? X86TestHostProcessName : X64TestHostProcessName;
var currentWorkingDirectory = Path.GetDirectoryName(typeof(DefaultTestHostManager).GetTypeInfo().Assembly.Location);
var argumentsString = " " + Constants.PortOption + " " + connectionInfo.Port;
var testhostProcessPath = Path.Combine(currentWorkingDirectory, testHostProcessName);
// For IDEs and other scenario, current directory should be the
// working directory (not the vstest.console.exe location).
// For VS - this becomes the solution directory for example
// "TestResults" directory will be created at "current directory" of test host
var processWorkingDirectory = Directory.GetCurrentDirectory();
return new TestProcessStartInfo
{
FileName = testhostProcessPath,
Arguments = argumentsString,
EnvironmentVariables = environmentVariables ?? new Dictionary<string, string>(),
WorkingDirectory = processWorkingDirectory
};
}
/// <summary>
/// Register for the exit event.
/// </summary>
/// <param name="abortCallback"> The callback on exit. </param>
public virtual void RegisterForExitNotification(Action abortCallback)
{
if (this.testHostProcess != null && abortCallback != null)
{
this.registeredExitHandler = (sender, args) => abortCallback();
this.testHostProcess.Exited += this.registeredExitHandler;
}
}
/// <summary>
/// Deregister for the exit event.
/// </summary>
public virtual void DeregisterForExitNotification()
{
if (this.testHostProcess != null && this.registeredExitHandler != null)
{
this.testHostProcess.Exited -= this.registeredExitHandler;
}
}
}
}
| 1 | 11,208 | This may cause a new allocation, please consider merging the concat in above line. Same applies to change in dotnethostmanager. | microsoft-vstest | .cs |
@@ -490,6 +490,16 @@ given file (report RP0402 must not be disabled)'}
importedname = node.modname
else:
importedname = node.names[0][0].split('.')[0]
+ if isinstance(node, astroid.ImportFrom) and \
+ node.as_string().startswith('from .'):
+            # We need the importedname with the leading dot to detect a local package
+ # Example of node:
+ # 'from .my_package1 import MyClass1'
+ # the output should be '.my_package1' instead of 'my_package1'
+ # Example of node:
+ # 'from . import my_package2'
+ # the output should be '.my_package2' instead of '{pyfile}'
+ importedname = '.' + importedname
self._imports_stack.append((node, importedname))
@staticmethod | 1 | # Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""imports checkers for Python code"""
import collections
from distutils import sysconfig
import os
import sys
import six
import astroid
from astroid import are_exclusive
from astroid.modutils import (get_module_part, is_standard_module)
import isort
from pylint.interfaces import IAstroidChecker
from pylint.utils import EmptyReport, get_global_option
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages, node_ignores_exception
from pylint.graph import get_cycles, DotBackend
from pylint.reporters.ureports.nodes import VerbatimText, Paragraph
def _qualified_names(modname):
"""Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = modname.split('.')
return ['.'.join(names[0:i+1]) for i in range(len(names))]
def _get_import_name(importnode, modname):
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
"""
if isinstance(importnode, astroid.ImportFrom):
if importnode.level:
root = importnode.root()
if isinstance(root, astroid.Module):
modname = root.relative_to_absolute_name(
modname, level=importnode.level)
return modname
def _get_first_import(node, context, name, base, level, alias):
"""return the node where [base.]<name> is imported or None if not found
"""
fullname = '%s.%s' % (base, name) if base else name
first = None
found = False
for first in context.body:
if first is node:
continue
if first.scope() is node.scope() and first.fromlineno > node.fromlineno:
continue
if isinstance(first, astroid.Import):
if any(fullname == iname[0] for iname in first.names):
found = True
break
elif isinstance(first, astroid.ImportFrom):
if level == first.level:
for imported_name, imported_alias in first.names:
if fullname == '%s.%s' % (first.modname, imported_name):
found = True
break
if name != '*' and name == imported_name and not (alias or imported_alias):
found = True
break
if found:
break
if found and not are_exclusive(first, node):
return first
def _ignore_import_failure(node, modname, ignored_modules):
for submodule in _qualified_names(modname):
if submodule in ignored_modules:
return True
return node_ignores_exception(node, ImportError)
# utilities to represents import dependencies as tree and dot graph ###########
def _make_tree_defs(mod_files_list):
"""get a list of 2-uple (module, list_of_files_which_import_this_module),
it will return a dictionary to represent this as a tree
"""
tree_defs = {}
for mod, files in mod_files_list:
node = (tree_defs, ())
for prefix in mod.split('.'):
node = node[0].setdefault(prefix, [{}, []])
node[1] += files
return tree_defs
def _repr_tree_defs(data, indent_str=None):
"""return a string which represents imports as a tree"""
lines = []
nodes = data.items()
for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])):
if not files:
files = ''
else:
files = '(%s)' % ','.join(files)
if indent_str is None:
lines.append('%s %s' % (mod, files))
sub_indent_str = ' '
else:
lines.append(r'%s\-%s %s' % (indent_str, mod, files))
if i == len(nodes)-1:
sub_indent_str = '%s ' % indent_str
else:
sub_indent_str = '%s| ' % indent_str
if sub:
lines.append(_repr_tree_defs(sub, sub_indent_str))
return '\n'.join(lines)
def _dependencies_graph(filename, dep_info):
"""write dependencies as a dot (graphviz) file
"""
done = {}
printer = DotBackend(filename[:-4], rankdir='LR')
printer.emit('URL="." node[shape="box"]')
for modname, dependencies in sorted(six.iteritems(dep_info)):
done[modname] = 1
printer.emit_node(modname)
for modname in dependencies:
if modname not in done:
done[modname] = 1
printer.emit_node(modname)
for depmodname, dependencies in sorted(six.iteritems(dep_info)):
for modname in dependencies:
printer.emit_edge(modname, depmodname)
printer.generate(filename)
def _make_graph(filename, dep_info, sect, gtype):
"""generate a dependencies graph and add some information about it in the
report's section
"""
_dependencies_graph(filename, dep_info)
sect.append(Paragraph('%simports graph has been written to %s'
% (gtype, filename)))
# the import checker itself ###################################################
MSGS = {
'E0401': ('Unable to import %s',
'import-error',
'Used when pylint has been unable to import a module.',
{'old_names': [('F0401', 'import-error')]}),
'E0402': ('Attempted relative import beyond top-level package',
'relative-beyond-top-level',
'Used when a relative import tries to access too many levels '
'in the current package.'),
'R0401': ('Cyclic import (%s)',
'cyclic-import',
'Used when a cyclic import between two or more modules is \
detected.'),
'W0401': ('Wildcard import %s',
'wildcard-import',
'Used when `from module import *` is detected.'),
'W0402': ('Uses of a deprecated module %r',
'deprecated-module',
'Used a module marked as deprecated is imported.'),
'W0403': ('Relative import %r, should be %r',
'relative-import',
'Used when an import relative to the package directory is '
'detected.',
{'maxversion': (3, 0)}),
'W0404': ('Reimport %r (imported line %s)',
'reimported',
'Used when a module is reimported multiple times.'),
'W0406': ('Module import itself',
'import-self',
'Used when a module is importing itself.'),
'W0410': ('__future__ import is not the first non docstring statement',
'misplaced-future',
'Python 2.5 and greater require __future__ import to be the \
first non docstring statement in the module.'),
'C0410': ('Multiple imports on one line (%s)',
'multiple-imports',
'Used when import statement importing multiple modules is '
'detected.'),
'C0411': ('%s comes before %s',
'wrong-import-order',
'Used when PEP8 import order is not respected (standard imports '
'first, then third-party libraries, then local imports)'),
'C0412': ('Imports from package %s are not grouped',
'ungrouped-imports',
'Used when imports are not grouped by packages'),
'C0413': ('Import "%s" should be placed at the top of the '
'module',
'wrong-import-position',
'Used when code and imports are mixed'),
}
DEFAULT_STANDARD_LIBRARY = ()
DEFAULT_KNOWN_THIRD_PARTY = ('enchant',)
class ImportsChecker(BaseChecker):
"""checks for
* external modules dependencies
* relative / wildcard imports
* cyclic imports
* uses of deprecated modules
"""
__implements__ = IAstroidChecker
name = 'imports'
msgs = MSGS
priority = -2
if six.PY2:
deprecated_modules = ('regsub', 'TERMIOS', 'Bastion', 'rexec')
else:
deprecated_modules = ('optparse', )
options = (('deprecated-modules',
{'default' : deprecated_modules,
'type' : 'csv',
'metavar' : '<modules>',
'help' : 'Deprecated modules which should not be used, \
separated by a comma'}
),
('import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of every (i.e. internal and \
external) dependencies in the given file (report RP0402 must not be disabled)'}
),
('ext-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of external dependencies in the \
given file (report RP0402 must not be disabled)'}
),
('int-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of internal dependencies in the \
given file (report RP0402 must not be disabled)'}
),
('known-standard-library',
{'default': DEFAULT_STANDARD_LIBRARY,
'type': 'csv',
'metavar': '<modules>',
'help': 'Force import order to recognize a module as part of' \
' the standard compatibility libraries.'}
),
('known-third-party',
{'default': DEFAULT_KNOWN_THIRD_PARTY,
'type': 'csv',
'metavar': '<modules>',
'help': 'Force import order to recognize a module as part of' \
' a third party library.'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self.import_graph = None
self._imports_stack = []
self._first_non_import_node = None
self.__int_dep_info = self.__ext_dep_info = None
self.reports = (('RP0401', 'External dependencies',
self._report_external_dependencies),
('RP0402', 'Modules dependencies graph',
self._report_dependencies_graph),
)
self._site_packages = self._compute_site_packages()
@staticmethod
def _compute_site_packages():
def _normalized_path(path):
return os.path.normcase(os.path.abspath(path))
paths = set()
real_prefix = getattr(sys, 'real_prefix', None)
for prefix in filter(None, (real_prefix, sys.prefix)):
path = sysconfig.get_python_lib(prefix=prefix)
path = _normalized_path(path)
paths.add(path)
# Handle Debian's derivatives /usr/local.
if os.path.isfile("/etc/debian_version"):
for prefix in filter(None, (real_prefix, sys.prefix)):
libpython = os.path.join(prefix, "local", "lib",
"python" + sysconfig.get_python_version(),
"dist-packages")
paths.add(libpython)
return paths
def open(self):
"""called before visiting project (i.e set of modules)"""
self.linter.add_stats(dependencies={})
self.linter.add_stats(cycles=[])
self.stats = self.linter.stats
self.import_graph = collections.defaultdict(set)
self._ignored_modules = get_global_option(
self, 'ignored-modules', default=[])
    def close(self):
        """called after visiting project (i.e set of modules)"""
# don't try to compute cycles if the associated message is disabled
if self.linter.is_message_enabled('cyclic-import'):
vertices = list(self.import_graph)
for cycle in get_cycles(self.import_graph, vertices=vertices):
self.add_message('cyclic-import', args=' -> '.join(cycle))
@check_messages('wrong-import-position', 'multiple-imports',
'relative-import', 'reimported')
def visit_import(self, node):
"""triggered when an import statement is seen"""
self._check_reimport(node)
modnode = node.root()
names = [name for name, _ in node.names]
if len(names) >= 2:
self.add_message('multiple-imports', args=', '.join(names), node=node)
for name in names:
self._check_deprecated_module(node, name)
importedmodnode = self._get_imported_module(node, name)
if isinstance(node.scope(), astroid.Module):
self._check_position(node)
self._record_import(node, importedmodnode)
if importedmodnode is None:
continue
self._check_relative_import(modnode, node, importedmodnode, name)
self._add_imported_module(node, importedmodnode.name)
@check_messages(*(MSGS.keys()))
def visit_importfrom(self, node):
"""triggered when a from statement is seen"""
basename = node.modname
self._check_misplaced_future(node)
self._check_deprecated_module(node, basename)
self._check_wildcard_imports(node)
self._check_same_line_imports(node)
self._check_reimport(node, basename=basename, level=node.level)
modnode = node.root()
importedmodnode = self._get_imported_module(node, basename)
if isinstance(node.scope(), astroid.Module):
self._check_position(node)
self._record_import(node, importedmodnode)
if importedmodnode is None:
return
self._check_relative_import(modnode, node, importedmodnode, basename)
for name, _ in node.names:
if name != '*':
self._add_imported_module(node, '%s.%s' % (importedmodnode.name, name))
@check_messages('wrong-import-order', 'ungrouped-imports',
'wrong-import-position')
def leave_module(self, node):
# Check imports are grouped by category (standard, 3rd party, local)
std_imports, ext_imports, loc_imports = self._check_imports_order(node)
# Check imports are grouped by package within a given category
met = set()
current_package = None
for import_node, import_name in std_imports + ext_imports + loc_imports:
package, _, _ = import_name.partition('.')
if current_package and current_package != package and package in met:
self.add_message('ungrouped-imports', node=import_node,
args=package)
current_package = package
met.add(package)
self._imports_stack = []
self._first_non_import_node = None
def visit_if(self, node):
# if the node does not contain an import instruction, and if it is the
# first node of the module, keep a track of it (all the import positions
# of the module will be compared to the position of this first
# instruction)
if self._first_non_import_node:
return
if not isinstance(node.parent, astroid.Module):
return
if any(node.nodes_of_class((astroid.Import, astroid.ImportFrom))):
return
self._first_non_import_node = node
visit_tryfinally = visit_tryexcept = visit_assignattr = visit_assign \
= visit_ifexp = visit_comprehension = visit_if
def visit_functiondef(self, node):
# If it is the first non import instruction of the module, record it.
if self._first_non_import_node:
return
# Check if the node belongs to an `If` or a `Try` block. If they
# contain imports, skip recording this node.
if not isinstance(node.parent.scope(), astroid.Module):
return
root = node
while not isinstance(root.parent, astroid.Module):
root = root.parent
if isinstance(root, (astroid.If, astroid.TryFinally, astroid.TryExcept)):
if any(root.nodes_of_class((astroid.Import, astroid.ImportFrom))):
return
self._first_non_import_node = node
visit_classdef = visit_for = visit_while = visit_functiondef
def _check_misplaced_future(self, node):
basename = node.modname
if basename == '__future__':
# check if this is the first non-docstring statement in the module
prev = node.previous_sibling()
if prev:
# consecutive future statements are possible
if not (isinstance(prev, astroid.ImportFrom)
and prev.modname == '__future__'):
self.add_message('misplaced-future', node=node)
return
def _check_same_line_imports(self, node):
# Detect duplicate imports on the same line.
names = (name for name, _ in node.names)
counter = collections.Counter(names)
for name, count in counter.items():
if count > 1:
self.add_message('reimported', node=node,
args=(name, node.fromlineno))
def _check_position(self, node):
"""Check `node` import or importfrom node position is correct
Send a message if `node` comes before another instruction
"""
# if a first non-import instruction has already been encountered,
# it means the import comes after it and therefore is not well placed
if self._first_non_import_node:
self.add_message('wrong-import-position', node=node,
args=node.as_string())
def _record_import(self, node, importedmodnode):
"""Record the package `node` imports from"""
importedname = importedmodnode.name if importedmodnode else None
if not importedname:
if isinstance(node, astroid.ImportFrom):
importedname = node.modname
else:
importedname = node.names[0][0].split('.')[0]
self._imports_stack.append((node, importedname))
@staticmethod
def _is_fallback_import(node, imports):
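        """Check whether the import node is mutually exclusive with one of the given
        imports (e.g. an alternative import inside a try/except ImportError block)."""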
imports = [import_node for (import_node, _) in imports]
return any(astroid.are_exclusive(import_node, node)
for import_node in imports)
def _check_imports_order(self, node):
"""Checks imports of module `node` are grouped by category
Imports must follow this order: standard, 3rd party, local
"""
extern_imports = []
local_imports = []
std_imports = []
isort_obj = isort.SortImports(
file_contents='', known_third_party=self.config.known_third_party,
known_standard_library=self.config.known_standard_library,
)
for node, modname in self._imports_stack:
package = modname.split('.')[0]
import_category = isort_obj.place_module(package)
if import_category in ('FUTURE', 'STDLIB'):
std_imports.append((node, package))
wrong_import = extern_imports or local_imports
if self._is_fallback_import(node, wrong_import):
continue
if wrong_import:
self.add_message('wrong-import-order', node=node,
args=('standard import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category in ('FIRSTPARTY', 'THIRDPARTY'):
extern_imports.append((node, package))
wrong_import = local_imports
if wrong_import:
self.add_message('wrong-import-order', node=node,
args=('external import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category == 'LOCALFOLDER':
local_imports.append((node, package))
return std_imports, extern_imports, local_imports
def _get_imported_module(self, importnode, modname):
try:
return importnode.do_import_module(modname)
except astroid.TooManyLevelsError:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
self.add_message('relative-beyond-top-level', node=importnode)
except astroid.AstroidBuildingException:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
dotted_modname = _get_import_name(importnode, modname)
self.add_message('import-error', args=repr(dotted_modname),
node=importnode)
def _check_relative_import(self, modnode, importnode, importedmodnode,
importedasname):
"""check relative import. node is either an Import or From node, modname
the imported module name.
"""
if not self.linter.is_message_enabled('relative-import'):
return
if importedmodnode.file is None:
return False # built-in module
if modnode is importedmodnode:
return False # module importing itself
if modnode.absolute_import_activated() or getattr(importnode, 'level', None):
return False
if importedmodnode.name != importedasname:
# this must be a relative import...
self.add_message('relative-import',
args=(importedasname, importedmodnode.name),
node=importnode)
def _add_imported_module(self, node, importedmodname):
"""notify an imported module, used to analyze dependencies"""
module_file = node.root().file
context_name = node.root().name
base = os.path.splitext(os.path.basename(module_file))[0]
# Determine if we have a `from .something import` in a package's
# __init__. This means the module will never be able to import
# itself using this condition (the level will be bigger or
# if the same module is named as the package, it will be different
# anyway).
if isinstance(node, astroid.ImportFrom):
if node.level and node.level > 0 and base == '__init__':
return
try:
importedmodname = get_module_part(importedmodname,
module_file)
except ImportError:
pass
if context_name == importedmodname:
self.add_message('import-self', node=node)
elif not is_standard_module(importedmodname):
# handle dependencies
importedmodnames = self.stats['dependencies'].setdefault(
importedmodname, set())
if context_name not in importedmodnames:
importedmodnames.add(context_name)
# update import graph
mgraph = self.import_graph[context_name]
if importedmodname not in mgraph:
mgraph.add(importedmodname)
def _check_deprecated_module(self, node, mod_path):
"""check if the module is deprecated"""
for mod_name in self.config.deprecated_modules:
if mod_path == mod_name or mod_path.startswith(mod_name + '.'):
self.add_message('deprecated-module', node=node, args=mod_path)
def _check_reimport(self, node, basename=None, level=None):
"""check if the import is necessary (i.e. not already done)"""
if not self.linter.is_message_enabled('reimported'):
return
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None))
for context, level in contexts:
for name, alias in node.names:
first = _get_first_import(node, context, name, basename, level, alias)
if first is not None:
self.add_message('reimported', node=node,
args=(name, first.fromlineno))
def _report_external_dependencies(self, sect, _, dummy):
"""return a verbatim layout for displaying dependencies"""
dep_info = _make_tree_defs(six.iteritems(self._external_dependencies_info()))
if not dep_info:
raise EmptyReport()
tree_str = _repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
def _report_dependencies_graph(self, sect, _, dummy):
"""write dependencies as a dot (graphviz) file"""
dep_info = self.stats['dependencies']
if not dep_info or not (self.config.import_graph
or self.config.ext_import_graph
or self.config.int_import_graph):
raise EmptyReport()
filename = self.config.import_graph
if filename:
_make_graph(filename, dep_info, sect, '')
filename = self.config.ext_import_graph
if filename:
_make_graph(filename, self._external_dependencies_info(),
sect, 'external ')
filename = self.config.int_import_graph
if filename:
_make_graph(filename, self._internal_dependencies_info(),
sect, 'internal ')
def _external_dependencies_info(self):
"""return cached external dependencies information or build and
cache them
"""
if self.__ext_dep_info is None:
package = self.linter.current_name
self.__ext_dep_info = result = {}
for importee, importers in six.iteritems(self.stats['dependencies']):
if not importee.startswith(package):
result[importee] = importers
return self.__ext_dep_info
def _internal_dependencies_info(self):
"""return cached internal dependencies information or build and
cache them
"""
if self.__int_dep_info is None:
package = self.linter.current_name
self.__int_dep_info = result = {}
for importee, importers in six.iteritems(self.stats['dependencies']):
if importee.startswith(package):
result[importee] = importers
return self.__int_dep_info
def _check_wildcard_imports(self, node):
for name, _ in node.names:
if name == '*':
self.add_message('wildcard-import', args=node.modname, node=node)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ImportsChecker(linter))
| 1 | 8,398 | Alternatively (and better) would be to look for the .level attribute of the node. If it's bigger or equal to 1, than that is a relative import. So "from . import x" should have level 1, while "from .. import z" should have level 2 and so on. The same should happen for "from .c import z". | PyCQA-pylint | py |
@@ -25,4 +25,14 @@ public abstract class AbstractASTXPathHandler implements XPathHandler {
public void initialize(IndependentContext context, Language language, Class<?> functionsClass) {
context.declareNamespace("pmd-" + language.getTerseName(), "java:" + functionsClass.getName());
}
+
+ @Override
+ public void initialize() {
+ // override if needed
+ }
+
+ @Override
+ public void initialize(IndependentContext context) {
+ // override if needed
+ }
} | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.ast.xpath;
import org.jaxen.Navigator;
import net.sourceforge.pmd.annotation.InternalApi;
import net.sourceforge.pmd.lang.Language;
import net.sourceforge.pmd.lang.XPathHandler;
import net.sf.saxon.sxpath.IndependentContext;
@Deprecated
@InternalApi
public abstract class AbstractASTXPathHandler implements XPathHandler {
@Override
public Navigator getNavigator() {
return new DocumentNavigator();
}
public void initialize(IndependentContext context, Language language, Class<?> functionsClass) {
context.declareNamespace("pmd-" + language.getTerseName(), "java:" + functionsClass.getName());
}
}
| 1 | 17,229 | Btw. this API must definitely change with PMD 7 - we are exposing here a implementation detail (that we use Saxon). And it happens, that the way, how custom functions are registered, changed with Saxon 9.5... which makes the need for a implementation agnostic API relevant... | pmd-pmd | java |
@@ -44,6 +44,8 @@ type GenericDeploymentSpec struct {
Timeout Duration `json:"timeout,omitempty" default:"6h"`
// List of encrypted secrets and targets that should be decoded before using.
Encryption *SecretEncryption `json:"encryption"`
+ // Notification to be sent to users via Slack or email
+ Notification *Notification `json:"notification"`
}
type DeploymentPlanner struct { | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"encoding/json"
"fmt"
"time"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
defaultWaitApprovalTimeout = Duration(6 * time.Hour)
defaultAnalysisQueryTimeout = Duration(30 * time.Second)
)
type GenericDeploymentSpec struct {
// Configuration used while planning deployment.
Planner DeploymentPlanner `json:"planner"`
// Forcibly use QuickSync or Pipeline when commit message matched the specified pattern.
CommitMatcher DeploymentCommitMatcher `json:"commitMatcher"`
// Pipeline for deploying progressively.
Pipeline *DeploymentPipeline `json:"pipeline"`
// The list of sealed secrets that should be decrypted.
SealedSecrets []SealedSecretMapping `json:"sealedSecrets"`
// List of directories or files where their changes will trigger the deployment.
// Regular expression can be used.
TriggerPaths []string `json:"triggerPaths,omitempty"`
// The maximum length of time to execute deployment before giving up.
// Default is 6h.
Timeout Duration `json:"timeout,omitempty" default:"6h"`
// List of encrypted secrets and targets that should be decoded before using.
Encryption *SecretEncryption `json:"encryption"`
}
type DeploymentPlanner struct {
	// Disable auto-detection of whether to use QUICK_SYNC or PROGRESSIVE_SYNC.
	// Always use the specified pipeline for all deployments.
AlwaysUsePipeline bool `json:"alwaysUsePipeline"`
}
func (s *GenericDeploymentSpec) Validate() error {
if s.Pipeline != nil {
for _, stage := range s.Pipeline.Stages {
if stage.AnalysisStageOptions != nil {
if err := stage.AnalysisStageOptions.Validate(); err != nil {
return err
}
}
}
}
if e := s.Encryption; e != nil {
if err := e.Validate(); err != nil {
return err
}
}
return nil
}
func (s GenericDeploymentSpec) GetStage(index int32) (PipelineStage, bool) {
if s.Pipeline == nil {
return PipelineStage{}, false
}
if int(index) >= len(s.Pipeline.Stages) {
return PipelineStage{}, false
}
return s.Pipeline.Stages[index], true
}
// HasStage checks if the given stage is included in the pipeline.
func (s GenericDeploymentSpec) HasStage(stage model.Stage) bool {
if s.Pipeline == nil {
return false
}
for _, s := range s.Pipeline.Stages {
if s.Name == stage {
return true
}
}
return false
}
// DeploymentCommitMatcher provides a way to decide how to deploy.
type DeploymentCommitMatcher struct {
// It makes sure to perform syncing if the commit message matches this regular expression.
QuickSync string `json:"quickSync"`
// It makes sure to perform pipeline if the commit message matches this regular expression.
Pipeline string `json:"pipeline"`
}
// DeploymentPipeline represents the way to deploy the application.
// The pipeline is triggered by changes in any of the following objects:
// - Target PodSpec (Target can be Deployment, DaemonSet, StatefulSet)
// - ConfigMaps, Secrets that are mounted as volumes or envs in the deployment.
type DeploymentPipeline struct {
Stages []PipelineStage `json:"stages"`
}
// PipelineStage represents a single stage of a pipeline.
// This is used as a generic struct for all stage type.
type PipelineStage struct {
Id string
Name model.Stage
Desc string
Timeout Duration
WaitStageOptions *WaitStageOptions
WaitApprovalStageOptions *WaitApprovalStageOptions
AnalysisStageOptions *AnalysisStageOptions
K8sPrimaryRolloutStageOptions *K8sPrimaryRolloutStageOptions
K8sCanaryRolloutStageOptions *K8sCanaryRolloutStageOptions
K8sCanaryCleanStageOptions *K8sCanaryCleanStageOptions
K8sBaselineRolloutStageOptions *K8sBaselineRolloutStageOptions
K8sBaselineCleanStageOptions *K8sBaselineCleanStageOptions
K8sTrafficRoutingStageOptions *K8sTrafficRoutingStageOptions
TerraformSyncStageOptions *TerraformSyncStageOptions
TerraformPlanStageOptions *TerraformPlanStageOptions
TerraformApplyStageOptions *TerraformApplyStageOptions
CloudRunSyncStageOptions *CloudRunSyncStageOptions
CloudRunPromoteStageOptions *CloudRunPromoteStageOptions
LambdaSyncStageOptions *LambdaSyncStageOptions
LambdaCanaryRolloutStageOptions *LambdaCanaryRolloutStageOptions
LambdaPromoteStageOptions *LambdaPromoteStageOptions
ECSSyncStageOptions *ECSSyncStageOptions
ECSCanaryRolloutStageOptions *ECSCanaryRolloutStageOptions
ECSPrimaryRolloutStageOptions *ECSPrimaryRolloutStageOptions
ECSCanaryCleanStageOptions *ECSCanaryCleanStageOptions
ECSTrafficRoutingStageOptions *ECSTrafficRoutingStageOptions
}
type genericPipelineStage struct {
Id string `json:"id"`
Name model.Stage `json:"name"`
Desc string `json:"desc,omitempty"`
Timeout Duration `json:"timeout"`
With json.RawMessage `json:"with"`
}
func (s *PipelineStage) UnmarshalJSON(data []byte) error {
var err error
gs := genericPipelineStage{}
if err = json.Unmarshal(data, &gs); err != nil {
return err
}
s.Id = gs.Id
s.Name = gs.Name
s.Desc = gs.Desc
s.Timeout = gs.Timeout
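	// Decode the stage-specific options in "with" into the struct that matches the stage name.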
switch s.Name {
case model.StageWait:
s.WaitStageOptions = &WaitStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.WaitStageOptions)
}
case model.StageWaitApproval:
s.WaitApprovalStageOptions = &WaitApprovalStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.WaitApprovalStageOptions)
}
if s.WaitApprovalStageOptions.Timeout <= 0 {
s.WaitApprovalStageOptions.Timeout = defaultWaitApprovalTimeout
}
case model.StageAnalysis:
s.AnalysisStageOptions = &AnalysisStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.AnalysisStageOptions)
}
for i := 0; i < len(s.AnalysisStageOptions.Metrics); i++ {
if s.AnalysisStageOptions.Metrics[i].Timeout <= 0 {
s.AnalysisStageOptions.Metrics[i].Timeout = defaultAnalysisQueryTimeout
}
}
case model.StageK8sPrimaryRollout:
s.K8sPrimaryRolloutStageOptions = &K8sPrimaryRolloutStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.K8sPrimaryRolloutStageOptions)
}
case model.StageK8sCanaryRollout:
s.K8sCanaryRolloutStageOptions = &K8sCanaryRolloutStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.K8sCanaryRolloutStageOptions)
}
case model.StageK8sCanaryClean:
s.K8sCanaryCleanStageOptions = &K8sCanaryCleanStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.K8sCanaryCleanStageOptions)
}
case model.StageK8sBaselineRollout:
s.K8sBaselineRolloutStageOptions = &K8sBaselineRolloutStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.K8sBaselineRolloutStageOptions)
}
case model.StageK8sBaselineClean:
s.K8sBaselineCleanStageOptions = &K8sBaselineCleanStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.K8sBaselineCleanStageOptions)
}
case model.StageK8sTrafficRouting:
s.K8sTrafficRoutingStageOptions = &K8sTrafficRoutingStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.K8sTrafficRoutingStageOptions)
}
case model.StageTerraformSync:
s.TerraformSyncStageOptions = &TerraformSyncStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.TerraformSyncStageOptions)
}
case model.StageTerraformPlan:
s.TerraformPlanStageOptions = &TerraformPlanStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.TerraformPlanStageOptions)
}
case model.StageTerraformApply:
s.TerraformApplyStageOptions = &TerraformApplyStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.TerraformApplyStageOptions)
}
case model.StageCloudRunSync:
s.CloudRunSyncStageOptions = &CloudRunSyncStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.CloudRunSyncStageOptions)
}
case model.StageCloudRunPromote:
s.CloudRunPromoteStageOptions = &CloudRunPromoteStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.CloudRunPromoteStageOptions)
}
case model.StageLambdaSync:
s.LambdaSyncStageOptions = &LambdaSyncStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.LambdaSyncStageOptions)
}
case model.StageLambdaPromote:
s.LambdaPromoteStageOptions = &LambdaPromoteStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.LambdaPromoteStageOptions)
}
case model.StageLambdaCanaryRollout:
s.LambdaCanaryRolloutStageOptions = &LambdaCanaryRolloutStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.LambdaCanaryRolloutStageOptions)
}
case model.StageECSSync:
s.ECSSyncStageOptions = &ECSSyncStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.ECSSyncStageOptions)
}
case model.StageECSCanaryRollout:
s.ECSCanaryRolloutStageOptions = &ECSCanaryRolloutStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.ECSCanaryRolloutStageOptions)
}
case model.StageECSPrimaryRollout:
s.ECSPrimaryRolloutStageOptions = &ECSPrimaryRolloutStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.ECSPrimaryRolloutStageOptions)
}
case model.StageECSCanaryClean:
s.ECSCanaryCleanStageOptions = &ECSCanaryCleanStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.ECSCanaryCleanStageOptions)
}
case model.StageECSTrafficRouting:
s.ECSTrafficRoutingStageOptions = &ECSTrafficRoutingStageOptions{}
if len(gs.With) > 0 {
err = json.Unmarshal(gs.With, s.ECSTrafficRoutingStageOptions)
}
default:
err = fmt.Errorf("unsupported stage name: %s", s.Name)
}
return err
}
// WaitStageOptions contains all configurable values for a WAIT stage.
type WaitStageOptions struct {
Duration Duration `json:"duration"`
}
// WaitApprovalStageOptions contains all configurable values for a WAIT_APPROVAL stage.
type WaitApprovalStageOptions struct {
// The maximum length of time to wait before giving up.
// Defaults to 6h.
Timeout Duration `json:"timeout"`
Approvers []string `json:"approvers"`
}
// AnalysisStageOptions contains all configurable values for a K8S_ANALYSIS stage.
type AnalysisStageOptions struct {
// How long the analysis process should be executed.
Duration Duration `json:"duration"`
	// TODO: Consider how to handle a pod restart
	// Possible count of pod restarts
RestartThreshold int `json:"restartThreshold"`
Metrics []TemplatableAnalysisMetrics `json:"metrics"`
Logs []TemplatableAnalysisLog `json:"logs"`
Https []TemplatableAnalysisHTTP `json:"https"`
}
func (a *AnalysisStageOptions) Validate() error {
if a.Duration == 0 {
return fmt.Errorf("the ANALYSIS stage requires duration field")
}
return nil
}
type AnalysisTemplateRef struct {
Name string `json:"name"`
Args map[string]string `json:"args"`
}
// TemplatableAnalysisMetrics wraps AnalysisMetrics to allow specify template to use.
type TemplatableAnalysisMetrics struct {
AnalysisMetrics
Template AnalysisTemplateRef `json:"template"`
}
// TemplatableAnalysisLog wraps AnalysisLog to allow specify template to use.
type TemplatableAnalysisLog struct {
AnalysisLog
Template AnalysisTemplateRef `json:"template"`
}
// TemplatableAnalysisHTTP wraps AnalysisHTTP to allow specify template to use.
type TemplatableAnalysisHTTP struct {
AnalysisHTTP
Template AnalysisTemplateRef `json:"template"`
}
type SealedSecretMapping struct {
// Relative path from the application directory to sealed secret file.
Path string `json:"path"`
// The filename for the decrypted secret.
// Empty means the same name with the sealed secret file.
OutFilename string `json:"outFilename"`
// The directory name where to put the decrypted secret.
// Empty means the same directory with the sealed secret file.
OutDir string `json:"outDir"`
}
type SecretEncryption struct {
// List of encrypted secrets.
EncryptedSecrets map[string]string `json:"encryptedSecrets"`
// List of files to be decrypted before using.
DecryptionTargets []string `json:"decryptionTargets"`
}
func (e *SecretEncryption) Validate() error {
for k, v := range e.EncryptedSecrets {
if k == "" {
return fmt.Errorf("key field in encryptedSecrets must not be empty")
}
if v == "" {
return fmt.Errorf("value field of %s in encryptedSecrets must not be empty", k)
}
}
return nil
}
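// Example (not part of the original file): a minimal sketch showing how a
// SecretEncryption block might be populated and checked with Validate. The
// key, value, and target file below are illustrative placeholders only.
func exampleValidateSecretEncryption() error {
	e := &SecretEncryption{
		EncryptedSecrets: map[string]string{
			"password": "AQB3ZXhhbXBsZS1jaXBoZXJ0ZXh0",
		},
		DecryptionTargets: []string{"secret.yaml"},
	}
	// Validate rejects empty keys and empty encrypted values.
	return e.Validate()
}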
| 1 | 19,952 | nit: `Additional configuration used while sending notification to external services.` | pipe-cd-pipe | go |
@@ -1656,7 +1656,7 @@ class TargetLocator {
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
- setParameter('name', nameOrHandle),
+ setParameter('handle', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview The heart of the WebDriver JavaScript API.
*/
'use strict';
const actions = require('./actions');
const by = require('./by');
const Capabilities = require('./capabilities').Capabilities;
const command = require('./command');
const error = require('./error');
const input = require('./input');
const logging = require('./logging');
const Session = require('./session').Session;
const Symbols = require('./symbols');
const promise = require('./promise');
/**
* Defines a condition for use with WebDriver's {@linkplain WebDriver#wait wait
* command}.
*
* @template OUT
*/
class Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): OUT} fn The condition function to
* evaluate on each iteration of the wait loop.
*/
constructor(message, fn) {
/** @private {string} */
this.description_ = 'Waiting ' + message;
/** @type {function(!WebDriver): OUT} */
this.fn = fn;
}
/** @return {string} A description of this condition. */
description() {
return this.description_;
}
}
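// Usage sketch (not part of the original source): defining a custom Condition
// and handing it to WebDriver#wait. The `driver` argument is assumed to be a
// WebDriver instance created elsewhere; the title substring is illustrative.
function exampleWaitForTitle(driver) {
  let titleContainsWebdriver = new Condition(
      'for the page title to contain "webdriver"',
      d => d.getTitle().then(title => title.indexOf('webdriver') !== -1));
  // Polls the condition until it returns a truthy value or 5 seconds elapse.
  return driver.wait(titleContainsWebdriver, 5000);
}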
/**
* Defines a condition that will result in a {@link WebElement}.
*
* @extends {Condition<!(WebElement|promise.Promise<!WebElement>)>}
*/
class WebElementCondition extends Condition {
/**
* @param {string} message A descriptive error message. Should complete the
* sentence "Waiting [...]"
* @param {function(!WebDriver): !(WebElement|promise.Promise<!WebElement>)}
* fn The condition function to evaluate on each iteration of the wait
* loop.
*/
constructor(message, fn) {
super(message, fn);
}
}
//////////////////////////////////////////////////////////////////////////////
//
// WebDriver
//
//////////////////////////////////////////////////////////////////////////////
/**
* Translates a command to its wire-protocol representation before passing it
* to the given `executor` for execution.
* @param {!command.Executor} executor The executor to use.
* @param {!command.Command} command The command to execute.
* @return {!Promise} A promise that will resolve with the command response.
*/
function executeCommand(executor, command) {
return toWireValue(command.getParameters()).
then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
});
}
/**
* Converts an object to its JSON representation in the WebDriver wire protocol.
* When converting values of type object, the following steps will be taken:
* <ol>
* <li>if the object is a WebElement, the return value will be the element's
* server ID
* <li>if the object defines a {@link Symbols.serialize} method, this algorithm
* will be recursively applied to the object's serialized representation
* <li>if the object provides a "toJSON" function, this algorithm will
* recursively be applied to the result of that function
* <li>otherwise, the value of each key will be recursively converted according
* to the rules above.
* </ol>
*
* @param {*} obj The object to convert.
* @return {!Promise<?>} A promise that will resolve to the input value's JSON
* representation.
*/
function toWireValue(obj) {
if (promise.isPromise(obj)) {
return Promise.resolve(obj).then(toWireValue);
}
return Promise.resolve(convertValue(obj));
}
function convertValue(value) {
if (value === void 0 || value === null) {
return value;
}
if (typeof value === 'boolean'
|| typeof value === 'number'
|| typeof value === 'string') {
return value;
}
if (Array.isArray(value)) {
return convertKeys(value);
}
if (typeof value === 'function') {
return '' + value;
}
if (typeof value[Symbols.serialize] === 'function') {
return toWireValue(value[Symbols.serialize]());
} else if (typeof value.toJSON === 'function') {
return toWireValue(value.toJSON());
}
return convertKeys(value);
}
function convertKeys(obj) {
const isArray = Array.isArray(obj);
const numKeys = isArray ? obj.length : Object.keys(obj).length;
const ret = isArray ? new Array(numKeys) : {};
if (!numKeys) {
return Promise.resolve(ret);
}
let numResolved = 0;
function forEachKey(obj, fn) {
if (Array.isArray(obj)) {
for (let i = 0, n = obj.length; i < n; i++) {
fn(obj[i], i);
}
} else {
for (let key in obj) {
fn(obj[key], key);
}
}
}
return new Promise(function(done, reject) {
forEachKey(obj, function(value, key) {
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
value = convertValue(value);
if (promise.isPromise(value)) {
value.then(toWireValue).then(setValue, reject);
} else {
setValue(value);
}
}
function setValue(value) {
ret[key] = value;
maybeFulfill();
}
});
function maybeFulfill() {
if (++numResolved === numKeys) {
done(ret);
}
}
});
}
/**
* Converts a value from its JSON representation according to the WebDriver wire
* protocol. Any JSON object that defines a WebElement ID will be decoded to a
* {@link WebElement} object. All other values will be passed through as is.
*
* @param {!WebDriver} driver The driver to use as the parent of any unwrapped
* {@link WebElement} values.
* @param {*} value The value to convert.
* @return {*} The converted value.
*/
function fromWireValue(driver, value) {
if (Array.isArray(value)) {
value = value.map(v => fromWireValue(driver, v));
} else if (WebElement.isId(value)) {
let id = WebElement.extractId(value);
value = new WebElement(driver, id);
} else if (value && typeof value === 'object') {
let result = {};
for (let key in value) {
if (value.hasOwnProperty(key)) {
result[key] = fromWireValue(driver, value[key]);
}
}
value = result;
}
return value;
}
/**
* Creates a new WebDriver client, which provides control over a browser.
*
* Every command.Command returns a {@link promise.Promise} that
* represents the result of that command. Callbacks may be registered on this
* object to manipulate the command result or catch an expected error. Any
* commands scheduled with a callback are considered sub-commands and will
* execute before the next command in the current frame. For example:
*
* var message = [];
* driver.call(message.push, message, 'a').then(function() {
* driver.call(message.push, message, 'b');
* });
* driver.call(message.push, message, 'c');
* driver.call(function() {
* alert('message is abc? ' + (message.join('') == 'abc'));
* });
*
*/
class WebDriver {
/**
* @param {!(Session|promise.Promise<!Session>)} session Either a
* known session or a promise that will be resolved to a session.
* @param {!command.Executor} executor The executor to use when sending
* commands to the browser.
* @param {promise.ControlFlow=} opt_flow The flow to
* schedule commands through. Defaults to the active flow object.
*/
constructor(session, executor, opt_flow) {
/** @private {!promise.Promise<!Session>} */
this.session_ = promise.fulfilled(session);
/** @private {!command.Executor} */
this.executor_ = executor;
/** @private {!promise.ControlFlow} */
this.flow_ = opt_flow || promise.controlFlow();
/** @private {input.FileDetector} */
this.fileDetector_ = null;
}
/**
* Creates a new WebDriver client for an existing session.
* @param {!command.Executor} executor Command executor to use when querying
* for session details.
* @param {string} sessionId ID of the session to attach to.
* @param {promise.ControlFlow=} opt_flow The control flow all
* driver commands should execute under. Defaults to the
* {@link promise.controlFlow() currently active} control flow.
* @return {!WebDriver} A new client for the specified session.
*/
static attachToSession(executor, sessionId, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.DESCRIBE_SESSION)
.setParameter('sessionId', sessionId);
let session = flow.execute(
() => executeCommand(executor, cmd).catch(err => {
// The DESCRIBE_SESSION command is not supported by the W3C spec, so
// if we get back an unknown command, just return a session with
// unknown capabilities.
if (err instanceof error.UnknownCommandError) {
return new Session(sessionId, new Capabilities);
}
throw err;
}),
'WebDriver.attachToSession()');
return new WebDriver(session, executor, flow);
}
/**
* Creates a new WebDriver session.
*
* By default, the requested session `capabilities` are merely "desired" and
* the remote end will still create a new session even if it cannot satisfy
* all of the requested capabilities. You can query which capabilities a
* session actually has using the
* {@linkplain #getCapabilities() getCapabilities()} method on the returned
* WebDriver instance.
*
* To define _required capabilities_, provide the `capabilities` as an object
* literal with `required` and `desired` keys. The `desired` key may be
* omitted if all capabilities are required, and vice versa. If the server
* cannot create a session with all of the required capabilities, it will
* return an {@linkplain error.SessionNotCreatedError}.
*
* let required = new Capabilities().set('browserName', 'firefox');
* let desired = new Capabilities().set('version', '45');
* let driver = WebDriver.createSession(executor, {required, desired});
*
* This function will always return a WebDriver instance. If there is an error
* creating the session, such as the aforementioned SessionNotCreatedError,
* the driver will have a rejected {@linkplain #getSession session} promise.
* It is recommended that this promise is left _unhandled_ so it will
* propagate through the {@linkplain promise.ControlFlow control flow} and
* cause subsequent commands to fail.
*
* let required = Capabilities.firefox();
* let driver = WebDriver.createSession(executor, {required});
*
* // If the createSession operation failed, then this command will also
   *     // fail, propagating the creation failure.
* driver.get('http://www.google.com').catch(e => console.log(e));
*
* @param {!command.Executor} executor The executor to create the new session
* with.
* @param {(!Capabilities|
* {desired: (Capabilities|undefined),
* required: (Capabilities|undefined)})} capabilities The desired
* capabilities for the new session.
* @param {promise.ControlFlow=} opt_flow The control flow all driver
* commands should execute under, including the initial session creation.
* Defaults to the {@link promise.controlFlow() currently active}
* control flow.
* @return {!WebDriver} The driver for the newly created session.
*/
static createSession(executor, capabilities, opt_flow) {
let flow = opt_flow || promise.controlFlow();
let cmd = new command.Command(command.Name.NEW_SESSION);
if (capabilities && (capabilities.desired || capabilities.required)) {
cmd.setParameter('desiredCapabilities', capabilities.desired);
cmd.setParameter('requiredCapabilities', capabilities.required);
} else {
cmd.setParameter('desiredCapabilities', capabilities);
}
let session = flow.execute(
() => executeCommand(executor, cmd),
'WebDriver.createSession()');
return new WebDriver(session, executor, flow);
}
/**
* @return {!promise.ControlFlow} The control flow used by this
* instance.
*/
controlFlow() {
return this.flow_;
}
/**
* Schedules a {@link command.Command} to be executed by this driver's
* {@link command.Executor}.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
*/
schedule(command, description) {
var self = this;
checkHasNotQuit();
command.setParameter('sessionId', this.session_);
// If any of the command parameters are rejected promises, those
// rejections may be reported as unhandled before the control flow
    // attempts to execute the command. To ensure parameter errors
// propagate through the command itself, we resolve all of the
// command parameters now, but suppress any errors until the ControlFlow
// actually executes the command. This addresses scenarios like catching
// an element not found error in:
//
// driver.findElement(By.id('foo')).click().catch(function(e) {
// if (e instanceof NoSuchElementError) {
// // Do something.
// }
// });
var prepCommand = toWireValue(command.getParameters());
prepCommand.catch(function() {});
var flow = this.flow_;
var executor = this.executor_;
return flow.execute(function() {
// A call to WebDriver.quit() may have been scheduled in the same event
// loop as this |command|, which would prevent us from detecting that the
// driver has quit above. Therefore, we need to make another quick check.
// We still check above so we can fail as early as possible.
checkHasNotQuit();
// Retrieve resolved command parameters; any previously suppressed errors
// will now propagate up through the control flow as part of the command
// execution.
return prepCommand.then(function(parameters) {
command.setParameters(parameters);
return executor.execute(command);
}).then(value => fromWireValue(self, value));
}, description);
function checkHasNotQuit() {
if (!self.session_) {
throw new error.NoSuchSessionError(
'This driver instance does not have a valid session ID ' +
'(did you call WebDriver.quit()?) and may no longer be ' +
'used.');
}
}
}
/**
* Sets the {@linkplain input.FileDetector file detector} that should be
* used with this instance.
* @param {input.FileDetector} detector The detector to use or {@code null}.
*/
setFileDetector(detector) {
this.fileDetector_ = detector;
}
/**
* @return {!command.Executor} The command executor used by this instance.
*/
getExecutor() {
return this.executor_;
}
/**
* @return {!promise.Promise<!Session>} A promise for this client's
* session.
*/
getSession() {
return this.session_;
}
/**
* @return {!promise.Promise<!Capabilities>} A promise
* that will resolve with the this instance's capabilities.
*/
getCapabilities() {
return this.session_.then(session => session.getCapabilities());
}
/**
* Schedules a command to quit the current session. After calling quit, this
* instance will be invalidated and may no longer be used to issue commands
* against the browser.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
quit() {
var result = this.schedule(
new command.Command(command.Name.QUIT),
'WebDriver.quit()');
// Delete our session ID when the quit command finishes; this will allow us
    // to throw an error when attempting to use a driver post-quit.
return result.finally(() => delete this.session_);
}
/**
* Creates a new action sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.ActionSequence#perform} is
* called. Example:
*
* driver.actions().
* mouseDown(element1).
* mouseMove(element2).
* mouseUp().
* perform();
*
* @return {!actions.ActionSequence} A new action sequence for this instance.
*/
actions() {
return new actions.ActionSequence(this);
}
/**
* Creates a new touch sequence using this driver. The sequence will not be
* scheduled for execution until {@link actions.TouchSequence#perform} is
* called. Example:
*
* driver.touchActions().
* tap(element1).
* doubleTap(element2).
* perform();
*
* @return {!actions.TouchSequence} A new touch sequence for this instance.
*/
touchActions() {
return new actions.TouchSequence(this);
}
/**
* Schedules a command to execute JavaScript in the context of the currently
* selected frame or window. The script fragment will be executed as the body
* of an anonymous function. If the script is provided as a function object,
* that function will be converted to a string for injection into the target
* window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@linkplain WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* The script may refer to any variables accessible from the current window.
* Furthermore, the script will execute in the window's context, thus
* {@code document} may be used to refer to the current document. Any local
* variables will not be available once the script has finished executing,
* though global variables will persist.
*
* If the script has a return value (i.e. if the script contains a return
* statement), then the following steps will be taken for resolving this
   * function's return value:
   *
   * - For an HTML element, the value will resolve to a {@linkplain WebElement}
   * - Null and undefined return values will resolve to null
   * - Booleans, numbers, and strings will resolve as is
   * - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
* scripts return value.
* @template T
*/
executeScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args =
arguments.length > 1 ? Array.prototype.slice.call(arguments, 1) : [];
return this.schedule(
new command.Command(command.Name.EXECUTE_SCRIPT).
setParameter('script', script).
setParameter('args', args),
'WebDriver.executeScript()');
}
/**
* Schedules a command to execute asynchronous JavaScript in the context of the
* currently selected frame or window. The script fragment will be executed as
* the body of an anonymous function. If the script is provided as a function
* object, that function will be converted to a string for injection into the
* target window.
*
* Any arguments provided in addition to the script will be included as script
* arguments and may be referenced using the {@code arguments} object.
* Arguments may be a boolean, number, string, or {@code WebElement}.
* Arrays and objects may also be used as script arguments as long as each item
* adheres to the types previously mentioned.
*
* Unlike executing synchronous JavaScript with {@link #executeScript},
* scripts executed with this function must explicitly signal they are finished
* by invoking the provided callback. This callback will always be injected
* into the executed function as the last argument, and thus may be referenced
* with {@code arguments[arguments.length - 1]}. The following steps will be
   * taken for resolving this function's return value against the first argument
* to the script's callback function:
*
   * - For an HTML element, the value will resolve to a
* {@link WebElement}
* - Null and undefined return values will resolve to null
* - Booleans, numbers, and strings will resolve as is
* - Functions will resolve to their string representation
* - For arrays and objects, each member item will be converted according to
* the rules above
*
* __Example #1:__ Performing a sleep that is synchronized with the currently
* selected window:
*
* var start = new Date().getTime();
* driver.executeAsyncScript(
* 'window.setTimeout(arguments[arguments.length - 1], 500);').
* then(function() {
* console.log(
* 'Elapsed time: ' + (new Date().getTime() - start) + ' ms');
* });
*
* __Example #2:__ Synchronizing a test with an AJAX application:
*
* var button = driver.findElement(By.id('compose-button'));
* button.click();
* driver.executeAsyncScript(
* 'var callback = arguments[arguments.length - 1];' +
* 'mailClient.getComposeWindowWidget().onload(callback);');
* driver.switchTo().frame('composeWidget');
* driver.findElement(By.id('to')).sendKeys('[email protected]');
*
* __Example #3:__ Injecting a XMLHttpRequest and waiting for the result. In
* this example, the inject script is specified with a function literal. When
* using this format, the function is converted to a string for injection, so it
* should not reference any symbols not defined in the scope of the page under
* test.
*
* driver.executeAsyncScript(function() {
* var callback = arguments[arguments.length - 1];
* var xhr = new XMLHttpRequest();
* xhr.open("GET", "/resource/data.json", true);
* xhr.onreadystatechange = function() {
* if (xhr.readyState == 4) {
* callback(xhr.responseText);
* }
* };
* xhr.send('');
* }).then(function(str) {
* console.log(JSON.parse(str)['food']);
* });
*
* @param {!(string|Function)} script The script to execute.
* @param {...*} var_args The arguments to pass to the script.
* @return {!promise.Promise<T>} A promise that will resolve to the
* scripts return value.
* @template T
*/
executeAsyncScript(script, var_args) {
if (typeof script === 'function') {
script = 'return (' + script + ').apply(null, arguments);';
}
let args = Array.prototype.slice.call(arguments, 1);
return this.schedule(
new command.Command(command.Name.EXECUTE_ASYNC_SCRIPT).
setParameter('script', script).
setParameter('args', args),
        'WebDriver.executeAsyncScript()');
}
/**
* Schedules a command to execute a custom function.
* @param {function(...): (T|promise.Promise<T>)} fn The function to
* execute.
* @param {Object=} opt_scope The object in whose scope to execute the function.
* @param {...*} var_args Any arguments to pass to the function.
   * @return {!promise.Promise<T>} A promise that will be resolved
* with the function's result.
* @template T
*/
call(fn, opt_scope, var_args) {
let args = Array.prototype.slice.call(arguments, 2);
let flow = this.flow_;
return flow.execute(function() {
return promise.fullyResolved(args).then(function(args) {
if (promise.isGenerator(fn)) {
args.unshift(fn, opt_scope);
return promise.consume.apply(null, args);
}
return fn.apply(opt_scope, args);
});
}, 'WebDriver.call(' + (fn.name || 'function') + ')');
}
/**
* Schedules a command to wait for a condition to hold. The condition may be
* specified by a {@link Condition}, as a custom function, or as any
* promise-like thenable.
*
* For a {@link Condition} or function, the wait will repeatedly
* evaluate the condition until it returns a truthy value. If any errors occur
* while evaluating the condition, they will be allowed to propagate. In the
* event a condition returns a {@link promise.Promise promise}, the polling
* loop will wait for it to be resolved and use the resolved value for whether
   * the condition has been satisfied. Note the resolution time for a promise
* is factored into whether a wait has timed out.
*
* Note, if the provided condition is a {@link WebElementCondition}, then
* the wait will return a {@link WebElementPromise} that will resolve to the
   * element that satisfied the condition.
*
* _Example:_ waiting up to 10 seconds for an element to be present on the
* page.
*
* var button = driver.wait(until.elementLocated(By.id('foo')), 10000);
* button.click();
*
* This function may also be used to block the command flow on the resolution
* of any thenable promise object. When given a promise, the command will
* simply wait for its resolution before completing. A timeout may be provided
* to fail the command if the promise does not resolve before the timeout
* expires.
*
* _Example:_ Suppose you have a function, `startTestServer`, that returns a
* promise for when a server is ready for requests. You can block a WebDriver
* client on this promise with:
*
* var started = startTestServer();
* driver.wait(started, 5 * 1000, 'Server should start within 5 seconds');
* driver.get(getServerUrl());
*
* @param {!(promise.Promise<T>|
* Condition<T>|
* function(!WebDriver): T)} condition The condition to
* wait on, defined as a promise, condition object, or a function to
* evaluate as a condition.
* @param {number=} opt_timeout How long to wait for the condition to be true.
* @param {string=} opt_message An optional message to use if the wait times
* out.
* @return {!(promise.Promise<T>|WebElementPromise)} A promise that will be
* resolved with the first truthy value returned by the condition
* function, or rejected if the condition times out. If the input
   *     condition is an instance of a {@link WebElementCondition},
* the returned value will be a {@link WebElementPromise}.
* @template T
*/
wait(condition, opt_timeout, opt_message) {
if (promise.isPromise(condition)) {
return this.flow_.wait(
/** @type {!promise.Promise} */(condition),
opt_timeout, opt_message);
}
var message = opt_message;
var fn = /** @type {!Function} */(condition);
if (condition instanceof Condition) {
message = message || condition.description();
fn = condition.fn;
}
var driver = this;
var result = this.flow_.wait(function() {
if (promise.isGenerator(fn)) {
return promise.consume(fn, null, [driver]);
}
return fn(driver);
}, opt_timeout, message);
if (condition instanceof WebElementCondition) {
result = new WebElementPromise(this, result.then(function(value) {
if (!(value instanceof WebElement)) {
throw TypeError(
'WebElementCondition did not resolve to a WebElement: '
+ Object.prototype.toString.call(value));
}
return value;
}));
}
return result;
}
/**
* Schedules a command to make the driver sleep for the given amount of time.
* @param {number} ms The amount of time, in milliseconds, to sleep.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the sleep has finished.
*/
sleep(ms) {
return this.flow_.timeout(ms, 'WebDriver.sleep(' + ms + ')');
}
/**
* Schedules a command to retrieve the current window handle.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current window handle.
*/
getWindowHandle() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_WINDOW_HANDLE),
'WebDriver.getWindowHandle()');
}
/**
* Schedules a command to retrieve the current list of available window handles.
* @return {!promise.Promise.<!Array<string>>} A promise that will
* be resolved with an array of window handles.
*/
getAllWindowHandles() {
return this.schedule(
new command.Command(command.Name.GET_WINDOW_HANDLES),
'WebDriver.getAllWindowHandles()');
}
/**
* Schedules a command to retrieve the current page's source. The page source
* returned is a representation of the underlying DOM: do not expect it to be
* formatted or escaped in the same way as the response sent from the web
* server.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page source.
*/
getPageSource() {
return this.schedule(
new command.Command(command.Name.GET_PAGE_SOURCE),
'WebDriver.getPageSource()');
}
/**
* Schedules a command to close the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
close() {
return this.schedule(new command.Command(command.Name.CLOSE),
'WebDriver.close()');
}
/**
* Schedules a command to navigate to the given URL.
* @param {string} url The fully qualified URL to open.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the document has finished loading.
*/
get(url) {
return this.navigate().to(url);
}
/**
* Schedules a command to retrieve the URL of the current page.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current URL.
*/
getCurrentUrl() {
return this.schedule(
new command.Command(command.Name.GET_CURRENT_URL),
'WebDriver.getCurrentUrl()');
}
/**
* Schedules a command to retrieve the current page's title.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the current page's title.
*/
getTitle() {
return this.schedule(new command.Command(command.Name.GET_TITLE),
'WebDriver.getTitle()');
}
/**
* Schedule a command to find an element on the page. If the element cannot be
* found, a {@link bot.ErrorCode.NO_SUCH_ELEMENT} result will be returned
* by the driver. Unlike other commands, this error cannot be suppressed. In
* other words, scheduling a command to find an element doubles as an assert
* that the element is present on the page. To test whether an element is
* present on the page, use {@link #isElementPresent} instead.
*
* The search criteria for an element may be defined using one of the
* factories in the {@link webdriver.By} namespace, or as a short-hand
* {@link webdriver.By.Hash} object. For example, the following two statements
* are equivalent:
*
* var e1 = driver.findElement(By.id('foo'));
* var e2 = driver.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = driver.findElement(firstVisibleLink);
*
* function firstVisibleLink(driver) {
* var links = driver.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
let id;
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
id = this.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule(cmd, 'WebDriver.findElement(' + locator + ')');
}
return new WebElementPromise(this, id);
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search
* context.
* @return {!promise.Promise.<!WebElement>} A
   *     promise that will resolve to the located WebElement.
* @private
*/
findElementInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (Array.isArray(result)) {
result = result[0];
}
if (!(result instanceof WebElement)) {
throw new TypeError('Custom locator did not return a WebElement');
}
return result;
});
}
/**
* Schedule a command to search for multiple elements on the page.
*
* @param {!(by.By|Function)} locator The locator to use.
* @return {!promise.Promise.<!Array.<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
if (typeof locator === 'function') {
return this.findElementsInternal_(locator, this);
} else {
let cmd = new command.Command(command.Name.FIND_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
let res = this.schedule(cmd, 'WebDriver.findElements(' + locator + ')');
return res.catch(function(e) {
if (e instanceof error.NoSuchElementError) {
return [];
}
throw e;
});
}
}
/**
* @param {!Function} locatorFn The locator function to use.
* @param {!(WebDriver|WebElement)} context The search context.
* @return {!promise.Promise<!Array<!WebElement>>} A promise that
* will resolve to an array of WebElements.
* @private
*/
findElementsInternal_(locatorFn, context) {
return this.call(() => locatorFn(context)).then(function(result) {
if (result instanceof WebElement) {
return [result];
}
if (!Array.isArray(result)) {
return [];
}
return result.filter(function(item) {
return item instanceof WebElement;
});
});
}
/**
* Schedule a command to take a screenshot. The driver makes a best effort to
* return a screenshot of the following, in order of preference:
*
* 1. Entire page
* 2. Current window
* 3. Visible portion of the current frame
* 4. The entire display containing the browser
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot() {
return this.schedule(new command.Command(command.Name.SCREENSHOT),
'WebDriver.takeScreenshot()');
}
/**
* @return {!Options} The options interface for this instance.
*/
manage() {
return new Options(this);
}
/**
* @return {!Navigation} The navigation interface for this instance.
*/
navigate() {
return new Navigation(this);
}
/**
* @return {!TargetLocator} The target locator interface for this
* instance.
*/
switchTo() {
return new TargetLocator(this);
}
}
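// Usage sketch (not part of the original source): a small command flow built
// from the WebDriver methods defined above. The URL is illustrative and
// `driver` is assumed to come from WebDriver.createSession() elsewhere.
function exampleBasicFlow(driver) {
  driver.get('https://example.com');
  driver.getTitle().then(title => console.log('Title:', title));
  // Resolves to the screenshot as a base-64 encoded PNG string.
  return driver.takeScreenshot();
}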
/**
* Interface for navigating back and forth in the browser history.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.navigate()
*
* @see WebDriver#navigate()
*/
class Navigation {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to navigate to a new URL.
* @param {string} url The URL to navigate to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the URL has been loaded.
*/
to(url) {
return this.driver_.schedule(
new command.Command(command.Name.GET).
setParameter('url', url),
'WebDriver.navigate().to(' + url + ')');
}
/**
* Schedules a command to move backwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
back() {
return this.driver_.schedule(
new command.Command(command.Name.GO_BACK),
'WebDriver.navigate().back()');
}
/**
* Schedules a command to move forwards in the browser history.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
forward() {
return this.driver_.schedule(
new command.Command(command.Name.GO_FORWARD),
'WebDriver.navigate().forward()');
}
/**
* Schedules a command to refresh the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the navigation event has completed.
*/
refresh() {
return this.driver_.schedule(
new command.Command(command.Name.REFRESH),
'WebDriver.navigate().refresh()');
}
}
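// Usage sketch (not part of the original source): the history helpers exposed
// through WebDriver#navigate(). The URL is illustrative.
function exampleNavigateHistory(driver) {
  let nav = driver.navigate();
  nav.to('https://example.com/docs');
  nav.back();
  nav.forward();
  return nav.refresh();
}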
/**
* Provides methods for managing browser and driver state.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with {@linkplain WebDriver#manage() webdriver.manage()}.
*/
class Options {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Schedules a command to add a cookie.
*
* __Sample Usage:__
*
* // Set a basic cookie.
   *     driver.manage().addCookie({name: 'foo', value: 'bar'});
*
* // Set a cookie that expires in 10 minutes.
* let expiry = new Date(Date.now() + (10 * 60 * 1000));
   *     driver.manage().addCookie({name: 'foo', value: 'bar', expiry});
*
* // The cookie expiration may also be specified in seconds since epoch.
   *     driver.manage().addCookie({
* name: 'foo',
* value: 'bar',
* expiry: Math.floor(Date.now() / 1000)
* });
*
* @param {!Options.Cookie} spec Defines the cookie to add.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been added to the page.
* @throws {error.InvalidArgumentError} if any of the cookie parameters are
* invalid.
* @throws {TypeError} if `spec` is not a cookie object.
*/
addCookie(spec) {
if (!spec || typeof spec !== 'object') {
throw TypeError('addCookie called with non-cookie parameter');
}
// We do not allow '=' or ';' in the name.
let name = spec.name;
if (/[;=]/.test(name)) {
throw new error.InvalidArgumentError(
'Invalid cookie name "' + name + '"');
}
// We do not allow ';' in value.
let value = spec.value;
if (/;/.test(value)) {
throw new error.InvalidArgumentError(
'Invalid cookie value "' + value + '"');
}
let cookieString = name + '=' + value +
(spec.domain ? ';domain=' + spec.domain : '') +
(spec.path ? ';path=' + spec.path : '') +
(spec.secure ? ';secure' : '');
let expiry;
if (typeof spec.expiry === 'number') {
expiry = Math.floor(spec.expiry);
cookieString += ';expires=' + new Date(spec.expiry * 1000).toUTCString();
} else if (spec.expiry instanceof Date) {
let date = /** @type {!Date} */(spec.expiry);
expiry = Math.floor(date.getTime() / 1000);
cookieString += ';expires=' + date.toUTCString();
}
return this.driver_.schedule(
new command.Command(command.Name.ADD_COOKIE).
setParameter('cookie', {
'name': name,
'value': value,
'path': spec.path,
'domain': spec.domain,
'secure': !!spec.secure,
'expiry': expiry
}),
'WebDriver.manage().addCookie(' + cookieString + ')');
}
/**
* Schedules a command to delete all cookies visible to the current page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all cookies have been deleted.
*/
deleteAllCookies() {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_ALL_COOKIES),
'WebDriver.manage().deleteAllCookies()');
}
/**
* Schedules a command to delete the cookie with the given name. This command
* is a no-op if there is no cookie with the given name visible to the current
* page.
* @param {string} name The name of the cookie to delete.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the cookie has been deleted.
*/
deleteCookie(name) {
return this.driver_.schedule(
new command.Command(command.Name.DELETE_COOKIE).
setParameter('name', name),
'WebDriver.manage().deleteCookie(' + name + ')');
}
/**
* Schedules a command to retrieve all cookies visible to the current page.
* Each cookie will be returned as a JSON object as described by the WebDriver
* wire protocol.
* @return {!promise.Promise<!Array<!Options.Cookie>>} A promise that will be
* resolved with the cookies visible to the current browsing context.
*/
getCookies() {
return this.driver_.schedule(
new command.Command(command.Name.GET_ALL_COOKIES),
'WebDriver.manage().getCookies()');
}
/**
* Schedules a command to retrieve the cookie with the given name. Returns null
* if there is no such cookie. The cookie will be returned as a JSON object as
* described by the WebDriver wire protocol.
*
* @param {string} name The name of the cookie to retrieve.
* @return {!promise.Promise<?Options.Cookie>} A promise that will be resolved
* with the named cookie, or `null` if there is no such cookie.
*/
getCookie(name) {
return this.getCookies().then(function(cookies) {
for (let cookie of cookies) {
if (cookie && cookie['name'] === name) {
return cookie;
}
}
return null;
});
}
/**
* @return {!Logs} The interface for managing driver
* logs.
*/
logs() {
return new Logs(this.driver_);
}
/**
* @return {!Timeouts} The interface for managing driver timeouts.
*/
timeouts() {
return new Timeouts(this.driver_);
}
/**
* @return {!Window} The interface for managing the current window.
*/
window() {
return new Window(this.driver_);
}
}
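// Usage sketch (not part of the original source): a cookie round trip through
// the Options interface. The cookie name and value are illustrative only.
function exampleCookieRoundTrip(driver) {
  let options = driver.manage();
  options.addCookie({name: 'session', value: 'abc123'});
  options.getCookie('session').then(cookie => {
    console.log('session cookie value:', cookie && cookie.value);
  });
  return options.deleteCookie('session');
}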
/**
* A record object describing a browser cookie.
*
* @record
*/
Options.Cookie = function() {};
/**
* The name of the cookie.
*
* @type {string}
*/
Options.Cookie.prototype.name;
/**
* The cookie value.
*
* @type {string}
*/
Options.Cookie.prototype.value;
/**
* The cookie path. Defaults to "/" when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.path;
/**
* The domain the cookie is visible to. Defaults to the current browsing
* context's document's URL when adding a cookie.
*
* @type {(string|undefined)}
*/
Options.Cookie.prototype.domain;
/**
* Whether the cookie is a secure cookie. Defaults to false when adding a new
* cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.secure;
/**
* Whether the cookie is an HTTP only cookie. Defaults to false when adding a
* new cookie.
*
* @type {(boolean|undefined)}
*/
Options.Cookie.prototype.httpOnly;
/**
* When the cookie expires.
*
* When {@linkplain Options#addCookie() adding a cookie}, this may be specified
* in _seconds_ since Unix epoch (January 1, 1970). The expiry will default to
* 20 years in the future if omitted.
*
* The expiry is always returned in seconds since epoch when
* {@linkplain Options#getCookies() retrieving cookies} from the browser.
*
* @type {(!Date|number|undefined)}
*/
Options.Cookie.prototype.expiry;
/**
* An interface for managing timeout behavior for WebDriver instances.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().timeouts()
*
* @see WebDriver#manage()
* @see Options#timeouts()
*/
class Timeouts {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Specifies the amount of time the driver should wait when searching for an
* element if it is not immediately present.
*
* When searching for a single element, the driver should poll the page
* until the element has been found, or this timeout expires before failing
* with a {@link bot.ErrorCode.NO_SUCH_ELEMENT} error. When searching
* for multiple elements, the driver should poll the page until at least one
* element has been found or this timeout has expired.
*
   * Setting the wait timeout to 0 (its default value) disables implicit
* waiting.
*
* Increasing the implicit wait timeout should be used judiciously as it
* will have an adverse effect on test run time, especially when used with
* slower location strategies like XPath.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the implicit wait timeout has been set.
*/
implicitlyWait(ms) {
return this._scheduleCommand(ms, 'implicit', 'implicitlyWait');
}
/**
* Sets the amount of time to wait, in milliseconds, for an asynchronous
* script to finish execution before returning an error. If the timeout is
* less than or equal to 0, the script will be allowed to run indefinitely.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the script timeout has been set.
*/
setScriptTimeout(ms) {
return this._scheduleCommand(ms, 'script', 'setScriptTimeout');
}
/**
* Sets the amount of time to wait for a page load to complete before
* returning an error. If the timeout is negative, page loads may be
* indefinite.
*
* @param {number} ms The amount of time to wait, in milliseconds.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the timeout has been set.
*/
pageLoadTimeout(ms) {
return this._scheduleCommand(ms, 'page load', 'pageLoadTimeout');
}
_scheduleCommand(ms, timeoutIdentifier, timeoutName) {
return this.driver_.schedule(
new command.Command(command.Name.SET_TIMEOUT).
setParameter('type', timeoutIdentifier).
setParameter('ms', ms),
`WebDriver.manage().timeouts().${timeoutName}(${ms})`);
}
}
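// Usage sketch (not part of the original source): configuring the three
// timeout types through Options#timeouts(). The millisecond values are
// illustrative.
function exampleConfigureTimeouts(driver) {
  let timeouts = driver.manage().timeouts();
  timeouts.implicitlyWait(5000);     // element lookups may poll for up to 5s
  timeouts.setScriptTimeout(10000);  // deadline for executeAsyncScript callbacks
  return timeouts.pageLoadTimeout(30000);
}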
/**
* An interface for managing the current window.
*
 * This class should never be instantiated directly. Instead, obtain an instance
* with
*
* webdriver.manage().window()
*
* @see WebDriver#manage()
* @see Options#window()
*/
class Window {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Retrieves the window's current position, relative to the top left corner of
* the screen.
* @return {!promise.Promise.<{x: number, y: number}>} A promise
* that will be resolved with the window's position in the form of a
* {x:number, y:number} object literal.
*/
getPosition() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_POSITION).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getPosition()');
}
/**
* Repositions the current window.
* @param {number} x The desired horizontal position, relative to the left
* side of the screen.
* @param {number} y The desired vertical position, relative to the top of the
   *     screen.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setPosition(x, y) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_POSITION).
setParameter('windowHandle', 'current').
setParameter('x', x).
setParameter('y', y),
'WebDriver.manage().window().setPosition(' + x + ', ' + y + ')');
}
/**
* Retrieves the window's current size.
* @return {!promise.Promise<{width: number, height: number}>} A
* promise that will be resolved with the window's size in the form of a
* {width:number, height:number} object literal.
*/
getSize() {
return this.driver_.schedule(
new command.Command(command.Name.GET_WINDOW_SIZE).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().getSize()');
}
/**
* Resizes the current window.
* @param {number} width The desired window width.
* @param {number} height The desired window height.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
setSize(width, height) {
return this.driver_.schedule(
new command.Command(command.Name.SET_WINDOW_SIZE).
setParameter('windowHandle', 'current').
setParameter('width', width).
setParameter('height', height),
'WebDriver.manage().window().setSize(' + width + ', ' + height + ')');
}
/**
* Maximizes the current window.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the command has completed.
*/
maximize() {
return this.driver_.schedule(
new command.Command(command.Name.MAXIMIZE_WINDOW).
setParameter('windowHandle', 'current'),
'WebDriver.manage().window().maximize()');
}
}
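// Usage sketch (not part of the original source): reading and adjusting the
// current window geometry through Options#window().
function exampleAdjustWindow(driver) {
  let win = driver.manage().window();
  win.setPosition(0, 0);
  return win.getSize().then(size => {
    // Halve the width while keeping the height, purely as an illustration.
    return win.setSize(Math.floor(size.width / 2), size.height);
  });
}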
/**
* Interface for managing WebDriver log records.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.manage().logs()
*
* @see WebDriver#manage()
* @see Options#logs()
*/
class Logs {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
* Fetches available log entries for the given type.
*
* Note that log buffers are reset after each call, meaning that available
* log entries correspond to those entries not yet returned for a given log
* type. In practice, this means that this call will return the available log
* entries since the last call, or from the start of the session.
*
* @param {!logging.Type} type The desired log type.
* @return {!promise.Promise.<!Array.<!logging.Entry>>} A
* promise that will resolve to a list of log entries for the specified
* type.
*/
get(type) {
let cmd = new command.Command(command.Name.GET_LOG).
setParameter('type', type);
return this.driver_.schedule(
cmd, 'WebDriver.manage().logs().get(' + type + ')').
then(function(entries) {
return entries.map(function(entry) {
if (!(entry instanceof logging.Entry)) {
return new logging.Entry(
entry['level'], entry['message'], entry['timestamp'],
entry['type']);
}
return entry;
});
});
}
/**
* Retrieves the log types available to this driver.
* @return {!promise.Promise<!Array<!logging.Type>>} A
* promise that will resolve to a list of available log types.
*/
getAvailableLogTypes() {
return this.driver_.schedule(
new command.Command(command.Name.GET_AVAILABLE_LOG_TYPES),
'WebDriver.manage().logs().getAvailableLogTypes()');
}
}
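// Usage sketch (not part of the original source): draining buffered log
// entries. logging.Type.BROWSER is a commonly exposed log type, but support
// varies by remote end; getAvailableLogTypes() reports what is available.
function exampleDumpBrowserLogs(driver) {
  return driver.manage().logs().get(logging.Type.BROWSER).then(entries => {
    entries.forEach(entry => console.log(entry.message));
  });
}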
/**
* An interface for changing the focus of the driver to another frame or window.
*
* This class should never be instantiated directly. Instead, obtain an
* instance with
*
* webdriver.switchTo()
*
* @see WebDriver#switchTo()
*/
class TargetLocator {
/**
* @param {!WebDriver} driver The parent driver.
* @private
*/
constructor(driver) {
/** @private {!WebDriver} */
this.driver_ = driver;
}
/**
   * Schedules a command to retrieve the {@code document.activeElement} element on
* the current document, or {@code document.body} if activeElement is not
* available.
* @return {!WebElementPromise} The active element.
*/
activeElement() {
var id = this.driver_.schedule(
new command.Command(command.Name.GET_ACTIVE_ELEMENT),
'WebDriver.switchTo().activeElement()');
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to switch focus of all future commands to the topmost
* frame on the page.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the default content.
*/
defaultContent() {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', null),
'WebDriver.switchTo().defaultContent()');
}
/**
* Schedules a command to switch the focus of all future commands to another
* frame on the page. The target frame may be specified as one of the
* following:
*
* - A number that specifies a (zero-based) index into [window.frames](
* https://developer.mozilla.org/en-US/docs/Web/API/Window.frames).
   * - A {@link WebElement} reference, which corresponds to a `frame` or `iframe`
* DOM element.
* - The `null` value, to select the topmost frame on the page. Passing `null`
* is the same as calling {@link #defaultContent defaultContent()}.
*
* If the specified frame can not be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchFrameError}.
*
* @param {(number|WebElement|null)} id The frame locator.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified frame.
*/
frame(id) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_FRAME).
setParameter('id', id),
'WebDriver.switchTo().frame(' + id + ')');
}
/**
* Schedules a command to switch the focus of all future commands to another
* window. Windows may be specified by their {@code window.name} attribute or
   * by their handle (as returned by {@link WebDriver#getAllWindowHandles}).
*
* If the specified window cannot be found, the returned promise will be
* rejected with a {@linkplain error.NoSuchWindowError}.
*
* @param {string} nameOrHandle The name or window handle of the window to
* switch focus to.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the driver has changed focus to the specified window.
*/
window(nameOrHandle) {
return this.driver_.schedule(
new command.Command(command.Name.SWITCH_TO_WINDOW).
setParameter('name', nameOrHandle),
'WebDriver.switchTo().window(' + nameOrHandle + ')');
}
/**
* Schedules a command to change focus to the active modal dialog, such as
* those opened by `window.alert()`, `window.confirm()`, and
* `window.prompt()`. The returned promise will be rejected with a
* {@linkplain error.NoSuchAlertError} if there are no open alerts.
*
* @return {!AlertPromise} The open alert.
*/
alert() {
var text = this.driver_.schedule(
new command.Command(command.Name.GET_ALERT_TEXT),
'WebDriver.switchTo().alert()');
var driver = this.driver_;
return new AlertPromise(driver, text.then(function(text) {
return new Alert(driver, text);
}));
}
}
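// Usage sketch (not part of the original source): moving focus between frames
// and windows with the TargetLocator above. The frame index is illustrative;
// a WebElement or null may be passed instead.
function exampleSwitchTargets(driver) {
  driver.switchTo().frame(0);          // enter the first frame on the page
  driver.switchTo().defaultContent();  // return to the top-level document
  // Move focus to the most recently opened window.
  return driver.getAllWindowHandles().then(handles => {
    return driver.switchTo().window(handles[handles.length - 1]);
  });
}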
//////////////////////////////////////////////////////////////////////////////
//
// WebElement
//
//////////////////////////////////////////////////////////////////////////////
const LEGACY_ELEMENT_ID_KEY = 'ELEMENT';
const ELEMENT_ID_KEY = 'element-6066-11e4-a52e-4f735466cecf';
/**
* Represents a DOM element. WebElements can be found by searching from the
* document root using a {@link WebDriver} instance, or by searching
* under another WebElement:
*
* driver.get('http://www.google.com');
* var searchForm = driver.findElement(By.tagName('form'));
* var searchBox = searchForm.findElement(By.name('q'));
* searchBox.sendKeys('webdriver');
*/
class WebElement {
/**
* @param {!WebDriver} driver the parent WebDriver instance for this element.
* @param {(!IThenable<string>|string)} id The server-assigned opaque ID for
* the underlying DOM element.
*/
constructor(driver, id) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.id_ = promise.fulfilled(id);
}
/**
* @param {string} id The raw ID.
* @param {boolean=} opt_noLegacy Whether to exclude the legacy element key.
* @return {!Object} The element ID for use with WebDriver's wire protocol.
*/
static buildId(id, opt_noLegacy) {
return opt_noLegacy
? {[ELEMENT_ID_KEY]: id}
: {[ELEMENT_ID_KEY]: id, [LEGACY_ELEMENT_ID_KEY]: id};
}
/**
* Extracts the encoded WebElement ID from the object.
*
* @param {?} obj The object to extract the ID from.
* @return {string} the extracted ID.
* @throws {TypeError} if the object is not a valid encoded ID.
*/
static extractId(obj) {
if (obj && typeof obj === 'object') {
if (typeof obj[ELEMENT_ID_KEY] === 'string') {
return obj[ELEMENT_ID_KEY];
} else if (typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string') {
return obj[LEGACY_ELEMENT_ID_KEY];
}
}
throw new TypeError('object is not a WebElement ID');
}
/**
* @param {?} obj the object to test.
* @return {boolean} whether the object is a valid encoded WebElement ID.
*/
static isId(obj) {
return obj && typeof obj === 'object'
&& (typeof obj[ELEMENT_ID_KEY] === 'string'
|| typeof obj[LEGACY_ELEMENT_ID_KEY] === 'string');
}
/**
* Compares two WebElements for equality.
*
* @param {!WebElement} a A WebElement.
* @param {!WebElement} b A WebElement.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved to whether the two WebElements are equal.
*/
static equals(a, b) {
if (a === b) {
return promise.fulfilled(true);
}
let ids = [a.getId(), b.getId()];
return promise.all(ids).then(function(ids) {
      // If the two elements have the same ID, they should be considered
// equal. Otherwise, they may still be equivalent, but we'll need to
// ask the server to check for us.
if (ids[0] === ids[1]) {
return true;
}
let cmd = new command.Command(command.Name.ELEMENT_EQUALS);
cmd.setParameter('id', ids[0]);
cmd.setParameter('other', ids[1]);
return a.driver_.schedule(cmd, 'WebElement.equals()');
});
}
/** @return {!WebDriver} The parent driver for this instance. */
getDriver() {
return this.driver_;
}
/**
* @return {!promise.Promise<string>} A promise that resolves to
* the server-assigned opaque ID assigned to this element.
*/
getId() {
return this.id_;
}
/**
* @return {!Object} Returns the serialized representation of this WebElement.
*/
[Symbols.serialize]() {
return this.getId().then(WebElement.buildId);
}
/**
* Schedules a command that targets this element with the parent WebDriver
* instance. Will ensure this element's ID is included in the command
* parameters under the "id" key.
*
* @param {!command.Command} command The command to schedule.
* @param {string} description A description of the command for debugging.
* @return {!promise.Promise<T>} A promise that will be resolved
* with the command result.
* @template T
* @see WebDriver#schedule
* @private
*/
schedule_(command, description) {
command.setParameter('id', this.getId());
return this.driver_.schedule(command, description);
}
/**
* Schedule a command to find a descendant of this element. If the element
* cannot be found, the returned promise will be rejected with a
* {@linkplain error.NoSuchElementError NoSuchElementError}.
*
* The search criteria for an element may be defined using one of the static
* factories on the {@link by.By} class, or as a short-hand
* {@link ./by.ByHash} object. For example, the following two statements
* are equivalent:
*
* var e1 = element.findElement(By.id('foo'));
* var e2 = element.findElement({id:'foo'});
*
* You may also provide a custom locator function, which takes as input this
* instance and returns a {@link WebElement}, or a promise that will resolve
* to a WebElement. If the returned promise resolves to an array of
* WebElements, WebDriver will use the first element. For example, to find the
* first visible link on a page, you could write:
*
* var link = element.findElement(firstVisibleLink);
*
* function firstVisibleLink(element) {
* var links = element.findElements(By.tagName('a'));
* return promise.filter(links, function(link) {
* return link.isDisplayed();
* });
* }
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!WebElementPromise} A WebElement that can be used to issue
* commands against the located element. If the element is not found, the
* element will be invalidated and all scheduled commands aborted.
*/
findElement(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
id = this.driver_.findElementInternal_(locator, this);
} else {
let cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENT).
setParameter('using', locator.using).
setParameter('value', locator.value);
id = this.schedule_(cmd, 'WebElement.findElement(' + locator + ')');
}
return new WebElementPromise(this.driver_, id);
}
/**
* Schedules a command to find all of the descendants of this element that
* match the given search criteria.
*
* @param {!(by.By|Function)} locator The locator strategy to use when
* searching for the element.
* @return {!promise.Promise<!Array<!WebElement>>} A
* promise that will resolve to an array of WebElements.
*/
findElements(locator) {
locator = by.checkedLocator(locator);
let id;
if (typeof locator === 'function') {
return this.driver_.findElementsInternal_(locator, this);
} else {
var cmd = new command.Command(
command.Name.FIND_CHILD_ELEMENTS).
setParameter('using', locator.using).
setParameter('value', locator.value);
return this.schedule_(cmd, 'WebElement.findElements(' + locator + ')');
}
}
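  // For example, to collect the visible text of every link inside this
  // element (the selector and variable names are illustrative):
  //
  //   element.findElements(By.css('a'))
  //       .then(links => Promise.all(links.map(link => link.getText())))
  //       .then(texts => console.log(texts));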
/**
* Schedules a command to click on this element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the click command has completed.
*/
click() {
return this.schedule_(
new command.Command(command.Name.CLICK_ELEMENT),
'WebElement.click()');
}
/**
* Schedules a command to type a sequence on the DOM element represented by
* this instance.
*
* Modifier keys (SHIFT, CONTROL, ALT, META) are stateful; once a modifier is
* processed in the keysequence, that key state is toggled until one of the
* following occurs:
*
* - The modifier key is encountered again in the sequence. At this point the
* state of the key is toggled (along with the appropriate keyup/down
* events).
* - The {@link input.Key.NULL} key is encountered in the sequence. When
   *     this key is encountered, all modifier keys currently in the down state are
* released (with accompanying keyup events). The NULL key can be used to
* simulate common keyboard shortcuts:
*
* element.sendKeys("text was",
* Key.CONTROL, "a", Key.NULL,
* "now text is");
* // Alternatively:
* element.sendKeys("text was",
* Key.chord(Key.CONTROL, "a"),
* "now text is");
*
* - The end of the keysequence is encountered. When there are no more keys
* to type, all depressed modifier keys are released (with accompanying
* keyup events).
*
* If this element is a file input ({@code <input type="file">}), the
* specified key sequence should specify the path to the file to attach to
   * the element. This is analogous to the user clicking "Browse..." and entering
* the path into the file select dialog.
*
* var form = driver.findElement(By.css('form'));
* var element = form.findElement(By.css('input[type=file]'));
* element.sendKeys('/path/to/file.txt');
* form.submit();
*
* For uploads to function correctly, the entered path must reference a file
* on the _browser's_ machine, not the local machine running this script. When
* running against a remote Selenium server, a {@link input.FileDetector}
* may be used to transparently copy files to the remote machine before
* attempting to upload them in the browser.
*
* __Note:__ On browsers where native keyboard events are not supported
* (e.g. Firefox on OS X), key events will be synthesized. Special
   * punctuation keys will be synthesized according to a standard QWERTY en-us
* keyboard layout.
*
* @param {...(number|string|!IThenable<(number|string)>)} var_args The
* sequence of keys to type. Number keys may be referenced numerically or
* by string (1 or '1'). All arguments will be joined into a single
* sequence.
* @return {!promise.Promise<void>} A promise that will be resolved
* when all keys have been typed.
*/
sendKeys(var_args) {
let keys = Promise.all(Array.prototype.slice.call(arguments, 0)).
then(keys => {
let ret = [];
keys.forEach(key => {
let type = typeof key;
if (type === 'number') {
key = String(key);
} else if (type !== 'string') {
throw TypeError(
              'each key must be a number or string; got ' + type);
}
// The W3C protocol requires keys to be specified as an array where
// each element is a single key.
ret.push.apply(ret, key.split(''));
});
return ret;
});
if (!this.driver_.fileDetector_) {
return this.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys),
'WebElement.sendKeys()');
}
// Suppress unhandled rejection errors until the flow executes the command.
keys.catch(function() {});
var element = this;
return this.driver_.flow_.execute(function() {
return keys.then(function(keys) {
return element.driver_.fileDetector_
.handleFile(element.driver_, keys.join(''));
}).then(function(keys) {
return element.schedule_(
new command.Command(command.Name.SEND_KEYS_TO_ELEMENT).
setParameter('value', keys.split('')),
'WebElement.sendKeys()');
});
}, 'WebElement.sendKeys()');
}
/**
* Schedules a command to query for the tag/node name of this element.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's tag name.
*/
getTagName() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TAG_NAME),
'WebElement.getTagName()');
}
/**
* Schedules a command to query for the computed style of the element
* represented by this instance. If the element inherits the named style from
* its parent, the parent will be queried for its value. Where possible, color
* values will be converted to their hex representation (e.g. #00ff00 instead
* of rgb(0, 255, 0)).
*
* _Warning:_ the value returned will be as the browser interprets it, so
* it may be tricky to form a proper assertion.
*
* @param {string} cssStyleProperty The name of the CSS style property to look
* up.
* @return {!promise.Promise<string>} A promise that will be
* resolved with the requested CSS value.
*/
getCssValue(cssStyleProperty) {
var name = command.Name.GET_ELEMENT_VALUE_OF_CSS_PROPERTY;
return this.schedule_(
new command.Command(name).
setParameter('propertyName', cssStyleProperty),
'WebElement.getCssValue(' + cssStyleProperty + ')');
}
/**
* Schedules a command to query for the value of the given attribute of the
* element. Will return the current value, even if it has been modified after
* the page has been loaded. More exactly, this method will return the value
* of the given attribute, unless that attribute is not present, in which case
* the value of the property with the same name is returned. If neither value
* is set, null is returned (for example, the "value" property of a textarea
* element). The "style" attribute is converted as best can be to a
* text representation with a trailing semi-colon. The following are deemed to
* be "boolean" attributes and will return either "true" or null:
*
* async, autofocus, autoplay, checked, compact, complete, controls, declare,
* defaultchecked, defaultselected, defer, disabled, draggable, ended,
* formnovalidate, hidden, indeterminate, iscontenteditable, ismap, itemscope,
* loop, multiple, muted, nohref, noresize, noshade, novalidate, nowrap, open,
* paused, pubdate, readonly, required, reversed, scoped, seamless, seeking,
* selected, spellcheck, truespeed, willvalidate
*
* Finally, the following commonly mis-capitalized attribute/property names
* are evaluated as expected:
*
* - "class"
* - "readonly"
*
* @param {string} attributeName The name of the attribute to query.
* @return {!promise.Promise<?string>} A promise that will be
* resolved with the attribute's value. The returned value will always be
* either a string or null.
*/
getAttribute(attributeName) {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_ATTRIBUTE).
setParameter('name', attributeName),
'WebElement.getAttribute(' + attributeName + ')');
}
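  // A short sketch of the attribute/property fallback described above,
  // assuming `box` refers to a checkbox element:
  //
  //   box.getAttribute('checked').then(value => {
  //     // value is "true" while the box is checked, and null otherwise.
  //     console.log(value);
  //   });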
/**
* Get the visible (i.e. not hidden by CSS) innerText of this element,
* including sub-elements, without any leading or trailing whitespace.
*
* @return {!promise.Promise<string>} A promise that will be
* resolved with the element's visible text.
*/
getText() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_TEXT),
'WebElement.getText()');
}
/**
* Schedules a command to compute the size of this element's bounding box, in
* pixels.
* @return {!promise.Promise.<{width: number, height: number}>} A
* promise that will be resolved with the element's size as a
* {@code {width:number, height:number}} object.
*/
getSize() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_SIZE),
'WebElement.getSize()');
}
/**
* Schedules a command to compute the location of this element in page space.
* @return {!promise.Promise.<{x: number, y: number}>} A promise that
* will be resolved to the element's location as a
* {@code {x:number, y:number}} object.
*/
getLocation() {
return this.schedule_(
new command.Command(command.Name.GET_ELEMENT_LOCATION),
'WebElement.getLocation()');
}
/**
* Schedules a command to query whether the DOM element represented by this
   * instance is enabled, as dictated by the {@code disabled} attribute.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently enabled.
*/
isEnabled() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_ENABLED),
'WebElement.isEnabled()');
}
/**
* Schedules a command to query whether this element is selected.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently selected.
*/
isSelected() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_SELECTED),
'WebElement.isSelected()');
}
/**
* Schedules a command to submit the form containing this element (or this
* element if it is a FORM element). This command is a no-op if the element is
* not contained in a form.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the form has been submitted.
*/
submit() {
return this.schedule_(
new command.Command(command.Name.SUBMIT_ELEMENT),
'WebElement.submit()');
}
/**
* Schedules a command to clear the `value` of this element. This command has
* no effect if the underlying DOM element is neither a text INPUT element
* nor a TEXTAREA element.
* @return {!promise.Promise<void>} A promise that will be resolved
* when the element has been cleared.
*/
clear() {
return this.schedule_(
new command.Command(command.Name.CLEAR_ELEMENT),
'WebElement.clear()');
}
/**
* Schedules a command to test whether this element is currently displayed.
* @return {!promise.Promise<boolean>} A promise that will be
* resolved with whether this element is currently visible on the page.
*/
isDisplayed() {
return this.schedule_(
new command.Command(command.Name.IS_ELEMENT_DISPLAYED),
'WebElement.isDisplayed()');
}
/**
* Take a screenshot of the visible region encompassed by this element's
* bounding rectangle.
*
* @param {boolean=} opt_scroll Optional argument that indicates whether the
* element should be scrolled into view before taking a screenshot.
* Defaults to false.
* @return {!promise.Promise<string>} A promise that will be
* resolved to the screenshot as a base-64 encoded PNG.
*/
takeScreenshot(opt_scroll) {
var scroll = !!opt_scroll;
return this.schedule_(
new command.Command(command.Name.TAKE_ELEMENT_SCREENSHOT)
.setParameter('scroll', scroll),
'WebElement.takeScreenshot(' + scroll + ')');
}
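  // A minimal sketch of saving the returned screenshot, assuming the script
  // runs under Node.js with the built-in `fs` module available:
  //
  //   element.takeScreenshot(true).then(png => {
  //     require('fs').writeFileSync('element.png', png, 'base64');
  //   });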
}
/**
* WebElementPromise is a promise that will be fulfilled with a WebElement.
* This serves as a forward proxy on WebElement, allowing calls to be
 * scheduled directly on this instance before the underlying
* WebElement has been fulfilled. In other words, the following two statements
* are equivalent:
*
* driver.findElement({id: 'my-button'}).click();
* driver.findElement({id: 'my-button'}).then(function(el) {
* return el.click();
* });
*
* @implements {promise.Thenable<!WebElement>}
* @final
*/
class WebElementPromise extends WebElement {
/**
* @param {!WebDriver} driver The parent WebDriver instance for this
* element.
* @param {!promise.Promise<!WebElement>} el A promise
* that will resolve to the promised element.
*/
constructor(driver, el) {
super(driver, 'unused');
/** @override */
this.cancel = el.cancel.bind(el);
/** @override */
this.isPending = el.isPending.bind(el);
/** @override */
this.then = el.then.bind(el);
/** @override */
this.catch = el.catch.bind(el);
/** @override */
this.finally = el.finally.bind(el);
/**
* Defers returning the element ID until the wrapped WebElement has been
* resolved.
* @override
*/
this.getId = function() {
return el.then(function(el) {
return el.getId();
});
};
}
}
promise.Thenable.addImplementation(WebElementPromise);
//////////////////////////////////////////////////////////////////////////////
//
// Alert
//
//////////////////////////////////////////////////////////////////////////////
/**
* Represents a modal dialog such as {@code alert}, {@code confirm}, or
* {@code prompt}. Provides functions to retrieve the message displayed with
* the alert, accept or dismiss the alert, and set the response text (in the
* case of {@code prompt}).
*/
class Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this alert
* is attached to.
* @param {string} text The message text displayed with this alert.
*/
constructor(driver, text) {
/** @private {!WebDriver} */
this.driver_ = driver;
/** @private {!promise.Promise<string>} */
this.text_ = promise.fulfilled(text);
}
/**
* Retrieves the message text displayed with this alert. For instance, if the
* alert were opened with alert("hello"), then this would return "hello".
*
* @return {!promise.Promise<string>} A promise that will be
* resolved to the text displayed with this alert.
*/
getText() {
return this.text_;
}
/**
* Sets the username and password in an alert prompting for credentials (such
* as a Basic HTTP Auth prompt). This method will implicitly
* {@linkplain #accept() submit} the dialog.
*
* @param {string} username The username to send.
* @param {string} password The password to send.
* @return {!promise.Promise<void>} A promise that will be resolved when this
* command has completed.
*/
authenticateAs(username, password) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_CREDENTIALS),
'WebDriver.switchTo().alert()'
+ `.authenticateAs("${username}", "${password}")`);
}
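  // Illustrative flow for a Basic HTTP Auth prompt; the URL and credentials
  // below are placeholders:
  //
  //   driver.get('http://example.com/protected');
  //   driver.switchTo().alert().authenticateAs('user', 'secret');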
/**
* Accepts this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
accept() {
return this.driver_.schedule(
new command.Command(command.Name.ACCEPT_ALERT),
'WebDriver.switchTo().alert().accept()');
}
/**
* Dismisses this alert.
*
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
dismiss() {
return this.driver_.schedule(
new command.Command(command.Name.DISMISS_ALERT),
'WebDriver.switchTo().alert().dismiss()');
}
/**
* Sets the response text on this alert. This command will return an error if
* the underlying alert does not support response text (e.g. window.alert and
* window.confirm).
*
* @param {string} text The text to set.
* @return {!promise.Promise<void>} A promise that will be resolved
* when this command has completed.
*/
sendKeys(text) {
return this.driver_.schedule(
new command.Command(command.Name.SET_ALERT_TEXT).
setParameter('text', text),
'WebDriver.switchTo().alert().sendKeys(' + text + ')');
}
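  // Typical flow for answering a window.prompt() dialog; the response text
  // is a placeholder:
  //
  //   driver.switchTo().alert().sendKeys('my answer');
  //   driver.switchTo().alert().accept();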
}
/**
* AlertPromise is a promise that will be fulfilled with an Alert. This promise
* serves as a forward proxy on an Alert, allowing calls to be scheduled
* directly on this instance before the underlying Alert has been fulfilled. In
* other words, the following two statements are equivalent:
*
* driver.switchTo().alert().dismiss();
* driver.switchTo().alert().then(function(alert) {
* return alert.dismiss();
* });
*
* @implements {promise.Thenable.<!webdriver.Alert>}
* @final
*/
class AlertPromise extends Alert {
/**
* @param {!WebDriver} driver The driver controlling the browser this
* alert is attached to.
* @param {!promise.Thenable<!Alert>} alert A thenable
* that will be fulfilled with the promised alert.
*/
constructor(driver, alert) {
super(driver, 'unused');
/** @override */
this.cancel = alert.cancel.bind(alert);
/** @override */
this.isPending = alert.isPending.bind(alert);
/** @override */
this.then = alert.then.bind(alert);
/** @override */
this.catch = alert.catch.bind(alert);
/** @override */
this.finally = alert.finally.bind(alert);
/**
* Defer returning text until the promised alert has been resolved.
* @override
*/
this.getText = function() {
return alert.then(function(alert) {
return alert.getText();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.authenticateAs = function(username, password) {
return alert.then(function(alert) {
return alert.authenticateAs(username, password);
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.accept = function() {
return alert.then(function(alert) {
return alert.accept();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.dismiss = function() {
return alert.then(function(alert) {
return alert.dismiss();
});
};
/**
* Defers action until the alert has been located.
* @override
*/
this.sendKeys = function(text) {
return alert.then(function(alert) {
return alert.sendKeys(text);
});
};
}
}
promise.Thenable.addImplementation(AlertPromise);
// PUBLIC API
module.exports = {
Alert: Alert,
AlertPromise: AlertPromise,
Condition: Condition,
Logs: Logs,
Navigation: Navigation,
Options: Options,
TargetLocator: TargetLocator,
Timeouts: Timeouts,
WebDriver: WebDriver,
WebElement: WebElement,
WebElementCondition: WebElementCondition,
WebElementPromise: WebElementPromise,
Window: Window
};
| 1 | 13,587 | This should only be sent if the driver is speaking to a W3C conformant remote, so we need an if-condition check like we have in the Python bindings. | SeleniumHQ-selenium | js |
@@ -247,7 +247,7 @@ describe 'LinksControllerTest' do
get :new, project_id: project.url_name, category_id: category_id
assigns(:category_name).must_equal 'Homepage'
- assigns(:link).title.must_equal :Homepage
+ assigns(:link).title.must_equal 'Homepage'
end
it 'load_category_and_title_for_new_download_link' do | 1 | require 'test_helper'
describe 'LinksControllerTest' do
let(:project) { create(:project) }
let(:admin) { create(:admin) }
let(:user) { create(:account) }
before do
@link_homepage = create(:link, project: project, link_category_id: Link::CATEGORIES[:Homepage])
@link_download = create(:link, project: project, link_category_id: Link::CATEGORIES[:Download])
end
it 'after edit user is taken to index if they came from another page' do
link = nil
edit_as(admin) do
link = create(:link, project_id: project.id)
end
edit_as(admin) do
request.session[:return_to] = 'https://test.host:80/p/linux'
put :update, project_id: project.url_name, id: link.id,
link: attributes_for(:link)
must_redirect_to project_links_path(project)
end
end
it 'after save user is taken to index' do
link = nil
edit_as(admin) do
link = create(:link, project_id: project.id)
end
edit_as(user) do
put :update, project_id: project.url_name, id: link.id,
link: attributes_for(:link)
must_redirect_to project_links_path(project)
end
end
it 'non-manager index action displays alert' do
skip 'TODO: Dependent on restrict_edits_to_managers'
restrict_edits_to_managers project
edit_as(user) do
get :index, project_id: project.url_name
assert_select '.alert', text: "×\n\nYou can view, but not change this data. Only managers may change this data."
end
end
it 'non-manager new action redirect to a login prompt' do
skip 'TODO: Dependent on restrict_edits_to_managers'
restrict_edits_to_managers project
edit_as(user) do
get :new, project_id: project.url_name
project.reload.wont_be :edit_authorized?
must_redirect_to new_session_path
end
end
it 'non-manager edit action redirect to a login prompt' do
skip 'TODO: Dependent on restrict_edits_to_managers'
link = nil
edit_as(admin) do
link = create(:link, project_id: project.id)
end
restrict_edits_to_managers project
edit_as(user) do
get :edit, project_id: project.url_name, id: link.id
must_redirect_to new_session_path
end
end
describe 'single category links' do
let(:link) do
as(admin) do
project.links.find_by_link_category_id(Link::CATEGORIES[:Homepage])
end
end
describe 'new' do
it 'must not be shown if the link already exists' do
as(admin) do
get :new, project_id: project.url_name
assigns(:categories)[:Homepage].must_be_nil
end
end
it 'must be shown if link does not exist' do
as(admin) do
link.editor_account = admin
link.destroy
get :new, project_id: project.url_name
assigns(:categories)[:Homepage].must_equal Link::CATEGORIES[:Homepage]
end
end
it 'download link must not be shown if it already exists' do
as(admin) do
link.editor_account = admin
link.update!(title: 'Project Download page',
link_category_id: Link::CATEGORIES[:Download])
get :new, project_id: project.url_name
assigns(:categories)[:Download].must_be_nil
end
end
end
describe 'create' do
it 'must not be shown if the link already exists' do
as(admin) do
project.links.first.link_category_id.must_equal Link::CATEGORIES[:Homepage]
post :create, project_id: project.url_name,
link: attributes_for(:link)
assigns(:categories)[:Homepage].must_be_nil
end
end
it 'must be shown if link does not exist' do
as(admin) do
link.editor_account = admin
link.destroy
post :create, project_id: project.url_name,
link: attributes_for(:link)
assigns(:categories)[:Homepage].must_equal Link::CATEGORIES[:Homepage]
end
end
it 'must be shown if link is being created' do
as(admin) do
link.editor_account = admin
link.destroy
post :create, project_id: project.url_name,
link: attributes_for(:link, link_category_id: Link::CATEGORIES[:Homepage])
assigns(:categories)[:Homepage].must_equal Link::CATEGORIES[:Homepage]
end
end
end
describe 'update' do
let(:other_link) do
create(:link, project_id: project.id,
link_category_id: Link::CATEGORIES[:Other])
end
it 'must be shown if the link is being updated' do
as(admin) do
put :update, id: link.id, project_id: project.url_name, link: { title: nil }
assigns(:categories)[:Homepage].must_equal Link::CATEGORIES[:Homepage]
end
end
it 'must be shown if does not exist and other link is being updated' do
as(admin) do
link.editor_account = admin
link.destroy
put :update, id: other_link.id, project_id: project.url_name,
link: { title: :new_title }
assigns(:categories)[:Homepage].must_equal Link::CATEGORIES[:Homepage]
end
end
end
it 'must be shown if the link is being edited' do
as(admin) do
get :edit, id: link.id, project_id: project.url_name
assigns(:categories)[:Homepage].must_equal Link::CATEGORIES[:Homepage]
end
end
end
it 'index' do
get :index, project_id: project.url_name
must_respond_with :success
end
it 'new' do
login_as(admin)
get :new, project_id: project.url_name
must_respond_with :success
end
it 'edit' do
link = create(:link, project: project)
login_as(admin)
get :edit, project_id: project.url_name, id: link.id
must_respond_with :success
end
it 'create_with_existing_link' do
link1 = create(:link, project_id: project.id, link_category_id: Link::CATEGORIES[:Homepage])
link1.destroy
create(:link, project_id: project.id, link_category_id: Link::CATEGORIES[:Homepage])
login_as(admin)
assert_difference('project.reload.links.count', 1) do
post :create, project_id: project.url_name,
link: attributes_for(:link, link_category_id: Link::CATEGORIES[:Homepage])
must_redirect_to project_links_path(project)
flash[:success].must_equal I18n.t('links.create.success')
end
end
it 'create_without_existing_link' do
login_as(admin)
assert_difference('project.reload.links.count', 1) do
post :create, project_id: project.url_name,
link: attributes_for(:link, link_category_id: Link::CATEGORIES[:Homepage])
must_redirect_to project_links_path(project)
flash[:success].must_equal I18n.t('links.create.success')
end
end
it 'create' do
login_as(admin)
assert_difference('project.reload.links.count', 1) do
post :create, project_id: project.url_name,
link: attributes_for(:link, link_category_id: Link::CATEGORIES[:Homepage])
must_redirect_to project_links_path(project)
end
end
it 'load_category_and_title_for_new_homepage_link' do
category_id = Link::CATEGORIES[:Homepage]
login_as(admin)
get :new, project_id: project.url_name, category_id: category_id
assigns(:category_name).must_equal 'Homepage'
assigns(:link).title.must_equal :Homepage
end
it 'load_category_and_title_for_new_download_link' do
login_as(admin)
get :new, project_id: project.url_name, category_id: Link::CATEGORIES[:Download]
assigns(:category_name).must_equal 'Download'
assigns(:link).title.must_equal :Downloads
end
it 'load_category_and_title_for_new_other_link' do
category_id = Link::CATEGORIES[:Other]
login_as(admin)
get :new, project_id: project.url_name, category_id: category_id
assigns(:category_name).must_equal 'Other'
assigns(:link).title.must_be_nil
end
it 'should_allow_same_url_in_two_categories' do
project = create(:project)
link_to_be_deleted = create(:link, link_category_id: Link::CATEGORIES[:Homepage], project: project)
create(:link, project: project, link_category_id: Link::CATEGORIES[:Download])
login_as(admin)
delete :destroy, id: link_to_be_deleted.id, project_id: project.url_name
project.links.size.must_equal 1
end
it 'should gracefully handle errors when trying to delete a link' do
link = create(:link, project: create(:project))
Link.any_instance.stubs(:destroy).returns false
delete :destroy, id: link.id, project_id: link.project.url_name
must_respond_with 302
end
it 'should_not_create_if_link_was_soft_deleted_already_in_a_link_category' do
project = create(:project)
create(:link, project: project, link_category_id: Link::CATEGORIES[:Homepage])
login_as(admin)
assert_no_difference 'project.links.count' do
post :create, project_id: project.url_name,
link: { title: 'A Link', link_category_id: Link::CATEGORIES[:Homepage] }
end
end
it 'load_category_and_title_for_edit_link' do
category_id = Link::CATEGORIES['Forums']
create(:link, title: 'Title', project: project, link_category_id: category_id)
link = Link.find_by_link_category_id(Link::CATEGORIES[:Forums])
login_as(admin)
get :edit, project_id: project.url_name, id: link.id
assigns(:category_name).must_equal 'Forums'
assigns(:link).title.must_equal 'Title'
end
end
| 1 | 6,968 | This should have failed before. | blackducksoftware-ohloh-ui | rb |
@@ -0,0 +1,19 @@
+package paymentchannel
+
+import (
+ "github.com/filecoin-project/go-address"
+ "github.com/filecoin-project/specs-actors/actors/builtin/paych"
+)
+
+// ChannelInfo is the primary payment channel record
+type ChannelInfo struct {
+ Owner address.Address // Payout (From) address for this channel, has ability to sign and send funds
+ State *paych.State
+ Vouchers []*VoucherInfo // All vouchers submitted for this channel
+}
+
+// Voucher Info is a record of a voucher submitted for a payment channel
+type VoucherInfo struct {
+ Voucher *paych.SignedVoucher
+ Proof []byte
+} | 1 | 1 | 23,040 | the paymentchannel dir is where the paymentchannel manager will live. it will store the types below. | filecoin-project-venus | go |
|
@@ -219,7 +219,9 @@ webdriver.CommandName = {
GET_SESSION_LOGS: 'getSessionLogs',
// Non-standard commands used by the standalone Selenium server.
- UPLOAD_FILE: 'uploadFile'
+ UPLOAD_FILE: 'uploadFile',
+
+ GET_CANVAS_URL: 'getCanvasUrl'
};
| 1 | // Copyright 2011 Software Freedom Conservancy. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Contains several classes for handling commands.
*/
goog.provide('webdriver.Command');
goog.provide('webdriver.CommandExecutor');
goog.provide('webdriver.CommandName');
/**
* Describes a command to be executed by the WebDriverJS framework.
* @param {!webdriver.CommandName} name The name of this command.
* @constructor
*/
webdriver.Command = function(name) {
/**
* The name of this command.
* @private {!webdriver.CommandName}
*/
this.name_ = name;
/**
* The parameters to this command.
* @private {!Object.<*>}
*/
this.parameters_ = {};
};
/**
* @return {!webdriver.CommandName} This command's name.
*/
webdriver.Command.prototype.getName = function() {
return this.name_;
};
/**
* Sets a parameter to send with this command.
* @param {string} name The parameter name.
* @param {*} value The parameter value.
* @return {!webdriver.Command} A self reference.
*/
webdriver.Command.prototype.setParameter = function(name, value) {
this.parameters_[name] = value;
return this;
};
/**
* Sets the parameters for this command.
* @param {!Object.<*>} parameters The command parameters.
* @return {!webdriver.Command} A self reference.
*/
webdriver.Command.prototype.setParameters = function(parameters) {
this.parameters_ = parameters;
return this;
};
/**
* Returns a named command parameter.
* @param {string} key The parameter key to look up.
* @return {*} The parameter value, or undefined if it has not been set.
*/
webdriver.Command.prototype.getParameter = function(key) {
return this.parameters_[key];
};
/**
* @return {!Object.<*>} The parameters to send with this command.
*/
webdriver.Command.prototype.getParameters = function() {
return this.parameters_;
};
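/*
 * Example of assembling a command by hand; the parameter values below are
 * placeholders, and `executor` stands for any concrete CommandExecutor
 * implementation:
 *
 *   var cmd = new webdriver.Command(webdriver.CommandName.GET)
 *       .setParameter('url', 'http://www.example.com')
 *       .setParameter('sessionId', 'abc123');
 *   executor.execute(cmd, function(err, response) {
 *     // handle the response here
 *   });
 */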
/**
 * Enumeration of predefined command names that all command processors
* will support.
* @enum {string}
*/
// TODO: Delete obsolete command names.
webdriver.CommandName = {
GET_SERVER_STATUS: 'getStatus',
NEW_SESSION: 'newSession',
GET_SESSIONS: 'getSessions',
DESCRIBE_SESSION: 'getSessionCapabilities',
CLOSE: 'close',
QUIT: 'quit',
GET_CURRENT_URL: 'getCurrentUrl',
GET: 'get',
GO_BACK: 'goBack',
GO_FORWARD: 'goForward',
REFRESH: 'refresh',
ADD_COOKIE: 'addCookie',
GET_COOKIE: 'getCookie',
GET_ALL_COOKIES: 'getCookies',
DELETE_COOKIE: 'deleteCookie',
DELETE_ALL_COOKIES: 'deleteAllCookies',
GET_ACTIVE_ELEMENT: 'getActiveElement',
FIND_ELEMENT: 'findElement',
FIND_ELEMENTS: 'findElements',
FIND_CHILD_ELEMENT: 'findChildElement',
FIND_CHILD_ELEMENTS: 'findChildElements',
CLEAR_ELEMENT: 'clearElement',
CLICK_ELEMENT: 'clickElement',
SEND_KEYS_TO_ELEMENT: 'sendKeysToElement',
SUBMIT_ELEMENT: 'submitElement',
GET_CURRENT_WINDOW_HANDLE: 'getCurrentWindowHandle',
GET_WINDOW_HANDLES: 'getWindowHandles',
GET_WINDOW_POSITION: 'getWindowPosition',
SET_WINDOW_POSITION: 'setWindowPosition',
GET_WINDOW_SIZE: 'getWindowSize',
SET_WINDOW_SIZE: 'setWindowSize',
MAXIMIZE_WINDOW: 'maximizeWindow',
SWITCH_TO_WINDOW: 'switchToWindow',
SWITCH_TO_FRAME: 'switchToFrame',
GET_PAGE_SOURCE: 'getPageSource',
GET_TITLE: 'getTitle',
EXECUTE_SCRIPT: 'executeScript',
EXECUTE_ASYNC_SCRIPT: 'executeAsyncScript',
GET_ELEMENT_TEXT: 'getElementText',
GET_ELEMENT_TAG_NAME: 'getElementTagName',
IS_ELEMENT_SELECTED: 'isElementSelected',
IS_ELEMENT_ENABLED: 'isElementEnabled',
IS_ELEMENT_DISPLAYED: 'isElementDisplayed',
GET_ELEMENT_LOCATION: 'getElementLocation',
GET_ELEMENT_LOCATION_IN_VIEW: 'getElementLocationOnceScrolledIntoView',
GET_ELEMENT_SIZE: 'getElementSize',
GET_ELEMENT_ATTRIBUTE: 'getElementAttribute',
GET_ELEMENT_VALUE_OF_CSS_PROPERTY: 'getElementValueOfCssProperty',
ELEMENT_EQUALS: 'elementEquals',
SCREENSHOT: 'screenshot',
IMPLICITLY_WAIT: 'implicitlyWait',
SET_SCRIPT_TIMEOUT: 'setScriptTimeout',
SET_TIMEOUT: 'setTimeout',
ACCEPT_ALERT: 'acceptAlert',
DISMISS_ALERT: 'dismissAlert',
GET_ALERT_TEXT: 'getAlertText',
SET_ALERT_TEXT: 'setAlertValue',
EXECUTE_SQL: 'executeSQL',
GET_LOCATION: 'getLocation',
SET_LOCATION: 'setLocation',
GET_APP_CACHE: 'getAppCache',
GET_APP_CACHE_STATUS: 'getStatus',
CLEAR_APP_CACHE: 'clearAppCache',
IS_BROWSER_ONLINE: 'isBrowserOnline',
SET_BROWSER_ONLINE: 'setBrowserOnline',
GET_LOCAL_STORAGE_ITEM: 'getLocalStorageItem',
GET_LOCAL_STORAGE_KEYS: 'getLocalStorageKeys',
SET_LOCAL_STORAGE_ITEM: 'setLocalStorageItem',
REMOVE_LOCAL_STORAGE_ITEM: 'removeLocalStorageItem',
CLEAR_LOCAL_STORAGE: 'clearLocalStorage',
GET_LOCAL_STORAGE_SIZE: 'getLocalStorageSize',
GET_SESSION_STORAGE_ITEM: 'getSessionStorageItem',
GET_SESSION_STORAGE_KEYS: 'getSessionStorageKey',
SET_SESSION_STORAGE_ITEM: 'setSessionStorageItem',
REMOVE_SESSION_STORAGE_ITEM: 'removeSessionStorageItem',
CLEAR_SESSION_STORAGE: 'clearSessionStorage',
GET_SESSION_STORAGE_SIZE: 'getSessionStorageSize',
SET_SCREEN_ORIENTATION: 'setScreenOrientation',
GET_SCREEN_ORIENTATION: 'getScreenOrientation',
// These belong to the Advanced user interactions - an element is
// optional for these commands.
CLICK: 'mouseClick',
DOUBLE_CLICK: 'mouseDoubleClick',
MOUSE_DOWN: 'mouseButtonDown',
MOUSE_UP: 'mouseButtonUp',
MOVE_TO: 'mouseMoveTo',
SEND_KEYS_TO_ACTIVE_ELEMENT: 'sendKeysToActiveElement',
// These belong to the Advanced Touch API
TOUCH_SINGLE_TAP: 'touchSingleTap',
TOUCH_DOWN: 'touchDown',
TOUCH_UP: 'touchUp',
TOUCH_MOVE: 'touchMove',
TOUCH_SCROLL: 'touchScroll',
TOUCH_DOUBLE_TAP: 'touchDoubleTap',
TOUCH_LONG_PRESS: 'touchLongPress',
TOUCH_FLICK: 'touchFlick',
GET_AVAILABLE_LOG_TYPES: 'getAvailableLogTypes',
GET_LOG: 'getLog',
GET_SESSION_LOGS: 'getSessionLogs',
// Non-standard commands used by the standalone Selenium server.
UPLOAD_FILE: 'uploadFile'
};
/**
* Handles the execution of WebDriver {@link webdriver.Command commands}.
* @interface
*/
webdriver.CommandExecutor = function() {};
/**
* Executes the given {@code command}. If there is an error executing the
* command, the provided callback will be invoked with the offending error.
* Otherwise, the callback will be invoked with a null Error and non-null
* {@link bot.response.ResponseObject} object.
* @param {!webdriver.Command} command The command to execute.
* @param {function(Error, !bot.response.ResponseObject=)} callback the function
* to invoke when the command response is ready.
*/
webdriver.CommandExecutor.prototype.execute = goog.abstractMethod;
| 1 | 11,766 | This is Safari specific and should be defined somewhere in the `safaridriver` namespace | SeleniumHQ-selenium | rb |
@@ -58,13 +58,13 @@ func TestNewRound(t *testing.T) {
require := require.New(t)
bc, roll := makeChain(t)
rc := &roundCalculator{bc, true, roll, bc.CandidatesByHeight, 0}
- proposer, err := rc.calculateProposer(5, 1, []string{"1", "2", "3", "4", "5"})
+ _, err := rc.calculateProposer(5, 1, []string{"1", "2", "3", "4", "5"})
require.Error(err)
var validDelegates [24]string
for i := 0; i < 24; i++ {
validDelegates[i] = identityset.Address(i).String()
}
- proposer, err = rc.calculateProposer(5, 1, validDelegates[:])
+ proposer, err := rc.calculateProposer(5, 1, validDelegates[:])
require.NoError(err)
require.Equal(validDelegates[6], proposer)
| 1 | // Copyright (c) 2019 IoTeX Foundation
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package rolldpos
import (
"context"
"io/ioutil"
"os"
"testing"
"time"
"github.com/iotexproject/go-pkgs/crypto"
"github.com/stretchr/testify/require"
"github.com/iotexproject/iotex-core/action/protocol"
"github.com/iotexproject/iotex-core/action/protocol/account"
"github.com/iotexproject/iotex-core/action/protocol/poll"
"github.com/iotexproject/iotex-core/action/protocol/rewarding"
"github.com/iotexproject/iotex-core/action/protocol/rolldpos"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/blockchain/genesis"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/pkg/unit"
"github.com/iotexproject/iotex-core/test/identityset"
"github.com/iotexproject/iotex-core/testutil"
)
func TestUpdateRound(t *testing.T) {
require := require.New(t)
bc, roll := makeChain(t)
rc := &roundCalculator{bc, true, roll, bc.CandidatesByHeight, 0}
ra, err := rc.NewRound(1, time.Second, time.Unix(1562382392, 0), nil)
require.NoError(err)
// height < round.Height()
_, err = rc.UpdateRound(ra, 0, time.Second, time.Unix(1562382492, 0), time.Second)
require.Error(err)
// height == round.Height() and now.Before(round.StartTime())
_, err = rc.UpdateRound(ra, 1, time.Second, time.Unix(1562382092, 0), time.Second)
require.NoError(err)
// height >= round.NextEpochStartHeight() Delegates error
_, err = rc.UpdateRound(ra, 500, time.Second, time.Unix(1562382092, 0), time.Second)
require.Error(err)
// (31+120)%24
ra, err = rc.UpdateRound(ra, 31, time.Second, time.Unix(1562382522, 0), time.Second)
require.NoError(err)
require.Equal(identityset.Address(7).String(), ra.proposer)
}
func TestNewRound(t *testing.T) {
require := require.New(t)
bc, roll := makeChain(t)
rc := &roundCalculator{bc, true, roll, bc.CandidatesByHeight, 0}
proposer, err := rc.calculateProposer(5, 1, []string{"1", "2", "3", "4", "5"})
require.Error(err)
var validDelegates [24]string
for i := 0; i < 24; i++ {
validDelegates[i] = identityset.Address(i).String()
}
proposer, err = rc.calculateProposer(5, 1, validDelegates[:])
require.NoError(err)
require.Equal(validDelegates[6], proposer)
rc.timeBasedRotation = false
proposer, err = rc.calculateProposer(50, 1, validDelegates[:])
require.NoError(err)
require.Equal(validDelegates[2], proposer)
ra, err := rc.NewRound(1, time.Second, time.Unix(1562382392, 0), nil)
require.NoError(err)
require.Equal(uint32(19), ra.roundNum)
require.Equal(uint64(1), ra.height)
// sorted by address hash
require.Equal(identityset.Address(16).String(), ra.proposer)
rc.timeBasedRotation = true
ra, err = rc.NewRound(1, time.Second, time.Unix(1562382392, 0), nil)
require.NoError(err)
require.Equal(uint32(19), ra.roundNum)
require.Equal(uint64(1), ra.height)
require.Equal(identityset.Address(5).String(), ra.proposer)
}
func TestDelegates(t *testing.T) {
require := require.New(t)
bc, roll := makeChain(t)
rc := &roundCalculator{bc, true, roll, bc.CandidatesByHeight, 0}
_, err := rc.Delegates(361)
require.Error(err)
dels, err := rc.Delegates(4)
require.NoError(err)
require.Equal(roll.NumDelegates(), uint64(len(dels)))
require.False(rc.IsDelegate(identityset.Address(25).String(), 2))
require.True(rc.IsDelegate(identityset.Address(5).String(), 2))
}
func TestRoundInfo(t *testing.T) {
require := require.New(t)
bc, roll := makeChain(t)
rc := &roundCalculator{bc, true, roll, bc.CandidatesByHeight, 0}
require.NotNil(rc)
// error for lastBlockTime.Before(now)
_, _, err := rc.RoundInfo(1, time.Second, time.Unix(1562382300, 0))
require.Error(err)
// height is 1 with withToleration false
roundNum, roundStartTime, err := rc.RoundInfo(1, time.Second, time.Unix(1562382392, 0))
require.NoError(err)
require.Equal(uint32(19), roundNum)
require.True(roundStartTime.Equal(time.Unix(1562382392, 0)))
// height is 1 with withToleration true and duration%c.blockInterval < c.toleratedOvertime
roundNum, roundStartTime, err = rc.roundInfo(1, time.Second, time.Unix(1562382392, 500000), 501*time.Microsecond)
require.NoError(err)
require.Equal(uint32(19), roundNum)
require.True(roundStartTime.Equal(time.Unix(1562382392, 0)))
// height is 1 with withToleration true and duration%c.blockInterval >= c.toleratedOvertime
roundNum, roundStartTime, err = rc.roundInfo(1, time.Second, time.Unix(1562382392, 500000), 500*time.Microsecond)
require.NoError(err)
require.Equal(uint32(20), roundNum)
require.True(roundStartTime.After(time.Unix(1562382392, 0)))
// height is 4 with withToleration true and duration%c.blockInterval >= c.toleratedOvertime
roundNum, roundStartTime, err = rc.roundInfo(4, time.Second, time.Unix(1562382392, 500000), 500*time.Microsecond)
require.NoError(err)
require.Equal(uint32(18), roundNum)
require.True(roundStartTime.Equal(time.Unix(1562382393, 0)))
}
func makeChain(t *testing.T) (blockchain.Blockchain, *rolldpos.Protocol) {
require := require.New(t)
cfg := config.Default
testTrieFile, _ := ioutil.TempFile(os.TempDir(), "trie")
testTriePath := testTrieFile.Name()
testDBFile, _ := ioutil.TempFile(os.TempDir(), "db")
testDBPath := testDBFile.Name()
testIndexFile, _ := ioutil.TempFile(os.TempDir(), "index")
testIndexPath := testIndexFile.Name()
cfg.Chain.TrieDBPath = testTriePath
cfg.Chain.ChainDBPath = testDBPath
cfg.Chain.IndexDBPath = testIndexPath
cfg.Consensus.Scheme = config.RollDPoSScheme
cfg.Network.Port = testutil.RandomPort()
cfg.API.Port = testutil.RandomPort()
cfg.Genesis.Timestamp = 1562382372
sk, err := crypto.GenerateKey()
cfg.Chain.ProducerPrivKey = sk.HexString()
require.NoError(err)
for i := 0; i < identityset.Size(); i++ {
addr := identityset.Address(i).String()
value := unit.ConvertIotxToRau(100000000).String()
cfg.Genesis.InitBalanceMap[addr] = value
if uint64(i) < cfg.Genesis.NumDelegates {
d := genesis.Delegate{
OperatorAddrStr: addr,
RewardAddrStr: addr,
VotesStr: value,
}
cfg.Genesis.Delegates = append(cfg.Genesis.Delegates, d)
}
}
registry := protocol.Registry{}
chain := blockchain.NewBlockchain(
cfg,
blockchain.DefaultStateFactoryOption(),
blockchain.BoltDBDaoOption(),
blockchain.RegistryOption(®istry),
)
rolldposProtocol := rolldpos.NewProtocol(
cfg.Genesis.NumCandidateDelegates,
cfg.Genesis.NumDelegates,
cfg.Genesis.NumSubEpochs,
)
require.NoError(registry.Register(rolldpos.ProtocolID, rolldposProtocol))
rewardingProtocol := rewarding.NewProtocol(chain, rolldposProtocol)
registry.Register(rewarding.ProtocolID, rewardingProtocol)
acc := account.NewProtocol(config.NewHeightUpgrade(cfg))
registry.Register(account.ProtocolID, acc)
require.NoError(registry.Register(poll.ProtocolID, poll.NewLifeLongDelegatesProtocol(cfg.Genesis.Delegates)))
chain.Validator().AddActionEnvelopeValidators(protocol.NewGenericValidator(chain))
chain.Validator().AddActionValidators(acc, rewardingProtocol)
chain.GetFactory().AddActionHandlers(acc, rewardingProtocol)
ctx := context.Background()
require.NoError(chain.Start(ctx))
for i := 0; i < 50; i++ {
blk, err := chain.MintNewBlock(
nil,
time.Unix(cfg.Genesis.Timestamp+int64(i), 0),
)
require.NoError(blk.Finalize(nil, time.Unix(cfg.Genesis.Timestamp+int64(i), 0)))
require.NoError(err)
require.NoError(chain.CommitBlock(blk))
}
require.Equal(uint64(50), chain.TipHeight())
require.NoError(err)
return chain, rolldposProtocol
}
| 1 | 19,334 | assignments should only be cuddled with other assignments (from `wsl`) | iotexproject-iotex-core | go |
@@ -641,12 +641,13 @@ Blockly.genUid.soup_ = '!#%()*+,-./:;=?@[]^_`{|}~' +
* Measure some text using a canvas in-memory.
* @param {string} fontSize E.g., '10pt'
* @param {string} fontFamily E.g., 'Arial'
+ * @param {string} fontWeight E.g., '600'
* @param {string} text The actual text to measure
* @return {number} Width of the text in px.
*/
-Blockly.measureText = function(fontSize, fontFamily, text) {
+Blockly.measureText = function(fontSize, fontFamily, fontWeight, text) {
var canvas = document.createElement('canvas');
var context = canvas.getContext('2d');
- context.font = fontSize + fontFamily;
+ context.font = fontWeight + ' ' + fontSize + ' ' + fontFamily;
return context.measureText(text).width;
}; | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Utility methods.
* These methods are not specific to Blockly, and could be factored out into
* a JavaScript framework such as Closure.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.utils');
goog.require('goog.dom');
goog.require('goog.events.BrowserFeature');
goog.require('goog.math.Coordinate');
goog.require('goog.userAgent');
/**
* Cached value for whether 3D is supported
* @type {boolean}
* @private
*/
Blockly.cache3dSupported_ = null;
/**
* Add a CSS class to a element.
* Similar to Closure's goog.dom.classes.add, except it handles SVG elements.
* @param {!Element} element DOM element to add class to.
* @param {string} className Name of class to add.
* @private
*/
Blockly.addClass_ = function(element, className) {
var classes = element.getAttribute('class') || '';
if ((' ' + classes + ' ').indexOf(' ' + className + ' ') == -1) {
if (classes) {
classes += ' ';
}
element.setAttribute('class', classes + className);
}
};
/**
* Remove a CSS class from a element.
* Similar to Closure's goog.dom.classes.remove, except it handles SVG elements.
* @param {!Element} element DOM element to remove class from.
* @param {string} className Name of class to remove.
* @private
*/
Blockly.removeClass_ = function(element, className) {
var classes = element.getAttribute('class');
if ((' ' + classes + ' ').indexOf(' ' + className + ' ') != -1) {
var classList = classes.split(/\s+/);
for (var i = 0; i < classList.length; i++) {
if (!classList[i] || classList[i] == className) {
classList.splice(i, 1);
i--;
}
}
if (classList.length) {
element.setAttribute('class', classList.join(' '));
} else {
element.removeAttribute('class');
}
}
};
/**
* Checks if an element has the specified CSS class.
* Similar to Closure's goog.dom.classes.has, except it handles SVG elements.
* @param {!Element} element DOM element to check.
* @param {string} className Name of class to check.
* @return {boolean} True if class exists, false otherwise.
* @private
*/
Blockly.hasClass_ = function(element, className) {
var classes = element.getAttribute('class');
return (' ' + classes + ' ').indexOf(' ' + className + ' ') != -1;
};
/**
* Bind an event to a function call.
* @param {!Node} node Node upon which to listen.
* @param {string} name Event name to listen to (e.g. 'mousedown').
* @param {Object} thisObject The value of 'this' in the function.
* @param {!Function} func Function to call when event is triggered.
* @return {!Array.<!Array>} Opaque data that can be passed to unbindEvent_.
* @private
*/
Blockly.bindEvent_ = function(node, name, thisObject, func) {
if (thisObject) {
var wrapFunc = function(e) {
func.call(thisObject, e);
};
} else {
var wrapFunc = func;
}
node.addEventListener(name, wrapFunc, false);
var bindData = [[node, name, wrapFunc]];
// Add equivalent touch event.
if (name in Blockly.bindEvent_.TOUCH_MAP) {
wrapFunc = function(e) {
// Punt on multitouch events.
if (e.changedTouches.length == 1) {
// Map the touch event's properties to the event.
var touchPoint = e.changedTouches[0];
e.clientX = touchPoint.clientX;
e.clientY = touchPoint.clientY;
}
func.call(thisObject, e);
// Stop the browser from scrolling/zooming the page.
e.preventDefault();
};
for (var i = 0, eventName;
eventName = Blockly.bindEvent_.TOUCH_MAP[name][i]; i++) {
node.addEventListener(eventName, wrapFunc, false);
bindData.push([node, eventName, wrapFunc]);
}
}
return bindData;
};
/**
* The TOUCH_MAP lookup dictionary specifies additional touch events to fire,
* in conjunction with mouse events.
* @type {Object}
*/
Blockly.bindEvent_.TOUCH_MAP = {};
if (goog.events.BrowserFeature.TOUCH_ENABLED) {
Blockly.bindEvent_.TOUCH_MAP = {
'mousedown': ['touchstart'],
'mousemove': ['touchmove'],
'mouseup': ['touchend', 'touchcancel']
};
}
/**
 * Unbind one or more events from a function call.
* @param {!Array.<!Array>} bindData Opaque data from bindEvent_. This list is
* emptied during the course of calling this function.
* @return {!Function} The function call.
* @private
*/
Blockly.unbindEvent_ = function(bindData) {
while (bindData.length) {
var bindDatum = bindData.pop();
var node = bindDatum[0];
var name = bindDatum[1];
var func = bindDatum[2];
node.removeEventListener(name, func, false);
}
return func;
};
/**
* Fire a synthetic event synchronously.
* @param {!EventTarget} node The event's target node.
* @param {string} eventName Name of event (e.g. 'click').
*/
Blockly.fireUiEventNow = function(node, eventName) {
// Remove the event from the anti-duplicate database.
var list = Blockly.fireUiEvent.DB_[eventName];
if (list) {
var i = list.indexOf(node);
if (i != -1) {
list.splice(i, 1);
}
}
// Create a UI event in a browser-compatible way.
if (typeof UIEvent == 'function') {
// W3
var evt = new UIEvent(eventName, {});
} else {
// MSIE
var evt = document.createEvent('UIEvent');
evt.initUIEvent(eventName, false, false, window, 0);
}
node.dispatchEvent(evt);
};
/**
* Fire a synthetic event asynchronously. Groups of simultaneous events (e.g.
* a tree of blocks being deleted) are merged into one event.
* @param {!EventTarget} node The event's target node.
* @param {string} eventName Name of event (e.g. 'click').
*/
Blockly.fireUiEvent = function(node, eventName) {
var list = Blockly.fireUiEvent.DB_[eventName];
if (list) {
if (list.indexOf(node) != -1) {
// This event is already scheduled to fire.
return;
}
list.push(node);
} else {
Blockly.fireUiEvent.DB_[eventName] = [node];
}
var fire = function() {
Blockly.fireUiEventNow(node, eventName);
};
setTimeout(fire, 0);
};
/**
* Database of upcoming firing event types.
* Used to fire only one event after multiple changes.
* @type {!Object}
* @private
*/
Blockly.fireUiEvent.DB_ = {};
/**
* Don't do anything for this event, just halt propagation.
* @param {!Event} e An event.
*/
Blockly.noEvent = function(e) {
// This event has been handled. No need to bubble up to the document.
e.preventDefault();
e.stopPropagation();
};
/**
* Is this event targeting a text input widget?
* @param {!Event} e An event.
* @return {boolean} True if text input.
* @private
*/
Blockly.isTargetInput_ = function(e) {
return e.target.type == 'textarea' || e.target.type == 'text' ||
e.target.type == 'number' || e.target.type == 'email' ||
e.target.type == 'password' || e.target.type == 'search' ||
e.target.type == 'tel' || e.target.type == 'url' ||
e.target.isContentEditable;
};
/**
* Return the coordinates of the top-left corner of this element relative to
* its parent. Only for SVG elements and children (e.g. rect, g, path).
* @param {!Element} element SVG element to find the coordinates of.
* @return {!goog.math.Coordinate} Object with .x and .y properties.
* @private
*/
Blockly.getRelativeXY_ = function(element) {
var xy = new goog.math.Coordinate(0, 0);
// First, check for x and y attributes.
var x = element.getAttribute('x');
if (x) {
xy.x = parseInt(x, 10);
}
var y = element.getAttribute('y');
if (y) {
xy.y = parseInt(y, 10);
}
// Second, check for transform="translate(...)" attribute.
var transform = element.getAttribute('transform');
if (transform) {
var transformComponents = transform.match(Blockly.getRelativeXY_.XY_REGEXP_);
if (transformComponents) {
xy.x += parseFloat(transformComponents[1]);
if (transformComponents[3]) {
xy.y += parseFloat(transformComponents[3]);
}
}
}
// Third, check for style="transform: translate3d(...)".
var style = element.getAttribute('style');
if (style && style.indexOf('translate3d') > -1) {
var styleComponents = style.match(Blockly.getRelativeXY_.XY_3D_REGEXP_);
if (styleComponents) {
xy.x += parseFloat(styleComponents[1]);
if (styleComponents[3]) {
xy.y += parseFloat(styleComponents[3]);
}
}
}
return xy;
};
/**
* Static regex to pull the x,y values out of an SVG translate() directive.
* Note that Firefox and IE (9,10) return 'translate(12)' instead of
* 'translate(12, 0)'.
* Note that IE (9,10) returns 'translate(16 8)' instead of 'translate(16, 8)'.
* Note that IE has been reported to return scientific notation (0.123456e-42).
* @type {!RegExp}
* @private
*/
Blockly.getRelativeXY_.XY_REGEXP_ =
/translate\(\s*([-+\d.e]+)([ ,]\s*([-+\d.e]+)\s*\))?/;
/**
* Static regex to pull the x,y,z values out of a translate3d() style property.
* Accounts for same exceptions as XY_REGEXP_.
* @type {!RegExp}
* @private
*/
Blockly.getRelativeXY_.XY_3D_REGEXP_ =
/transform:\s*translate3d\(\s*([-+\d.e]+)px([ ,]\s*([-+\d.e]+)\s*)px([ ,]\s*([-+\d.e]+)\s*)px\)?/;
/**
 * Return the absolute coordinates of the top-left corner of this element,
 * scaled by the workspace scale if the element is a descendant of the canvas
 * SVG element.
* The origin (0,0) is the top-left corner of the Blockly SVG.
* @param {!Element} element Element to find the coordinates of.
* @param {!Blockly.Workspace} workspace Element must be in this workspace.
* @return {!goog.math.Coordinate} Object with .x and .y properties.
* @private
*/
Blockly.getSvgXY_ = function(element, workspace) {
var x = 0;
var y = 0;
var scale = 1;
if (goog.dom.contains(workspace.getCanvas(), element) ||
goog.dom.contains(workspace.getBubbleCanvas(), element)) {
// Before the SVG canvas, scale the coordinates.
scale = workspace.scale;
}
do {
// Loop through this block and every parent.
var xy = Blockly.getRelativeXY_(element);
if (element == workspace.getCanvas() ||
element == workspace.getBubbleCanvas()) {
// After the SVG canvas, don't scale the coordinates.
scale = 1;
}
x += xy.x * scale;
y += xy.y * scale;
element = element.parentNode;
} while (element && element != workspace.getParentSvg());
return new goog.math.Coordinate(x, y);
};
/**
* Check if 3D transforms are supported by adding an element
* and attempting to set the property.
* @return {boolean} true if 3D transforms are supported
*/
Blockly.is3dSupported = function() {
if (Blockly.cache3dSupported_ !== null) {
return Blockly.cache3dSupported_;
}
// CC-BY-SA Lorenzo Polidori
// https://stackoverflow.com/questions/5661671/detecting-transform-translate3d-support
if (!window.getComputedStyle) {
return false;
}
var el = document.createElement('p'),
has3d,
transforms = {
'webkitTransform':'-webkit-transform',
'OTransform':'-o-transform',
'msTransform':'-ms-transform',
'MozTransform':'-moz-transform',
'transform':'transform'
};
// Add it to the body to get the computed style.
document.body.insertBefore(el, null);
for (var t in transforms) {
if (el.style[t] !== undefined) {
el.style[t] = "translate3d(1px,1px,1px)";
has3d = window.getComputedStyle(el).getPropertyValue(transforms[t]);
}
}
document.body.removeChild(el);
Blockly.cache3dSupported_ = (has3d !== undefined && has3d.length > 0 && has3d !== "none");
return Blockly.cache3dSupported_;
};
/**
* Helper method for creating SVG elements.
* @param {string} name Element's tag name.
* @param {!Object} attrs Dictionary of attribute names and values.
* @param {Element} parent Optional parent on which to append the element.
* @param {Blockly.Workspace=} opt_workspace Optional workspace for access to
* context (scale...).
* @return {!SVGElement} Newly created SVG element.
*/
Blockly.createSvgElement = function(name, attrs, parent, opt_workspace) {
var e = /** @type {!SVGElement} */ (
document.createElementNS(Blockly.SVG_NS, name));
for (var key in attrs) {
e.setAttribute(key, attrs[key]);
}
// IE defines a unique attribute "runtimeStyle", it is NOT applied to
// elements created with createElementNS. However, Closure checks for IE
// and assumes the presence of the attribute and crashes.
if (document.body.runtimeStyle) { // Indicates presence of IE-only attr.
e.runtimeStyle = e.currentStyle = e.style;
}
if (parent) {
parent.appendChild(e);
}
return e;
};
/**
* Set css classes to allow/disallow the browser from selecting/highlighting
* text, etc. on the page.
* @param {boolean} selectable Whether elements on the page can be selected.
*/
Blockly.setPageSelectable = function(selectable) {
if (selectable) {
Blockly.removeClass_(document.body, 'blocklyNonSelectable');
} else {
Blockly.addClass_(document.body, 'blocklyNonSelectable');
}
};
/**
* Is this event a right-click?
* @param {!Event} e Mouse event.
* @return {boolean} True if right-click.
*/
Blockly.isRightButton = function(e) {
if (e.ctrlKey && goog.userAgent.MAC) {
// Control-clicking on Mac OS X is treated as a right-click.
// WebKit on Mac OS X fails to change button to 2 (but Gecko does).
return true;
}
return e.button == 2;
};
/**
* Return the converted coordinates of the given mouse event.
* The origin (0,0) is the top-left corner of the Blockly svg.
* @param {!Event} e Mouse event.
* @param {!Element} svg SVG element.
* @return {!Object} Object with .x and .y properties.
*/
Blockly.mouseToSvg = function(e, svg) {
var svgPoint = svg.createSVGPoint();
svgPoint.x = e.clientX;
svgPoint.y = e.clientY;
var matrix = svg.getScreenCTM();
matrix = matrix.inverse();
return svgPoint.matrixTransform(matrix);
};
/**
* Given an array of strings, return the length of the shortest one.
* @param {!Array.<string>} array Array of strings.
* @return {number} Length of shortest string.
*/
Blockly.shortestStringLength = function(array) {
if (!array.length) {
return 0;
}
var len = array[0].length;
for (var i = 1; i < array.length; i++) {
len = Math.min(len, array[i].length);
}
return len;
};
/**
* Given an array of strings, return the length of the common prefix.
* Words may not be split. Any space after a word is included in the length.
* @param {!Array.<string>} array Array of strings.
* @param {number=} opt_shortest Length of shortest string.
* @return {number} Length of common prefix.
*/
Blockly.commonWordPrefix = function(array, opt_shortest) {
if (!array.length) {
return 0;
} else if (array.length == 1) {
return array[0].length;
}
var wordPrefix = 0;
var max = opt_shortest || Blockly.shortestStringLength(array);
for (var len = 0; len < max; len++) {
var letter = array[0][len];
for (var i = 1; i < array.length; i++) {
if (letter != array[i][len]) {
return wordPrefix;
}
}
if (letter == ' ') {
wordPrefix = len + 1;
}
}
for (var i = 1; i < array.length; i++) {
var letter = array[i][len];
if (letter && letter != ' ') {
return wordPrefix;
}
}
return max;
};
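// Editor's worked example (not in the original file): prefixes only break on
// word boundaries, and the trailing space is counted.
//   Blockly.commonWordPrefix(['hello world', 'hello there']);  // 6 ('hello ')
//   Blockly.commonWordPrefix(['abc', 'abd']);                  // 0 (no whole word shared)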
/**
* Given an array of strings, return the length of the common suffix.
* Words may not be split. Any space after a word is included in the length.
* @param {!Array.<string>} array Array of strings.
* @param {number=} opt_shortest Length of shortest string.
* @return {number} Length of common suffix.
*/
Blockly.commonWordSuffix = function(array, opt_shortest) {
if (!array.length) {
return 0;
} else if (array.length == 1) {
return array[0].length;
}
var wordPrefix = 0;
var max = opt_shortest || Blockly.shortestStringLength(array);
for (var len = 0; len < max; len++) {
var letter = array[0].substr(-len - 1, 1);
for (var i = 1; i < array.length; i++) {
if (letter != array[i].substr(-len - 1, 1)) {
return wordPrefix;
}
}
if (letter == ' ') {
wordPrefix = len + 1;
}
}
for (var i = 1; i < array.length; i++) {
var letter = array[i].charAt(array[i].length - len - 1);
if (letter && letter != ' ') {
return wordPrefix;
}
}
return max;
};
/**
* Is the given string a number (includes negative and decimals).
* @param {string} str Input string.
* @return {boolean} True if number, false otherwise.
*/
Blockly.isNumber = function(str) {
return !!str.match(/^\s*-?\d+(\.\d+)?\s*$/);
};
/**
* Parse a string with any number of interpolation tokens (%1, %2, ...).
* '%' characters may be self-escaped (%%).
* @param {string} message Text containing interpolation tokens.
* @return {!Array.<string|number>} Array of strings and numbers.
*/
Blockly.tokenizeInterpolation = function(message) {
var tokens = [];
var chars = message.split('');
chars.push(''); // End marker.
// Parse the message with a finite state machine.
// 0 - Base case.
// 1 - % found.
// 2 - Digit found.
var state = 0;
var buffer = [];
var number = null;
for (var i = 0; i < chars.length; i++) {
var c = chars[i];
if (state == 0) {
if (c == '%') {
state = 1; // Start escape.
} else {
buffer.push(c); // Regular char.
}
} else if (state == 1) {
if (c == '%') {
buffer.push(c); // Escaped %: %%
state = 0;
} else if ('0' <= c && c <= '9') {
state = 2;
number = c;
var text = buffer.join('');
if (text) {
tokens.push(text);
}
buffer.length = 0;
} else {
buffer.push('%', c); // Not an escape: %a
state = 0;
}
} else if (state == 2) {
if ('0' <= c && c <= '9') {
number += c; // Multi-digit number.
} else {
tokens.push(parseInt(number, 10));
i--; // Parse this char again.
state = 0;
}
}
}
var text = buffer.join('');
if (text) {
tokens.push(text);
}
return tokens;
};
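// Editor's worked example (not in the original file): '%N' tokens become
// numbers, surrounding text stays as strings, and '%%' collapses to '%'.
//   Blockly.tokenizeInterpolation('Hi %1, %% done: %2');
//   // -> ['Hi ', 1, ', % done: ', 2]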
/**
* Generate a unique ID. This should be globally unique.
* 87 characters ^ 20 length > 128 bits (better than a UUID).
* @return {string}
*/
Blockly.genUid = function() {
var length = 20;
var soupLength = Blockly.genUid.soup_.length;
var id = [];
for (var i = 0; i < length; i++) {
id[i] = Blockly.genUid.soup_.charAt(Math.random() * soupLength);
}
return id.join('');
};
/**
* Legal characters for the unique ID.
* Should be all on a US keyboard. No XML special characters or control codes.
* Removed $ due to issue 251.
* @private
*/
Blockly.genUid.soup_ = '!#%()*+,-./:;=?@[]^_`{|}~' +
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
/**
* Measure some text using a canvas in-memory.
* @param {string} fontSize E.g., '10pt'
* @param {string} fontFamily E.g., 'Arial'
* @param {string} text The actual text to measure
* @return {number} Width of the text in px.
*/
Blockly.measureText = function(fontSize, fontFamily, text) {
var canvas = document.createElement('canvas');
var context = canvas.getContext('2d');
  context.font = fontSize + ' ' + fontFamily;
return context.measureText(text).width;
};
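// Editor's usage sketch (not in the original file): measuring a label before
// laying it out, using the same font settings the SVG text element will use.
//   var width = Blockly.measureText('10pt', 'Arial', 'repeat 10 times');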
| 1 | 7,717 | prefer changing the signature by adding params to the end, not the middle, I think. | LLK-scratch-blocks | js |
@@ -301,6 +301,15 @@ func makeStatefulSetService(p *monitoringv1.Prometheus, config Config) *v1.Servi
},
},
}
+
+ if p.Spec.Thanos != nil {
+ svc.Spec.Ports = append(svc.Spec.Ports, v1.ServicePort{
+ Name: "grpc",
+ Port: 10901,
+ TargetPort: intstr.FromString("grpc"),
+ })
+ }
+
return svc
}
| 1 | // Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"fmt"
"net/url"
"path"
"strings"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/blang/semver"
monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/coreos/prometheus-operator/pkg/k8sutil"
"github.com/pkg/errors"
)
const (
governingServiceName = "prometheus-operated"
DefaultPrometheusVersion = "v2.7.1"
DefaultThanosVersion = "v0.7.0"
defaultRetention = "24h"
defaultReplicaExternalLabelName = "prometheus_replica"
storageDir = "/prometheus"
confDir = "/etc/prometheus/config"
confOutDir = "/etc/prometheus/config_out"
rulesDir = "/etc/prometheus/rules"
secretsDir = "/etc/prometheus/secrets/"
configmapsDir = "/etc/prometheus/configmaps/"
configFilename = "prometheus.yaml.gz"
configEnvsubstFilename = "prometheus.env.yaml"
sSetInputHashName = "prometheus-operator-input-hash"
defaultPortName = "web"
)
var (
minReplicas int32 = 1
defaultMaxConcurrency int32 = 20
managedByOperatorLabel = "managed-by"
managedByOperatorLabelValue = "prometheus-operator"
managedByOperatorLabels = map[string]string{
managedByOperatorLabel: managedByOperatorLabelValue,
}
probeTimeoutSeconds int32 = 3
CompatibilityMatrix = []string{
"v1.4.0",
"v1.4.1",
"v1.5.0",
"v1.5.1",
"v1.5.2",
"v1.5.3",
"v1.6.0",
"v1.6.1",
"v1.6.2",
"v1.6.3",
"v1.7.0",
"v1.7.1",
"v1.7.2",
"v1.8.0",
"v2.0.0",
"v2.2.1",
"v2.3.1",
"v2.3.2",
"v2.4.0",
"v2.4.1",
"v2.4.2",
"v2.4.3",
"v2.5.0",
"v2.6.0",
"v2.6.1",
"v2.7.0",
"v2.7.1",
"v2.7.2",
"v2.8.1",
"v2.9.2",
"v2.10.0",
}
)
func makeStatefulSet(
p monitoringv1.Prometheus,
config *Config,
ruleConfigMapNames []string,
inputHash string,
) (*appsv1.StatefulSet, error) {
// p is passed in by value, not by reference. But p contains references like
// to annotation map, that do not get copied on function invocation. Ensure to
// prevent side effects before editing p by creating a deep copy. For more
// details see https://github.com/coreos/prometheus-operator/issues/1659.
p = *p.DeepCopy()
// TODO(fabxc): is this the right point to inject defaults?
// Ideally we would do it before storing but that's currently not possible.
// Potentially an update handler on first insertion.
if p.Spec.BaseImage == "" {
p.Spec.BaseImage = config.PrometheusDefaultBaseImage
}
if p.Spec.Version == "" {
p.Spec.Version = DefaultPrometheusVersion
}
if p.Spec.Thanos != nil && p.Spec.Thanos.Version == nil {
v := DefaultThanosVersion
p.Spec.Thanos.Version = &v
}
if p.Spec.PortName == "" {
p.Spec.PortName = defaultPortName
}
versionStr := strings.TrimLeft(p.Spec.Version, "v")
version, err := semver.Parse(versionStr)
if err != nil {
return nil, errors.Wrap(err, "parse version")
}
if p.Spec.Replicas == nil {
p.Spec.Replicas = &minReplicas
}
intZero := int32(0)
if p.Spec.Replicas != nil && *p.Spec.Replicas < 0 {
p.Spec.Replicas = &intZero
}
if p.Spec.Retention == "" {
p.Spec.Retention = defaultRetention
}
if p.Spec.Resources.Requests == nil {
p.Spec.Resources.Requests = v1.ResourceList{}
}
_, memoryRequestFound := p.Spec.Resources.Requests[v1.ResourceMemory]
memoryLimit, memoryLimitFound := p.Spec.Resources.Limits[v1.ResourceMemory]
if !memoryRequestFound && version.Major == 1 {
defaultMemoryRequest := resource.MustParse("2Gi")
compareResult := memoryLimit.Cmp(defaultMemoryRequest)
// If limit is given and smaller or equal to 2Gi, then set memory
// request to the given limit. This is necessary as if limit < request,
// then a Pod is not schedulable.
if memoryLimitFound && compareResult <= 0 {
p.Spec.Resources.Requests[v1.ResourceMemory] = memoryLimit
} else {
p.Spec.Resources.Requests[v1.ResourceMemory] = defaultMemoryRequest
}
}
spec, err := makeStatefulSetSpec(p, config, ruleConfigMapNames)
if err != nil {
return nil, errors.Wrap(err, "make StatefulSet spec")
}
boolTrue := true
statefulset := &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: prefixedName(p.Name),
Labels: config.Labels.Merge(p.ObjectMeta.Labels),
Annotations: p.ObjectMeta.Annotations,
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: p.APIVersion,
BlockOwnerDeletion: &boolTrue,
Controller: &boolTrue,
Kind: p.Kind,
Name: p.Name,
UID: p.UID,
},
},
},
Spec: *spec,
}
if statefulset.ObjectMeta.Annotations == nil {
statefulset.ObjectMeta.Annotations = map[string]string{
sSetInputHashName: inputHash,
}
} else {
statefulset.ObjectMeta.Annotations[sSetInputHashName] = inputHash
}
if p.Spec.ImagePullSecrets != nil && len(p.Spec.ImagePullSecrets) > 0 {
statefulset.Spec.Template.Spec.ImagePullSecrets = p.Spec.ImagePullSecrets
}
storageSpec := p.Spec.Storage
if storageSpec == nil {
statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, v1.Volume{
Name: volumeName(p.Name),
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
})
} else if storageSpec.EmptyDir != nil {
emptyDir := storageSpec.EmptyDir
statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, v1.Volume{
Name: volumeName(p.Name),
VolumeSource: v1.VolumeSource{
EmptyDir: emptyDir,
},
})
} else {
pvcTemplate := storageSpec.VolumeClaimTemplate
if pvcTemplate.Name == "" {
pvcTemplate.Name = volumeName(p.Name)
}
pvcTemplate.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
pvcTemplate.Spec.Resources = storageSpec.VolumeClaimTemplate.Spec.Resources
pvcTemplate.Spec.Selector = storageSpec.VolumeClaimTemplate.Spec.Selector
statefulset.Spec.VolumeClaimTemplates = append(statefulset.Spec.VolumeClaimTemplates, pvcTemplate)
}
for _, volume := range p.Spec.Volumes {
statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, volume)
}
return statefulset, nil
}
func makeEmptyConfigurationSecret(p *monitoringv1.Prometheus, config Config) (*v1.Secret, error) {
s := makeConfigSecret(p, config)
s.ObjectMeta.Annotations = map[string]string{
"empty": "true",
}
return s, nil
}
func makeConfigSecret(p *monitoringv1.Prometheus, config Config) *v1.Secret {
boolTrue := true
return &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: configSecretName(p.Name),
Labels: config.Labels.Merge(managedByOperatorLabels),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: p.APIVersion,
BlockOwnerDeletion: &boolTrue,
Controller: &boolTrue,
Kind: p.Kind,
Name: p.Name,
UID: p.UID,
},
},
},
Data: map[string][]byte{
configFilename: {},
},
}
}
func makeStatefulSetService(p *monitoringv1.Prometheus, config Config) *v1.Service {
p = p.DeepCopy()
if p.Spec.PortName == "" {
p.Spec.PortName = defaultPortName
}
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: governingServiceName,
OwnerReferences: []metav1.OwnerReference{
metav1.OwnerReference{
Name: p.GetName(),
Kind: p.Kind,
APIVersion: p.APIVersion,
UID: p.GetUID(),
},
},
Labels: config.Labels.Merge(map[string]string{
"operated-prometheus": "true",
}),
},
Spec: v1.ServiceSpec{
ClusterIP: "None",
Ports: []v1.ServicePort{
{
Name: p.Spec.PortName,
Port: 9090,
TargetPort: intstr.FromString(p.Spec.PortName),
},
},
Selector: map[string]string{
"app": "prometheus",
},
},
}
return svc
}
func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMapNames []string) (*appsv1.StatefulSetSpec, error) {
// Prometheus may take quite long to shut down to checkpoint existing data.
// Allow up to 10 minutes for clean termination.
terminationGracePeriod := int64(600)
versionStr := strings.TrimLeft(p.Spec.Version, "v")
version, err := semver.Parse(versionStr)
if err != nil {
return nil, errors.Wrap(err, "parse version")
}
promArgs := []string{
"-web.console.templates=/etc/prometheus/consoles",
"-web.console.libraries=/etc/prometheus/console_libraries",
}
switch version.Major {
case 1:
promArgs = append(promArgs,
"-storage.local.retention="+p.Spec.Retention,
"-storage.local.num-fingerprint-mutexes=4096",
fmt.Sprintf("-storage.local.path=%s", storageDir),
"-storage.local.chunk-encoding-version=2",
fmt.Sprintf("-config.file=%s", path.Join(confOutDir, configEnvsubstFilename)),
)
// We attempt to specify decent storage tuning flags based on how much the
// requested memory can fit. The user has to specify an appropriate buffering
// in memory limits to catch increased memory usage during query bursts.
// More info: https://prometheus.io/docs/operating/storage/.
reqMem := p.Spec.Resources.Requests[v1.ResourceMemory]
if version.Minor < 6 {
   // 1024 bytes is the fixed chunk size. With an increasing number of chunks actually
// in memory, overhead owed to their management, higher ingestion buffers, etc.
// increases.
   // We are conservative for now and assume this to be 80% as the Kubernetes environment
// generally has a very high time series churn.
memChunks := reqMem.Value() / 1024 / 5
promArgs = append(promArgs,
"-storage.local.memory-chunks="+fmt.Sprintf("%d", memChunks),
"-storage.local.max-chunks-to-persist="+fmt.Sprintf("%d", memChunks/2),
)
} else {
// Leave 1/3 head room for other overhead.
promArgs = append(promArgs,
"-storage.local.target-heap-size="+fmt.Sprintf("%d", reqMem.Value()/3*2),
)
}
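  // Editor's worked example (not in the original source): with the default
  // 2Gi request, reqMem.Value() is 2147483648 bytes, so for Prometheus < 1.6
  // memChunks = 2147483648/1024/5 = 419430 and max-chunks-to-persist becomes
  // 209715; for >= 1.6 the target heap size is 2147483648/3*2 = 1431655764
  // bytes (roughly 1.33Gi).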
case 2:
retentionTimeFlag := "-storage.tsdb.retention="
if version.Minor >= 7 {
retentionTimeFlag = "-storage.tsdb.retention.time="
if p.Spec.RetentionSize != "" {
promArgs = append(promArgs,
fmt.Sprintf("-storage.tsdb.retention.size=%s", p.Spec.RetentionSize),
)
}
}
promArgs = append(promArgs,
fmt.Sprintf("-config.file=%s", path.Join(confOutDir, configEnvsubstFilename)),
fmt.Sprintf("-storage.tsdb.path=%s", storageDir),
retentionTimeFlag+p.Spec.Retention,
"-web.enable-lifecycle",
"-storage.tsdb.no-lockfile",
)
if p.Spec.Query != nil && p.Spec.Query.LookbackDelta != nil {
promArgs = append(promArgs,
fmt.Sprintf("-query.lookback-delta=%s", *p.Spec.Query.LookbackDelta),
)
}
if version.Minor >= 4 {
if p.Spec.Rules.Alert.ForOutageTolerance != "" {
promArgs = append(promArgs, "-rules.alert.for-outage-tolerance="+p.Spec.Rules.Alert.ForOutageTolerance)
}
if p.Spec.Rules.Alert.ForGracePeriod != "" {
promArgs = append(promArgs, "-rules.alert.for-grace-period="+p.Spec.Rules.Alert.ForGracePeriod)
}
if p.Spec.Rules.Alert.ResendDelay != "" {
promArgs = append(promArgs, "-rules.alert.resend-delay="+p.Spec.Rules.Alert.ResendDelay)
}
}
if version.Minor >= 5 {
if p.Spec.Query != nil && p.Spec.Query.MaxSamples != nil {
promArgs = append(promArgs,
fmt.Sprintf("-query.max-samples=%d", *p.Spec.Query.MaxSamples),
)
}
}
default:
return nil, errors.Errorf("unsupported Prometheus major version %s", version)
}
if p.Spec.Query != nil {
if p.Spec.Query.MaxConcurrency != nil {
if *p.Spec.Query.MaxConcurrency < 1 {
p.Spec.Query.MaxConcurrency = &defaultMaxConcurrency
}
promArgs = append(promArgs,
fmt.Sprintf("-query.max-concurrency=%d", *p.Spec.Query.MaxConcurrency),
)
}
if p.Spec.Query.Timeout != nil {
promArgs = append(promArgs,
fmt.Sprintf("-query.timeout=%s", *p.Spec.Query.Timeout),
)
}
}
var securityContext *v1.PodSecurityContext = nil
if p.Spec.SecurityContext != nil {
securityContext = p.Spec.SecurityContext
}
if p.Spec.EnableAdminAPI {
promArgs = append(promArgs, "-web.enable-admin-api")
}
if p.Spec.ExternalURL != "" {
promArgs = append(promArgs, "-web.external-url="+p.Spec.ExternalURL)
}
webRoutePrefix := "/"
if p.Spec.RoutePrefix != "" {
webRoutePrefix = p.Spec.RoutePrefix
}
promArgs = append(promArgs, "-web.route-prefix="+webRoutePrefix)
if p.Spec.LogLevel != "" && p.Spec.LogLevel != "info" {
promArgs = append(promArgs, fmt.Sprintf("-log.level=%s", p.Spec.LogLevel))
}
if version.GTE(semver.MustParse("2.6.0")) {
if p.Spec.LogFormat != "" && p.Spec.LogFormat != "logfmt" {
promArgs = append(promArgs, fmt.Sprintf("-log.format=%s", p.Spec.LogFormat))
}
}
if version.GTE(semver.MustParse("2.11.0")) && p.Spec.WALCompression != nil {
if *p.Spec.WALCompression {
promArgs = append(promArgs, "-storage.tsdb.wal-compression")
} else {
promArgs = append(promArgs, "-no-storage.tsdb.wal-compression")
}
}
var ports []v1.ContainerPort
if p.Spec.ListenLocal {
promArgs = append(promArgs, "-web.listen-address=127.0.0.1:9090")
} else {
ports = []v1.ContainerPort{
{
Name: p.Spec.PortName,
ContainerPort: 9090,
Protocol: v1.ProtocolTCP,
},
}
}
if version.Major == 2 {
for i, a := range promArgs {
promArgs[i] = "-" + a
}
}
localReloadURL := &url.URL{
Scheme: "http",
Host: c.LocalHost + ":9090",
Path: path.Clean(webRoutePrefix + "/-/reload"),
}
volumes := []v1.Volume{
{
Name: "config",
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: configSecretName(p.Name),
},
},
},
{
Name: "config-out",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
}
for _, name := range ruleConfigMapNames {
volumes = append(volumes, v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
})
}
volName := volumeName(p.Name)
if p.Spec.Storage != nil {
if p.Spec.Storage.VolumeClaimTemplate.Name != "" {
volName = p.Spec.Storage.VolumeClaimTemplate.Name
}
}
promVolumeMounts := []v1.VolumeMount{
{
Name: "config-out",
ReadOnly: true,
MountPath: confOutDir,
},
{
Name: volName,
MountPath: storageDir,
SubPath: subPathForStorage(p.Spec.Storage),
},
}
for _, name := range ruleConfigMapNames {
promVolumeMounts = append(promVolumeMounts, v1.VolumeMount{
Name: name,
MountPath: rulesDir + "/" + name,
})
}
for _, s := range p.Spec.Secrets {
volumes = append(volumes, v1.Volume{
Name: k8sutil.SanitizeVolumeName("secret-" + s),
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: s,
},
},
})
promVolumeMounts = append(promVolumeMounts, v1.VolumeMount{
Name: k8sutil.SanitizeVolumeName("secret-" + s),
ReadOnly: true,
MountPath: secretsDir + s,
})
}
for _, c := range p.Spec.ConfigMaps {
volumes = append(volumes, v1.Volume{
Name: k8sutil.SanitizeVolumeName("configmap-" + c),
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: c,
},
},
},
})
promVolumeMounts = append(promVolumeMounts, v1.VolumeMount{
Name: k8sutil.SanitizeVolumeName("configmap-" + c),
ReadOnly: true,
MountPath: configmapsDir + c,
})
}
configReloadVolumeMounts := []v1.VolumeMount{
{
Name: "config",
MountPath: confDir,
},
{
Name: "config-out",
MountPath: confOutDir,
},
}
configReloadArgs := []string{
fmt.Sprintf("--log-format=%s", c.LogFormat),
fmt.Sprintf("--reload-url=%s", localReloadURL),
fmt.Sprintf("--config-file=%s", path.Join(confDir, configFilename)),
fmt.Sprintf("--config-envsubst-file=%s", path.Join(confOutDir, configEnvsubstFilename)),
}
var livenessProbeHandler v1.Handler
var readinessProbeHandler v1.Handler
var livenessFailureThreshold int32
if (version.Major == 1 && version.Minor >= 8) || version.Major == 2 {
livenessProbeHandler = v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: path.Clean(webRoutePrefix + "/-/healthy"),
Port: intstr.FromString(p.Spec.PortName),
},
}
readinessProbeHandler = v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: path.Clean(webRoutePrefix + "/-/ready"),
Port: intstr.FromString(p.Spec.PortName),
},
}
livenessFailureThreshold = 6
} else {
livenessProbeHandler = v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: path.Clean(webRoutePrefix + "/status"),
Port: intstr.FromString(p.Spec.PortName),
},
}
readinessProbeHandler = livenessProbeHandler
// For larger servers, restoring a checkpoint on startup may take quite a bit of time.
// Wait up to 5 minutes (60 fails * 5s per fail)
livenessFailureThreshold = 60
}
var livenessProbe *v1.Probe
var readinessProbe *v1.Probe
if !p.Spec.ListenLocal {
livenessProbe = &v1.Probe{
Handler: livenessProbeHandler,
PeriodSeconds: 5,
TimeoutSeconds: probeTimeoutSeconds,
FailureThreshold: livenessFailureThreshold,
}
readinessProbe = &v1.Probe{
Handler: readinessProbeHandler,
TimeoutSeconds: probeTimeoutSeconds,
PeriodSeconds: 5,
FailureThreshold: 120, // Allow up to 10m on startup for data recovery
}
}
podAnnotations := map[string]string{}
podLabels := map[string]string{}
if p.Spec.PodMetadata != nil {
if p.Spec.PodMetadata.Labels != nil {
for k, v := range p.Spec.PodMetadata.Labels {
podLabels[k] = v
}
}
if p.Spec.PodMetadata.Annotations != nil {
for k, v := range p.Spec.PodMetadata.Annotations {
podAnnotations[k] = v
}
}
}
podLabels["app"] = "prometheus"
podLabels["prometheus"] = p.Name
finalLabels := c.Labels.Merge(podLabels)
var additionalContainers []v1.Container
if len(ruleConfigMapNames) != 0 {
container := v1.Container{
Name: "rules-configmap-reloader",
Image: c.ConfigReloaderImage,
Args: []string{
fmt.Sprintf("--webhook-url=%s", localReloadURL),
},
VolumeMounts: []v1.VolumeMount{},
Resources: v1.ResourceRequirements{Limits: v1.ResourceList{}},
}
if c.ConfigReloaderCPU != "0" {
container.Resources.Limits[v1.ResourceCPU] = resource.MustParse(c.ConfigReloaderCPU)
}
if c.ConfigReloaderMemory != "0" {
container.Resources.Limits[v1.ResourceMemory] = resource.MustParse(c.ConfigReloaderMemory)
}
for _, name := range ruleConfigMapNames {
mountPath := rulesDir + "/" + name
container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
Name: name,
MountPath: mountPath,
})
container.Args = append(container.Args, fmt.Sprintf("--volume-dir=%s", mountPath))
}
additionalContainers = append(additionalContainers, container)
}
if p.Spec.Thanos != nil {
// Version is used by default.
// If the tag is specified, we use the tag to identify the container image.
// If the sha is specified, we use the sha to identify the container image,
// as it has even stronger immutable guarantees to identify the image.
thanosBaseImage := c.ThanosDefaultBaseImage
if p.Spec.Thanos.BaseImage != nil {
thanosBaseImage = *p.Spec.Thanos.BaseImage
}
thanosImage := fmt.Sprintf("%s:%s", thanosBaseImage, *p.Spec.Thanos.Version)
if p.Spec.Thanos.Tag != nil {
thanosImage = fmt.Sprintf("%s:%s", thanosBaseImage, *p.Spec.Thanos.Tag)
}
if p.Spec.Thanos.SHA != nil {
thanosImage = fmt.Sprintf("%s@sha256:%s", thanosBaseImage, *p.Spec.Thanos.SHA)
}
if p.Spec.Thanos.Image != nil && *p.Spec.Thanos.Image != "" {
thanosImage = *p.Spec.Thanos.Image
}
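  // Editor's worked example (not in the original source), assuming a base
  // image such as "quay.io/thanos/thanos": the default yields
  // "quay.io/thanos/thanos:v0.7.0", a Tag replaces the version part, a SHA
  // wins over both as "quay.io/thanos/thanos@sha256:<digest>", and an
  // explicit Spec.Thanos.Image overrides everything.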
container := v1.Container{
Name: "thanos-sidecar",
Image: thanosImage,
Args: []string{
"sidecar",
fmt.Sprintf("--prometheus.url=http://%s:9090%s", c.LocalHost, path.Clean(webRoutePrefix)),
fmt.Sprintf("--tsdb.path=%s", storageDir),
"--grpc-address=[$(POD_IP)]:10901",
"--http-address=[$(POD_IP)]:10902",
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: 10902,
},
{
Name: "grpc",
ContainerPort: 10901,
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: volName,
MountPath: storageDir,
SubPath: subPathForStorage(p.Spec.Storage),
},
},
Resources: p.Spec.Thanos.Resources,
}
if p.Spec.Thanos.ObjectStorageConfig != nil {
container.Args = append(container.Args, "--objstore.config=$(OBJSTORE_CONFIG)")
container.Env = append(container.Env, v1.EnvVar{
Name: "OBJSTORE_CONFIG",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: p.Spec.Thanos.ObjectStorageConfig,
},
})
}
if p.Spec.LogLevel != "" {
container.Args = append(container.Args, fmt.Sprintf("--log.level=%s", p.Spec.LogLevel))
}
if p.Spec.LogFormat != "" {
container.Args = append(container.Args, fmt.Sprintf("--log.format=%s", p.Spec.LogFormat))
}
additionalContainers = append(additionalContainers, container)
promArgs = append(promArgs, "--storage.tsdb.min-block-duration=2h", "--storage.tsdb.max-block-duration=2h")
}
// Version is used by default.
// If the tag is specified, we use the tag to identify the container image.
// If the sha is specified, we use the sha to identify the container image,
// as it has even stronger immutable guarantees to identify the image.
prometheusImage := fmt.Sprintf("%s:%s", p.Spec.BaseImage, p.Spec.Version)
if p.Spec.Tag != "" {
prometheusImage = fmt.Sprintf("%s:%s", p.Spec.BaseImage, p.Spec.Tag)
}
if p.Spec.SHA != "" {
prometheusImage = fmt.Sprintf("%s@sha256:%s", p.Spec.BaseImage, p.Spec.SHA)
}
if p.Spec.Image != nil && *p.Spec.Image != "" {
prometheusImage = *p.Spec.Image
}
prometheusConfigReloaderResources := v1.ResourceRequirements{Limits: v1.ResourceList{}}
if c.ConfigReloaderCPU != "0" {
prometheusConfigReloaderResources.Limits[v1.ResourceCPU] = resource.MustParse(c.ConfigReloaderCPU)
}
if c.ConfigReloaderMemory != "0" {
prometheusConfigReloaderResources.Limits[v1.ResourceMemory] = resource.MustParse(c.ConfigReloaderMemory)
}
operatorContainers := append([]v1.Container{
{
Name: "prometheus",
Image: prometheusImage,
Ports: ports,
Args: promArgs,
VolumeMounts: promVolumeMounts,
LivenessProbe: livenessProbe,
ReadinessProbe: readinessProbe,
Resources: p.Spec.Resources,
}, {
Name: "prometheus-config-reloader",
Image: c.PrometheusConfigReloaderImage,
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
},
},
},
Command: []string{"/bin/prometheus-config-reloader"},
Args: configReloadArgs,
VolumeMounts: configReloadVolumeMounts,
Resources: prometheusConfigReloaderResources,
},
}, additionalContainers...)
containers, err := k8sutil.MergePatchContainers(operatorContainers, p.Spec.Containers)
if err != nil {
return nil, errors.Wrap(err, "failed to merge containers spec")
}
 // PodManagementPolicy is set to Parallel to mitigate issues in kubernetes: https://github.com/kubernetes/kubernetes/issues/60164
// This is also mentioned as one of limitations of StatefulSets: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations
return &appsv1.StatefulSetSpec{
ServiceName: governingServiceName,
Replicas: p.Spec.Replicas,
PodManagementPolicy: appsv1.ParallelPodManagement,
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
},
Selector: &metav1.LabelSelector{
MatchLabels: finalLabels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: finalLabels,
Annotations: podAnnotations,
},
Spec: v1.PodSpec{
Containers: containers,
InitContainers: p.Spec.InitContainers,
SecurityContext: securityContext,
ServiceAccountName: p.Spec.ServiceAccountName,
NodeSelector: p.Spec.NodeSelector,
PriorityClassName: p.Spec.PriorityClassName,
TerminationGracePeriodSeconds: &terminationGracePeriod,
Volumes: volumes,
Tolerations: p.Spec.Tolerations,
Affinity: p.Spec.Affinity,
},
},
}, nil
}
func configSecretName(name string) string {
return prefixedName(name)
}
func volumeName(name string) string {
return fmt.Sprintf("%s-db", prefixedName(name))
}
func prefixedName(name string) string {
return fmt.Sprintf("prometheus-%s", name)
}
func subPathForStorage(s *monitoringv1.StorageSpec) string {
if s == nil {
return ""
}
return "prometheus-db"
}
| 1 | 13,114 | I guess we aim for hardcoded port for Prometheus operated ports right? | prometheus-operator-prometheus-operator | go |
@@ -602,8 +602,10 @@ class FormatChecker(BaseTokenChecker):
isinstance(node.parent, nodes.TryFinally) and node in node.parent.finalbody
):
prev_line = node.parent.body[0].tolineno + 1
+ elif isinstance(node.parent, nodes.Module):
+ prev_line = 0
else:
- prev_line = node.parent.statement().fromlineno
+ prev_line = node.parent.statement(future=True).fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2: | 1 | # Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2012-2015 Google, Inc.
# Copyright (c) 2013 moxian <[email protected]>
# Copyright (c) 2014-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 frost-nzcr4 <[email protected]>
# Copyright (c) 2014 Brett Cannon <[email protected]>
# Copyright (c) 2014 Michal Nowikowski <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Mike Frysinger <[email protected]>
# Copyright (c) 2015 Fabio Natali <[email protected]>
# Copyright (c) 2015 Harut <[email protected]>
# Copyright (c) 2015 Mihai Balint <[email protected]>
# Copyright (c) 2015 Pavel Roskin <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Petr Pulc <[email protected]>
# Copyright (c) 2016 Moises Lopez <[email protected]>
# Copyright (c) 2016 Ashley Whetter <[email protected]>
# Copyright (c) 2017, 2019-2020 hippo91 <[email protected]>
# Copyright (c) 2017-2018 Bryce Guinta <[email protected]>
# Copyright (c) 2017 Krzysztof Czapla <[email protected]>
# Copyright (c) 2017 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 James M. Allen <[email protected]>
# Copyright (c) 2017 vinnyrose <[email protected]>
# Copyright (c) 2018-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2018, 2020 Bryce Guinta <[email protected]>
# Copyright (c) 2018, 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2018 Lucas Cimon <[email protected]>
# Copyright (c) 2018 Michael Hudson-Doyle <[email protected]>
# Copyright (c) 2018 Natalie Serebryakova <[email protected]>
# Copyright (c) 2018 ssolanki <[email protected]>
# Copyright (c) 2018 Marcus Näslund <[email protected]>
# Copyright (c) 2018 Mike Frysinger <[email protected]>
# Copyright (c) 2018 Fureigh <[email protected]>
# Copyright (c) 2018 Andreas Freimuth <[email protected]>
# Copyright (c) 2018 Jakub Wilk <[email protected]>
# Copyright (c) 2019 Nick Drozd <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 Raphael Gaschignard <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Tushar Sadhwani <[email protected]>
# Copyright (c) 2021 bot <[email protected]>
# Copyright (c) 2021 Ville Skyttä <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Python code format's checker.
By default try to follow Guido's style guide :
https://www.python.org/doc/essays/styleguide/
Some parts of the process_token method is based from The Tab Nanny std module.
"""
import tokenize
from functools import reduce
from typing import List
from astroid import nodes
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import (
check_messages,
is_overload_stub,
is_protocol_class,
node_frame_class,
)
from pylint.constants import WarningScope
from pylint.interfaces import IAstroidChecker, IRawChecker, ITokenChecker
from pylint.utils.pragma_parser import OPTION_PO, PragmaParserError, parse_pragma
_ASYNC_TOKEN = "async"
_KEYWORD_TOKENS = [
"assert",
"del",
"elif",
"except",
"for",
"if",
"in",
"not",
"raise",
"return",
"while",
"yield",
"with",
]
_SPACED_OPERATORS = [
"==",
"<",
">",
"!=",
"<>",
"<=",
">=",
"+=",
"-=",
"*=",
"**=",
"/=",
"//=",
"&=",
"|=",
"^=",
"%=",
">>=",
"<<=",
]
_OPENING_BRACKETS = ["(", "[", "{"]
_CLOSING_BRACKETS = [")", "]", "}"]
_TAB_LENGTH = 8
_EOL = frozenset([tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT])
_JUNK_TOKENS = (tokenize.COMMENT, tokenize.NL)
# Whitespace checking policy constants
_MUST = 0
_MUST_NOT = 1
_IGNORE = 2
MSGS = {
"C0301": (
"Line too long (%s/%s)",
"line-too-long",
"Used when a line is longer than a given number of characters.",
),
"C0302": (
"Too many lines in module (%s/%s)", # was W0302
"too-many-lines",
"Used when a module has too many lines, reducing its readability.",
),
"C0303": (
"Trailing whitespace",
"trailing-whitespace",
"Used when there is whitespace between the end of a line and the newline.",
),
"C0304": (
"Final newline missing",
"missing-final-newline",
"Used when the last line in a file is missing a newline.",
),
"C0305": (
"Trailing newlines",
"trailing-newlines",
"Used when there are trailing blank lines in a file.",
),
"W0311": (
"Bad indentation. Found %s %s, expected %s",
"bad-indentation",
"Used when an unexpected number of indentation's tabulations or "
"spaces has been found.",
),
"W0301": (
"Unnecessary semicolon", # was W0106
"unnecessary-semicolon",
'Used when a statement is ended by a semi-colon (";"), which '
"isn't necessary (that's python, not C ;).",
),
"C0321": (
"More than one statement on a single line",
"multiple-statements",
"Used when more than on statement are found on the same line.",
{"scope": WarningScope.NODE},
),
"C0325": (
"Unnecessary parens after %r keyword",
"superfluous-parens",
"Used when a single item in parentheses follows an if, for, or "
"other keyword.",
),
"C0327": (
"Mixed line endings LF and CRLF",
"mixed-line-endings",
"Used when there are mixed (LF and CRLF) newline signs in a file.",
),
"C0328": (
"Unexpected line ending format. There is '%s' while it should be '%s'.",
"unexpected-line-ending-format",
"Used when there is different newline than expected.",
),
}
def _last_token_on_line_is(tokens, line_end, token):
return (
line_end > 0
and tokens.token(line_end - 1) == token
or line_end > 1
and tokens.token(line_end - 2) == token
and tokens.type(line_end - 1) == tokenize.COMMENT
)
# The contexts for hanging indents.
# A hanging indented dictionary value after :
HANGING_DICT_VALUE = "dict-value"
# Hanging indentation in an expression.
HANGING = "hanging"
# Hanging indentation in a block header.
HANGING_BLOCK = "hanging-block"
# Continued indentation inside an expression.
CONTINUED = "continued"
# Continued indentation in a block header.
CONTINUED_BLOCK = "continued-block"
SINGLE_LINE = "single"
WITH_BODY = "multi"
class TokenWrapper:
"""A wrapper for readable access to token information."""
def __init__(self, tokens):
self._tokens = tokens
def token(self, idx):
return self._tokens[idx][1]
def type(self, idx):
return self._tokens[idx][0]
def start_line(self, idx):
return self._tokens[idx][2][0]
def start_col(self, idx):
return self._tokens[idx][2][1]
def line(self, idx):
return self._tokens[idx][4]
class FormatChecker(BaseTokenChecker):
"""checks for :
* unauthorized constructions
* strict indentation
* line length
"""
__implements__ = (ITokenChecker, IAstroidChecker, IRawChecker)
# configuration section name
name = "format"
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (
(
"max-line-length",
{
"default": 100,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of characters on a single line.",
},
),
(
"ignore-long-lines",
{
"type": "regexp",
"metavar": "<regexp>",
"default": r"^\s*(# )?<?https?://\S+>?$",
"help": (
"Regexp for a line that is allowed to be longer than the limit."
),
},
),
(
"single-line-if-stmt",
{
"default": False,
"type": "yn",
"metavar": "<y or n>",
"help": (
"Allow the body of an if to be on the same "
"line as the test if there is no else."
),
},
),
(
"single-line-class-stmt",
{
"default": False,
"type": "yn",
"metavar": "<y or n>",
"help": (
"Allow the body of a class to be on the same "
"line as the declaration if body contains "
"single statement."
),
},
),
(
"max-module-lines",
{
"default": 1000,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of lines in a module.",
},
),
(
"indent-string",
{
"default": " ",
"type": "non_empty_string",
"metavar": "<string>",
"help": "String used as indentation unit. This is usually "
'" " (4 spaces) or "\\t" (1 tab).',
},
),
(
"indent-after-paren",
{
"type": "int",
"metavar": "<int>",
"default": 4,
"help": "Number of spaces of indent required inside a hanging "
"or continued line.",
},
),
(
"expected-line-ending-format",
{
"type": "choice",
"metavar": "<empty or LF or CRLF>",
"default": "",
"choices": ["", "LF", "CRLF"],
"help": (
"Expected format of line ending, "
"e.g. empty (any line ending), LF or CRLF."
),
},
),
)
def __init__(self, linter=None):
super().__init__(linter)
self._lines = None
self._visited_lines = None
self._bracket_stack = [None]
def new_line(self, tokens, line_end, line_start):
"""a new line has been encountered, process it if necessary"""
if _last_token_on_line_is(tokens, line_end, ";"):
self.add_message("unnecessary-semicolon", line=tokens.start_line(line_end))
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split("\n")[0]
self.check_lines(line, line_num)
def process_module(self, _node: nodes.Module) -> None:
pass
def _check_keyword_parentheses(
self, tokens: List[tokenize.TokenInfo], start: int
) -> None:
"""Check that there are not unnecessary parentheses after a keyword.
Parens are unnecessary if there is exactly one balanced outer pair on a
line, and it is followed by a colon, and contains no commas (i.e. is not a
tuple).
Args:
tokens: list of Tokens; the entire list of Tokens.
start: int; the position of the keyword in the token list.
"""
# If the next token is not a paren, we're fine.
if self._bracket_stack[-1] == ":" and tokens[start].string == "for":
self._bracket_stack.pop()
if tokens[start + 1].string != "(":
return
found_and_or = False
contains_walrus_operator = False
walrus_operator_depth = 0
contains_double_parens = 0
depth = 0
keyword_token = str(tokens[start].string)
line_num = tokens[start].start[0]
for i in range(start, len(tokens) - 1):
token = tokens[i]
# If we hit a newline, then assume any parens were for continuation.
if token.type == tokenize.NL:
return
# Since the walrus operator doesn't exist below python3.8, the tokenizer
# generates independent tokens
if (
token.string == ":=" # <-- python3.8+ path
or token.string + tokens[i + 1].string == ":="
):
contains_walrus_operator = True
walrus_operator_depth = depth
if token.string == "(":
depth += 1
if tokens[i + 1].string == "(":
contains_double_parens = 1
elif token.string == ")":
depth -= 1
if depth:
if contains_double_parens and tokens[i + 1].string == ")":
# For walrus operators in `if (not)` conditions and comprehensions
if keyword_token in {"in", "if", "not"}:
continue
return
contains_double_parens -= 1
continue
# ')' can't happen after if (foo), since it would be a syntax error.
if tokens[i + 1].string in {":", ")", "]", "}", "in"} or tokens[
i + 1
].type in {tokenize.NEWLINE, tokenize.ENDMARKER, tokenize.COMMENT}:
if contains_walrus_operator and walrus_operator_depth - 1 == depth:
return
# The empty tuple () is always accepted.
if i == start + 2:
return
if keyword_token == "not":
if not found_and_or:
self.add_message(
"superfluous-parens", line=line_num, args=keyword_token
)
elif keyword_token in {"return", "yield"}:
self.add_message(
"superfluous-parens", line=line_num, args=keyword_token
)
elif not found_and_or and keyword_token != "in":
self.add_message(
"superfluous-parens", line=line_num, args=keyword_token
)
return
elif depth == 1:
# This is a tuple, which is always acceptable.
if token[1] == ",":
return
# 'and' and 'or' are the only boolean operators with lower precedence
# than 'not', so parens are only required when they are found.
if token[1] in {"and", "or"}:
found_and_or = True
# A yield inside an expression must always be in parentheses,
# quit early without error.
elif token[1] == "yield":
return
# A generator expression always has a 'for' token in it, and
# the 'for' token is only legal inside parens when it is in a
# generator expression. The parens are necessary here, so bail
# without an error.
elif token[1] == "for":
return
# A generator expression can have an 'else' token in it.
     # We check the rest of the tokens to see if any problems occur after
# the 'else'.
elif token[1] == "else":
if "(" in (i.string for i in tokens[i:]):
self._check_keyword_parentheses(tokens[i:], 0)
return
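 # Editor's illustration (hypothetical snippets, not part of the original
 # module) of what _check_keyword_parentheses flags:
 #   if (foo):          -> superfluous-parens ('if')
 #   return (x)         -> superfluous-parens ('return')
 #   if (a, b):         -> not flagged, the parentheses build a tuple
 #   if (a or b):       -> not flagged, 'and'/'or' inside keeps the parens
 #   while (x := f()):  -> not flagged, walrus assignments keep their parens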
def _prepare_token_dispatcher(self):
dispatch = {}
for tokens, handler in ((_KEYWORD_TOKENS, self._check_keyword_parentheses),):
for token in tokens:
dispatch[token] = handler
return dispatch
def process_tokens(self, tokens):
"""process tokens and search for :
_ too long lines (i.e. longer than <max_chars>)
_ optionally bad construct (if given, bad_construct must be a compiled
regular expression).
"""
self._bracket_stack = [None]
indents = [0]
check_equal = False
line_num = 0
self._lines = {}
self._visited_lines = {}
token_handlers = self._prepare_token_dispatcher()
self._last_line_ending = None
last_blank_line_num = 0
for idx, (tok_type, token, start, _, line) in enumerate(tokens):
if start[0] != line_num:
line_num = start[0]
# A tokenizer oddity: if an indented line contains a multi-line
# docstring, the line member of the INDENT token does not contain
# the full line; therefore we check the next token on the line.
if tok_type == tokenize.INDENT:
self.new_line(TokenWrapper(tokens), idx - 1, idx + 1)
else:
self.new_line(TokenWrapper(tokens), idx - 1, idx)
if tok_type == tokenize.NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = True
self._check_line_ending(token, line_num)
elif tok_type == tokenize.INDENT:
check_equal = False
self.check_indent_level(token, indents[-1] + 1, line_num)
indents.append(indents[-1] + 1)
elif tok_type == tokenize.DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
check_equal = True
if len(indents) > 1:
del indents[-1]
elif tok_type == tokenize.NL:
if not line.strip("\r\n"):
last_blank_line_num = line_num
elif tok_type not in (tokenize.COMMENT, tokenize.ENCODING):
# This is the first concrete token following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
if check_equal:
check_equal = False
self.check_indent_level(line, indents[-1], line_num)
if tok_type == tokenize.NUMBER and token.endswith("l"):
self.add_message("lowercase-l-suffix", line=line_num)
try:
handler = token_handlers[token]
except KeyError:
pass
else:
handler(tokens, idx)
line_num -= 1 # to be ok with "wc -l"
if line_num > self.config.max_module_lines:
# Get the line where the too-many-lines (or its message id)
# was disabled or default to 1.
message_definition = self.linter.msgs_store.get_message_definitions(
"too-many-lines"
)[0]
names = (message_definition.msgid, "too-many-lines")
line = next(
filter(None, (self.linter._pragma_lineno.get(name) for name in names)),
1,
)
self.add_message(
"too-many-lines",
args=(line_num, self.config.max_module_lines),
line=line,
)
# See if there are any trailing lines. Do not complain about empty
# files like __init__.py markers.
if line_num == last_blank_line_num and line_num > 0:
self.add_message("trailing-newlines", line=line_num)
def _check_line_ending(self, line_ending, line_num):
# check if line endings are mixed
if self._last_line_ending is not None:
# line_ending == "" indicates a synthetic newline added at
# the end of a file that does not, in fact, end with a
# newline.
if line_ending and line_ending != self._last_line_ending:
self.add_message("mixed-line-endings", line=line_num)
self._last_line_ending = line_ending
# check if line ending is as expected
expected = self.config.expected_line_ending_format
if expected:
# reduce multiple \n\n\n\n to one \n
line_ending = reduce(lambda x, y: x + y if x != y else x, line_ending, "")
line_ending = "LF" if line_ending == "\n" else "CRLF"
if line_ending != expected:
self.add_message(
"unexpected-line-ending-format",
args=(line_ending, expected),
line=line_num,
)
@check_messages("multiple-statements")
def visit_default(self, node: nodes.NodeNG) -> None:
"""check the node line number and check it if not yet done"""
if not node.is_statement:
return
if not node.root().pure_python:
return
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
# The line on which a finally: occurs in a try/finally
# is not directly represented in the AST. We infer it
# by taking the last line of the body and adding 1, which
# should be the line of finally:
elif (
isinstance(node.parent, nodes.TryFinally) and node in node.parent.finalbody
):
prev_line = node.parent.body[0].tolineno + 1
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
self._check_multi_statement_line(node, line)
return
if line in self._visited_lines:
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines = []
for line in range(line, tolineno + 1):
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append("")
def _check_multi_statement_line(self, node, line):
"""Check for lines containing multiple statements."""
# Do not warn about multiple nested context managers
# in with statements.
if isinstance(node, nodes.With):
return
# For try... except... finally..., the two nodes
# appear to be on the same line due to how the AST is built.
if isinstance(node, nodes.TryExcept) and isinstance(
node.parent, nodes.TryFinally
):
return
if (
isinstance(node.parent, nodes.If)
and not node.parent.orelse
and self.config.single_line_if_stmt
):
return
if (
isinstance(node.parent, nodes.ClassDef)
and len(node.parent.body) == 1
and self.config.single_line_class_stmt
):
return
# Function overloads that use ``Ellipsis`` are exempted.
if (
isinstance(node, nodes.Expr)
and isinstance(node.value, nodes.Const)
and node.value.value is Ellipsis
):
frame = node.frame()
if is_overload_stub(frame) or is_protocol_class(node_frame_class(frame)):
return
self.add_message("multiple-statements", node=node)
self._visited_lines[line] = 2
def check_line_ending(self, line: str, i: int) -> None:
"""
Check that the final newline is not missing and that there is no trailing whitespace.
"""
if not line.endswith("\n"):
self.add_message("missing-final-newline", line=i)
return
# exclude \f (formfeed) from the rstrip
stripped_line = line.rstrip("\t\n\r\v ")
if line[len(stripped_line) :] not in ("\n", "\r\n"):
self.add_message(
"trailing-whitespace", line=i, col_offset=len(stripped_line)
)
def check_line_length(self, line: str, i: int, checker_off: bool) -> None:
"""
Check that the line length is less than the authorized value
"""
max_chars = self.config.max_line_length
ignore_long_line = self.config.ignore_long_lines
line = line.rstrip()
if len(line) > max_chars and not ignore_long_line.search(line):
if checker_off:
self.linter.add_ignored_message("line-too-long", i)
else:
self.add_message("line-too-long", line=i, args=(len(line), max_chars))
@staticmethod
def remove_pylint_option_from_lines(options_pattern_obj) -> str:
"""
Remove the `# pylint ...` pattern from lines
"""
lines = options_pattern_obj.string
purged_lines = (
lines[: options_pattern_obj.start(1)].rstrip()
+ lines[options_pattern_obj.end(1) :]
)
return purged_lines
@staticmethod
def is_line_length_check_activated(pylint_pattern_match_object) -> bool:
"""
Return true if the line length check is activated
"""
try:
for pragma in parse_pragma(pylint_pattern_match_object.group(2)):
if pragma.action == "disable" and "line-too-long" in pragma.messages:
return False
except PragmaParserError:
# Printing useful information dealing with this error is done in the lint package
pass
return True
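 # Editor's note (not part of the original module): when a line carries a
 # pragma comment disabling 'line-too-long', this helper returns False and
 # check_line_length then records any overlong line as an ignored message
 # instead of emitting a warning.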
@staticmethod
def specific_splitlines(lines: str) -> List[str]:
"""
  Split lines according to universal newlines except those in a specific set
"""
unsplit_ends = {
"\v",
"\x0b",
"\f",
"\x0c",
"\x1c",
"\x1d",
"\x1e",
"\x85",
"\u2028",
"\u2029",
}
res = []
buffer = ""
for atomic_line in lines.splitlines(True):
if atomic_line[-1] not in unsplit_ends:
res.append(buffer + atomic_line)
buffer = ""
else:
buffer += atomic_line
return res
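 # Editor's worked example (not part of the original module): characters in
 # 'unsplit_ends' do not terminate a physical line, so a form feed stays glued
 # to the text that follows it:
 #   FormatChecker.specific_splitlines("a\fb\nc\n")  ->  ["a\fb\n", "c\n"]
 #   FormatChecker.specific_splitlines("a\nb\n")     ->  ["a\n", "b\n"]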
def check_lines(self, lines: str, lineno: int) -> None:
"""
Check lines have :
- a final newline
- no trailing whitespace
- less than a maximum number of characters
"""
# we're first going to do a rough check whether any lines in this set
# go over the line limit. If none of them do, then we don't need to
# parse out the pylint options later on and can just assume that these
# lines are clean
# we'll also handle the line ending check here to avoid double-iteration
# unless the line lengths are suspect
max_chars = self.config.max_line_length
split_lines = self.specific_splitlines(lines)
for offset, line in enumerate(split_lines):
self.check_line_ending(line, lineno + offset)
# hold onto the initial lineno for later
potential_line_length_warning = False
for offset, line in enumerate(split_lines):
# this check is purposefully simple and doesn't rstrip
# since this is running on every line you're checking it's
# advantageous to avoid doing a lot of work
if len(line) > max_chars:
potential_line_length_warning = True
break
# if there were no lines passing the max_chars config, we don't bother
# running the full line check (as we've met an even more strict condition)
if not potential_line_length_warning:
return
# Line length check may be deactivated through `pylint: disable` comment
mobj = OPTION_PO.search(lines)
checker_off = False
if mobj:
if not self.is_line_length_check_activated(mobj):
checker_off = True
# The 'pylint: disable whatever' should not be taken into account for line length count
lines = self.remove_pylint_option_from_lines(mobj)
# here we re-run specific_splitlines since we have filtered out pylint options above
for offset, line in enumerate(self.specific_splitlines(lines)):
self.check_line_length(line, lineno + offset, checker_off)
def check_indent_level(self, string, expected, line_num):
"""return the indent level of the string"""
indent = self.config.indent_string
if indent == "\\t": # \t is not interpreted in the configuration file
indent = "\t"
level = 0
unit_size = len(indent)
while string[:unit_size] == indent:
string = string[unit_size:]
level += 1
suppl = ""
while string and string[0] in " \t":
suppl += string[0]
string = string[1:]
if level != expected or suppl:
i_type = "spaces"
if indent[0] == "\t":
i_type = "tabs"
self.add_message(
"bad-indentation",
line=line_num,
args=(level * unit_size + len(suppl), i_type, expected * unit_size),
)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(FormatChecker(linter))
| 1 | 17,810 | This is the same effect as doing `node.parent.fromlineno` but avoids the `StatementMissing` exception from calling `statement`. | PyCQA-pylint | py |
@@ -165,12 +165,15 @@ def request_candidate_sets(days, top, similar):
@cli.command(name='request_recommendations')
@click.option("--top", type=int, default=200, help="Generate given number of top artist recommendations")
@click.option("--similar", type=int, default=200, help="Generate given number of similar artist recommendations")
-def request_recommendations(top, similar):
[email protected]("--mb_id", callback=parse_list, default=[], multiple=True, help="Generate recommendations for given users" \
+ " Generate recommendation for all users by default.")
+def request_recommendations(top, similar, mb_id):
""" Send the cluster a request to generate recommendations.
"""
params = {
'recommendation_top_artist_limit': top,
'recommendation_similar_artist_limit': similar,
+ 'musicbrainz_id': mb_id
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.recommend', params=params))
| 1 | import sys
import click
import listenbrainz.utils as utils
import os
import pika
import ujson
from flask import current_app
from listenbrainz.webserver import create_app
QUERIES_JSON_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'request_queries.json')
cli = click.Group()
class InvalidSparkRequestError(Exception):
pass
def _get_possible_queries():
""" Return the dict describing all possible queries that can
be sent to Spark. Listed in listenbrainz/spark/request_queries.json
"""
with open(QUERIES_JSON_PATH) as f:
return ujson.load(f)
def _prepare_query_message(query, params=None):
""" Prepare the JSON message that needs to be sent to the
spark cluster based on the query and the parameters the
query needs
Args:
query (str): the name of the query, should be in request_queries.json
params (dict): the parameters the query needs, should contain all the params
    in the corresponding request_queries.json to be valid
Raises:
InvalidSparkRequestError if the query isn't in the list or if the parameters
don't match up
"""
if params is None:
params = {}
possible_queries = _get_possible_queries()
if query not in possible_queries:
raise InvalidSparkRequestError(query)
message = {'query': possible_queries[query]['name']}
required_params = set(possible_queries[query]['params'])
given_params = set(params.keys())
if required_params != given_params:
raise InvalidSparkRequestError
if params:
message['params'] = {}
for key, value in params.items():
message['params'][key] = value
return ujson.dumps(message)
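# Editor's illustration (not part of the original module), assuming
# request_queries.json maps 'cf_recording.recommendations.create_dataframes'
# to a Spark-side name and lists 'train_model_window' as its only parameter:
#   _prepare_query_message('cf_recording.recommendations.create_dataframes',
#                          params={'train_model_window': 180})
# returns a JSON string shaped like
#   {"query": "<name from request_queries.json>",
#    "params": {"train_model_window": 180}}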
def send_request_to_spark_cluster(message):
with create_app().app_context():
rabbitmq_connection = utils.connect_to_rabbitmq(
username=current_app.config['RABBITMQ_USERNAME'],
password=current_app.config['RABBITMQ_PASSWORD'],
host=current_app.config['RABBITMQ_HOST'],
port=current_app.config['RABBITMQ_PORT'],
virtual_host=current_app.config['RABBITMQ_VHOST'],
error_logger=current_app.logger,
)
try:
channel = rabbitmq_connection.channel()
channel.exchange_declare(exchange=current_app.config['SPARK_REQUEST_EXCHANGE'], exchange_type='fanout')
channel.basic_publish(
exchange=current_app.config['SPARK_REQUEST_EXCHANGE'],
routing_key='',
body=message,
properties=pika.BasicProperties(delivery_mode=2,),
)
except Exception:
# this is a relatively non critical part of LB for now, so just log the error and
# move ahead
current_app.logger.error('Could not send message to spark cluster: %s', ujson.dumps(message), exc_info=True)
@cli.command(name="request_user_stats")
@click.option("--type", 'type_', type=click.Choice(['entity', 'listening_activity']),
help="Type of statistics to calculate", required=True)
@click.option("--range", 'range_', type=click.Choice(['week', 'month', 'year', 'all_time']),
help="Time range of statistics to calculate", required=True)
@click.option("--entity", type=click.Choice(['artists', 'releases', 'recordings']),
help="Entity for which statistics should be calculated")
def request_user_stats(type_, range_, entity):
""" Send a user stats request to the spark cluster
"""
params = {}
if type_ == 'entity' and entity:
params['entity'] = entity
try:
send_request_to_spark_cluster(_prepare_query_message(
'stats.user.{type}.{range}'.format(range=range_, type=type_), params=params))
except InvalidSparkRequestError:
click.echo("Incorrect arguments provided")
@cli.command(name="request_import_full")
def request_import_new_full_dump():
""" Send the cluster a request to import a new full data dump
"""
send_request_to_spark_cluster(_prepare_query_message('import.dump.full'))
@cli.command(name="request_dataframes")
@click.option("--days", type=int, default=180, help="Request model to be trained on data of given number of days")
def request_dataframes(days):
""" Send the cluster a request to create dataframes.
"""
params = {
'train_model_window': days,
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.create_dataframes', params=params))
def parse_list(ctx, param, value):
return list(value)
@cli.command(name='request_model')
@click.option("--rank", callback=parse_list, default=[5, 10], type=int, multiple=True, help="Number of hidden features")
@click.option("--itr", callback=parse_list, default=[5, 10], type=int, multiple=True, help="Number of iterations to run.")
@click.option("--lmbda", callback=parse_list, default=[0.1, 10.0], type=float, multiple=True, help="Controls over fitting.")
@click.option("--alpha", default=3.0, type=float, help="Baseline level of confidence weighting applied.")
def request_model(rank, itr, lmbda, alpha):
""" Send the cluster a request to train the model.
For more details refer to 'https://spark.apache.org/docs/2.1.0/mllib-collaborative-filtering.html'
"""
params = {
'ranks': rank,
'lambdas': lmbda,
'iterations': itr,
'alpha': alpha,
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.train_model', params=params))
@cli.command(name='request_candidate_sets')
@click.option("--days", type=int, default=7, help="Request recommendations to be generated on history of given number of days")
@click.option("--top", type=int, default=20, help="Calculate given number of top artist.")
@click.option("--similar", type=int, default=20, help="Calculate given number of similar artist.")
def request_candidate_sets(days, top, similar):
""" Send the cluster a request to generate candidate sets.
"""
params = {
'recommendation_generation_window': days,
"top_artist_limit": top,
"similar_artist_limit": similar,
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.candidate_sets', params=params))
@cli.command(name='request_recommendations')
@click.option("--top", type=int, default=200, help="Generate given number of top artist recommendations")
@click.option("--similar", type=int, default=200, help="Generate given number of similar artist recommendations")
def request_recommendations(top, similar):
""" Send the cluster a request to generate recommendations.
"""
params = {
'recommendation_top_artist_limit': top,
'recommendation_similar_artist_limit': similar,
}
send_request_to_spark_cluster(_prepare_query_message('cf_recording.recommendations.recommend', params=params))
@cli.command(name='request_import_mapping')
def request_import_mapping():
""" Send the spark cluster a request to import msid mbid mapping.
"""
send_request_to_spark_cluster(_prepare_query_message('import.mapping'))
@cli.command(name='request_import_artist_relation')
def request_import_artist_relation():
""" Send the spark cluster a request to import artist relation.
"""
send_request_to_spark_cluster(_prepare_query_message('import.artist_relation'))
| 1 | 16,637 | I think user-ids might be better; also note that options should use - and not _ to separate words. Also, how is more than one id specified? Comma separated? I think the usage statement should indicate this. | metabrainz-listenbrainz-server | py |
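The review comment above suggests a dash-separated `--user-ids` option that accepts an explicitly documented comma-separated list. A minimal sketch of what that could look like with click follows; the option name, callback, and command are illustrative assumptions, not code from the listenbrainz repository.

```python
import click


def parse_user_ids(ctx, param, value):
    """Split a comma-separated string of user ids into a list of integers."""
    if not value:
        return []
    try:
        return [int(user_id) for user_id in value.split(",")]
    except ValueError:
        raise click.BadParameter("user ids must be a comma-separated list of integers, e.g. 3,17,42")


@click.command(name="request_user_stats")
@click.option("--user-ids", callback=parse_user_ids, default="",
              help="Comma-separated list of user ids, e.g. --user-ids 3,17,42")
def request_user_stats(user_ids):
    """Hypothetical command illustrating dash-separated option names and comma-separated ids."""
    click.echo("Requesting stats for users: {}".format(user_ids))


if __name__ == "__main__":
    request_user_stats()
```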
@@ -117,14 +117,7 @@ func nanosecondsToTicks(ns int64) timeUnit {
// sleepTicks should sleep for specific number of microseconds.
func sleepTicks(d timeUnit) {
- for d != 0 {
- ticks() // update timestamp
- ticks := uint32(d) // current scaling only supports 100 usec to 6553 msec
- if !timerSleep(ticks) {
- return
- }
- d -= timeUnit(ticks)
- }
+ timerSleep(uint32(d))
}
// number of ticks (microseconds) since start. | 1 | // +build stm32,stm32f103
package runtime
import (
"device/arm"
"device/stm32"
"machine"
"runtime/interrupt"
"runtime/volatile"
)
func init() {
initCLK()
initRTC()
initTIM()
machine.UART0.Configure(machine.UARTConfig{})
}
func putchar(c byte) {
machine.UART0.WriteByte(c)
}
// initCLK sets clock to 72MHz using HSE 8MHz crystal w/ PLL X 9 (8MHz x 9 = 72MHz).
func initCLK() {
stm32.FLASH.ACR.SetBits(stm32.FLASH_ACR_LATENCY_WS2) // Two wait states, per datasheet
stm32.RCC.CFGR.SetBits(stm32.RCC_CFGR_PPRE1_Div2 << stm32.RCC_CFGR_PPRE1_Pos) // prescale PCLK1 = HCLK/2
stm32.RCC.CFGR.SetBits(stm32.RCC_CFGR_PPRE2_Div1 << stm32.RCC_CFGR_PPRE2_Pos) // prescale PCLK2 = HCLK/1
stm32.RCC.CR.SetBits(stm32.RCC_CR_HSEON) // enable HSE clock
// wait for the HSEREADY flag
for !stm32.RCC.CR.HasBits(stm32.RCC_CR_HSERDY) {
}
stm32.RCC.CR.SetBits(stm32.RCC_CR_HSION) // enable HSI clock
// wait for the HSIREADY flag
for !stm32.RCC.CR.HasBits(stm32.RCC_CR_HSIRDY) {
}
stm32.RCC.CFGR.SetBits(stm32.RCC_CFGR_PLLSRC) // set PLL source to HSE
stm32.RCC.CFGR.SetBits(stm32.RCC_CFGR_PLLMUL_Mul9 << stm32.RCC_CFGR_PLLMUL_Pos) // multiply by 9
stm32.RCC.CR.SetBits(stm32.RCC_CR_PLLON) // enable the PLL
// wait for the PLLRDY flag
for !stm32.RCC.CR.HasBits(stm32.RCC_CR_PLLRDY) {
}
stm32.RCC.CFGR.SetBits(stm32.RCC_CFGR_SW_PLL) // set clock source to pll
// wait for PLL to be CLK
for !stm32.RCC.CFGR.HasBits(stm32.RCC_CFGR_SWS_PLL << stm32.RCC_CFGR_SWS_Pos) {
}
}
var (
timestamp timeUnit // microseconds since boottime
timerLastCounter uint64
)
var timerWakeup volatile.Register8
func initRTC() {
// Enable the PWR and BKP.
stm32.RCC.APB1ENR.SetBits(stm32.RCC_APB1ENR_PWREN | stm32.RCC_APB1ENR_BKPEN)
// access to backup register
stm32.PWR.CR.SetBits(stm32.PWR_CR_DBP)
// Enable LSE
stm32.RCC.BDCR.SetBits(stm32.RCC_BDCR_LSEON)
// wait until LSE is ready
for !stm32.RCC.BDCR.HasBits(stm32.RCC_BDCR_LSERDY) {
}
// Select LSE
stm32.RCC.BDCR.SetBits(stm32.RCC_BDCR_RTCSEL_LSE << stm32.RCC_BDCR_RTCSEL_Pos)
// set prescaler to "max" per datasheet
stm32.RTC.PRLH.Set(stm32.RTC_PRLH_PRLH_Msk)
stm32.RTC.PRLL.Set(stm32.RTC_PRLL_PRLL_Msk)
// set count to zero
stm32.RTC.CNTH.Set(0x0)
stm32.RTC.CNTL.Set(0x0)
// Enable RTC
stm32.RCC.BDCR.SetBits(stm32.RCC_BDCR_RTCEN)
// Clear RSF
stm32.RTC.CRL.ClearBits(stm32.RTC_CRL_RSF)
// Wait till flag is set
for !stm32.RTC.CRL.HasBits(stm32.RTC_CRL_RSF) {
}
}
// Enable the TIM3 clock.
func initTIM() {
stm32.RCC.APB1ENR.SetBits(stm32.RCC_APB1ENR_TIM3EN)
intr := interrupt.New(stm32.IRQ_TIM3, handleTIM3)
intr.SetPriority(0xc3)
intr.Enable()
}
const asyncScheduler = false
func ticksToNanoseconds(ticks timeUnit) int64 {
return int64(ticks) * 1000
}
func nanosecondsToTicks(ns int64) timeUnit {
return timeUnit(ns / 1000)
}
// sleepTicks should sleep for specific number of microseconds.
func sleepTicks(d timeUnit) {
for d != 0 {
ticks() // update timestamp
ticks := uint32(d) // current scaling only supports 100 usec to 6553 msec
if !timerSleep(ticks) {
return
}
d -= timeUnit(ticks)
}
}
// number of ticks (microseconds) since start.
func ticks() timeUnit {
// convert RTC counter from seconds to microseconds
timerCounter := uint64(stm32.RTC.CNTH.Get()<<16|stm32.RTC.CNTL.Get()) * 1000 * 1000
// add the fractional part of current time using DIV register
timerCounter += uint64(0x8000-stm32.RTC.DIVL.Get()) * 31
// change since last measurement
offset := (timerCounter - timerLastCounter)
timerLastCounter = timerCounter
timestamp += timeUnit(offset)
return timestamp
}
// ticks are in microseconds
// returns false if an interrupt occurred
func timerSleep(ticks uint32) bool {
timerWakeup.Set(0)
// STM32 timer update event period is calculated as follows:
//
// Update_event = TIM_CLK/((PSC + 1)*(ARR + 1)*(RCR + 1))
//
// Where:
//
// TIM_CLK = timer clock input
// PSC = 16-bit prescaler register
// ARR = 16/32-bit Autoreload register
// RCR = 16-bit repetition counter
//
// Example:
//
// TIM_CLK = 72 MHz
// Prescaler = 1
// Auto reload = 65535
// No repetition counter RCR = 0
// Update_event = 72*(10^6)/((1 + 1)*(65535 + 1)*(1))
// Update_event = 549.3 Hz
//
// Set the timer prescaler/autoreload timing registers.
// TODO: support smaller or larger scales (autoscaling) based
// on the length of sleep time requested.
// The current scaling only supports a range of 200 usec to 6553 msec.
// prescale counter down from 72 MHz to 10 kHz, i.e. a 0.1 ms tick period.
stm32.TIM3.PSC.Set(machine.CPUFrequency()/10000 - 1) // 7199
// Set duty aka duration.
// STM32 dividers use n-1, i.e. n counts from 0 to n-1.
// As a result, with these prescaler settings,
// the minimum allowed duration is 200 microseconds.
if ticks < 200 {
ticks = 200
}
stm32.TIM3.ARR.Set(ticks/100 - 1) // convert from microseconds to 0.1 ms
// Enable the hardware interrupt.
stm32.TIM3.DIER.SetBits(stm32.TIM_DIER_UIE)
// Enable the timer.
stm32.TIM3.CR1.SetBits(stm32.TIM_CR1_CEN)
wait:
arm.Asm("wfi")
if timerWakeup.Get() != 0 {
return true
}
if hasScheduler {
return false
} else {
// keep looping until the routine exits or is interrupted
goto wait
}
}
func handleTIM3(interrupt.Interrupt) {
if stm32.TIM3.SR.HasBits(stm32.TIM_SR_UIF) {
// Disable the timer.
stm32.TIM3.CR1.ClearBits(stm32.TIM_CR1_CEN)
// clear the update flag
stm32.TIM3.SR.ClearBits(stm32.TIM_SR_UIF)
// timer was triggered
timerWakeup.Set(1)
}
}
| 1 | 11,485 | This code is necessary. It makes sure that you can sleep for longer durations than the timer supports. | tinygo-org-tinygo | go |
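The comment above defends the removed loop: the hardware timer can only be programmed for a bounded number of ticks per call, so longer sleeps have to be split into chunks. Below is a small, runnable Python sketch of that chunking idea; the tick size and limit are made-up stand-ins for the STM32 timer, not values from the driver.

```python
import time

MAX_TIMER_TICKS = 65535  # hypothetical per-call limit of the hardware timer


def timer_sleep(ticks):
    """Stand-in for the hardware timer: it can only handle up to MAX_TIMER_TICKS at once."""
    assert ticks <= MAX_TIMER_TICKS
    time.sleep(ticks / 1_000_000)  # pretend one tick is one microsecond


def sleep_ticks(total_ticks):
    """Split a long sleep into chunks the timer can actually represent."""
    while total_ticks > 0:
        chunk = min(total_ticks, MAX_TIMER_TICKS)
        timer_sleep(chunk)
        total_ticks -= chunk


sleep_ticks(200_000)  # longer than one timer period, so it takes several chunks
```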
@@ -96,7 +96,7 @@ public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
* @param executorService an executor service to parallelize tasks to delete manifests and data files
* @return this for method chaining
*/
- ExpireSnapshots executeWith(ExecutorService executorService);
+ ExpireSnapshots executeDeleteWith(ExecutorService executorService);
/**
* Allows expiration of snapshots without any cleanup of underlying manifest or data files. | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.function.Consumer;
/**
* API for removing old {@link Snapshot snapshots} from a table.
* <p>
* This API accumulates snapshot deletions and commits the new list to the table. This API does not
* allow deleting the current snapshot.
* <p>
* When committing, these changes will be applied to the latest table metadata. Commit conflicts
* will be resolved by applying the changes to the new latest metadata and reattempting the commit.
* <p>
* Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
* deleted by snapshots that are expired will be deleted. {@link #deleteWith(Consumer)} can be used
* to pass an alternative deletion method.
*
* {@link #apply()} returns a list of the snapshots that will be removed.
*/
public interface ExpireSnapshots extends PendingUpdate<List<Snapshot>> {
/**
* Expires a specific {@link Snapshot} identified by id.
*
* @param snapshotId long id of the snapshot to expire
* @return this for method chaining
*/
ExpireSnapshots expireSnapshotId(long snapshotId);
/**
* Expires all snapshots older than the given timestamp.
*
* @param timestampMillis a long timestamp, as returned by {@link System#currentTimeMillis()}
* @return this for method chaining
*/
ExpireSnapshots expireOlderThan(long timestampMillis);
/**
* Retains the most recent ancestors of the current snapshot.
* <p>
* If a snapshot would be expired because it is older than the expiration timestamp, but is one of
* the {@code numSnapshot} most recent ancestors of the current state, it will be retained. This
* will not cause snapshots explicitly identified by id from expiring.
* <p>
* This may keep more than {@code numSnapshot} ancestors if snapshots are added concurrently. This
* may keep less than {@code numSnapshot} ancestors if the current table state does not have that many.
*
* @param numSnapshots the number of snapshots to retain
* @return this for method chaining
*/
ExpireSnapshots retainLast(int numSnapshots);
/**
* Passes an alternative delete implementation that will be used for manifests and data files.
* <p>
* Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
* deleted by snapshots that are expired will be deleted.
* <p>
* If this method is not called, unnecessary manifests and data files will still be deleted.
*
* @param deleteFunc a function that will be called to delete manifests and data files
* @return this for method chaining
*/
ExpireSnapshots deleteWith(Consumer<String> deleteFunc);
/**
* Passes an alternative executor service that will be used for manifests and data files deletion.
* <p>
* Manifest files that are no longer used by valid snapshots will be deleted. Data files that were
* deleted by snapshots that are expired will be deleted.
* <p>
* If this method is not called, unnecessary manifests and data files will still be deleted using a single threaded
* executor service.
*
* @param executorService an executor service to parallelize tasks to delete manifests and data files
* @return this for method chaining
*/
ExpireSnapshots executeWith(ExecutorService executorService);
/**
* Allows expiration of snapshots without any cleanup of underlying manifest or data files.
* <p>
* Allows control in removing data and manifest files which may be more efficiently removed using
* a distributed framework through the actions API.
*
* @param clean setting this to false will skip deleting expired manifests and files
* @return this for method chaining
*/
ExpireSnapshots cleanExpiredFiles(boolean clean);
}
| 1 | 22,960 | Can we add the new name in parallel and deprecate this name? We should be careful about changes to the public API and give people at least a release to switch over before removing public methods. | apache-iceberg | java |
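The reviewer asks for the rename to be staged: introduce executeDeleteWith alongside executeWith and deprecate the old name for at least one release. The Iceberg interface itself is Java and is not reproduced here; the sketch below only illustrates the general keep-the-old-name-as-a-deprecated-delegating-alias pattern, in Python, with hypothetical names.

```python
import warnings


class ExpireSnapshotsLike:
    """Illustrative stand-in for a builder-style API, not the real Iceberg interface."""

    def execute_delete_with(self, executor_service):
        # New, preferred name: remember the executor used for file deletion.
        self._delete_executor = executor_service
        return self

    def execute_with(self, executor_service):
        # Old name kept for a deprecation period: warn and delegate to the new method.
        warnings.warn(
            "execute_with() is deprecated, use execute_delete_with() instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.execute_delete_with(executor_service)
```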
@@ -32,8 +32,10 @@ bool wlr_backend_start(struct wlr_backend *backend) {
}
void wlr_backend_destroy(struct wlr_backend *backend) {
- if (backend->impl->destroy) {
+ if (backend->impl && backend->impl->destroy) {
backend->impl->destroy(backend);
+ } else {
+ free(backend);
}
}
| 1 | #include <wayland-server.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <libinput.h>
#include <wlr/backend/session.h>
#include <wlr/backend/interface.h>
#include <wlr/backend/drm.h>
#include <wlr/backend/libinput.h>
#include <wlr/backend/wayland.h>
#include <wlr/backend/multi.h>
#include <wlr/util/log.h>
#include "backend/udev.h"
void wlr_backend_init(struct wlr_backend *backend,
const struct wlr_backend_impl *impl) {
assert(backend);
backend->impl = impl;
wl_signal_init(&backend->events.input_add);
wl_signal_init(&backend->events.input_remove);
wl_signal_init(&backend->events.output_add);
wl_signal_init(&backend->events.output_remove);
}
bool wlr_backend_start(struct wlr_backend *backend) {
if (backend->impl->start) {
return backend->impl->start(backend);
}
return true;
}
void wlr_backend_destroy(struct wlr_backend *backend) {
if (backend->impl->destroy) {
backend->impl->destroy(backend);
}
}
struct wlr_egl *wlr_backend_get_egl(struct wlr_backend *backend) {
if (backend->impl->get_egl) {
return backend->impl->get_egl(backend);
}
return NULL;
}
static struct wlr_backend *attempt_wl_backend(struct wl_display *display) {
struct wlr_backend *backend = wlr_wl_backend_create(display);
if (backend) {
int outputs = 1;
const char *_outputs = getenv("WLR_WL_OUTPUTS");
if (_outputs) {
char *end;
outputs = (int)strtol(_outputs, &end, 10);
if (*end) {
wlr_log(L_ERROR, "WLR_WL_OUTPUTS specified with invalid integer, ignoring");
outputs = 1;
} else if (outputs < 0) {
wlr_log(L_ERROR, "WLR_WL_OUTPUTS specified with negative outputs, ignoring");
outputs = 1;
}
}
while (outputs--) {
wlr_wl_output_create(backend);
}
}
return backend;
}
struct wlr_backend *wlr_backend_autocreate(struct wl_display *display) {
struct wlr_backend *backend;
if (getenv("WAYLAND_DISPLAY") || getenv("_WAYLAND_DISPLAY")) {
backend = attempt_wl_backend(display);
if (backend) {
return backend;
}
}
if (getenv("DISPLAY")) {
wlr_log(L_ERROR, "X11 backend is not implemented"); // TODO
return NULL;
}
// Attempt DRM+libinput
struct wlr_session *session = wlr_session_start(display);
if (!session) {
wlr_log(L_ERROR, "Failed to start a DRM session");
return NULL;
}
struct wlr_udev *udev = wlr_udev_create(display);
if (!udev) {
wlr_log(L_ERROR, "Failed to start udev");
goto error_session;
}
int gpu = wlr_udev_find_gpu(udev, session);
if (gpu == -1) {
wlr_log(L_ERROR, "Failed to open DRM device");
goto error_udev;
}
backend = wlr_multi_backend_create(session, udev);
if (!backend) {
goto error_gpu;
}
struct wlr_backend *libinput = wlr_libinput_backend_create(display, session, udev);
if (!libinput) {
goto error_multi;
}
struct wlr_backend *drm = wlr_drm_backend_create(display, session, udev, gpu);
if (!drm) {
goto error_libinput;
}
wlr_multi_backend_add(backend, libinput);
wlr_multi_backend_add(backend, drm);
return backend;
error_libinput:
wlr_backend_destroy(libinput);
error_multi:
wlr_backend_destroy(backend);
error_gpu:
wlr_session_close_file(session, gpu);
error_udev:
wlr_udev_destroy(udev);
error_session:
wlr_session_finish(session);
return NULL;
}
| 1 | 7,700 | If you're going to start testing that impl itself is not NULL you should probably be doing it everywhere. | swaywm-wlroots | c |
@@ -159,7 +159,7 @@ func ParseFromStructuredObject(s interface{}) (Manifest, error) {
}, nil
}
-func LoadPlainYAMLManifests(ctx context.Context, dir string, names []string) ([]Manifest, error) {
+func LoadPlainYAMLManifests(ctx context.Context, dir string, names []string, configFileName string) ([]Manifest, error) {
// If no name was specified we have to walk the app directory to collect the manifest list.
if len(names) == 0 {
err := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error { | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kubernetes
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/yaml"
"github.com/pipe-cd/pipe/pkg/model"
)
type Manifest struct {
Key ResourceKey
u *unstructured.Unstructured
}
func MakeManifest(key ResourceKey, u *unstructured.Unstructured) Manifest {
return Manifest{
Key: key,
u: u,
}
}
func (m Manifest) Duplicate(name string) Manifest {
u := m.u.DeepCopy()
u.SetName(name)
key := m.Key
key.Name = name
return Manifest{
Key: key,
u: u,
}
}
func (m Manifest) YamlBytes() ([]byte, error) {
return yaml.Marshal(m.u)
}
func (m Manifest) MarshalJSON() ([]byte, error) {
return m.u.MarshalJSON()
}
func (m Manifest) AddAnnotations(annotations map[string]string) {
if len(annotations) == 0 {
return
}
annos := m.u.GetAnnotations()
if annos != nil {
for k, v := range annotations {
annos[k] = v
}
} else {
annos = annotations
}
m.u.SetAnnotations(annos)
}
func (m Manifest) GetAnnotations() map[string]string {
return m.u.GetAnnotations()
}
func (m Manifest) GetNestedStringMap(fields ...string) (map[string]string, error) {
sm, _, err := unstructured.NestedStringMap(m.u.Object, fields...)
if err != nil {
return nil, err
}
return sm, nil
}
// AddStringMapValues adds or overrides the given key-values into the string map
// that can be found at the specified fields.
func (m Manifest) AddStringMapValues(values map[string]string, fields ...string) error {
curMap, _, err := unstructured.NestedStringMap(m.u.Object, fields...)
if err != nil {
return err
}
if curMap == nil {
curMap = values
} else {
for k, v := range values {
curMap[k] = v
}
}
return unstructured.SetNestedStringMap(m.u.Object, curMap, fields...)
}
func (m Manifest) GetSpec() (interface{}, error) {
spec, ok, err := unstructured.NestedFieldNoCopy(m.u.Object, "spec")
if err != nil {
return nil, err
}
if !ok {
return nil, fmt.Errorf("spec was not found")
}
return spec, nil
}
func (m Manifest) SetStructuredSpec(spec interface{}) error {
data, err := yaml.Marshal(spec)
if err != nil {
return err
}
unstructuredSpec := make(map[string]interface{})
if err := yaml.Unmarshal(data, &unstructuredSpec); err != nil {
return err
}
return unstructured.SetNestedField(m.u.Object, unstructuredSpec, "spec")
}
func (m Manifest) ConvertToStructuredObject(o interface{}) error {
data, err := m.MarshalJSON()
if err != nil {
return err
}
return json.Unmarshal(data, o)
}
func ParseFromStructuredObject(s interface{}) (Manifest, error) {
data, err := json.Marshal(s)
if err != nil {
return Manifest{}, err
}
obj := &unstructured.Unstructured{}
if err := obj.UnmarshalJSON(data); err != nil {
return Manifest{}, err
}
return Manifest{
Key: MakeResourceKey(obj),
u: obj,
}, nil
}
func LoadPlainYAMLManifests(ctx context.Context, dir string, names []string) ([]Manifest, error) {
// If no name was specified we have to walk the app directory to collect the manifest list.
if len(names) == 0 {
err := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if path == dir {
return nil
}
if f.IsDir() {
return filepath.SkipDir
}
ext := filepath.Ext(f.Name())
if ext != ".yaml" && ext != ".yml" && ext != ".json" {
return nil
}
// TODO: Allow checking configuration file names other than the default one too
if f.Name() == model.DefaultDeploymentConfigFileName {
return nil
}
names = append(names, f.Name())
return nil
})
if err != nil {
return nil, err
}
}
manifests := make([]Manifest, 0, len(names))
for _, name := range names {
path := filepath.Join(dir, name)
ms, err := LoadManifestsFromYAMLFile(path)
if err != nil {
return nil, fmt.Errorf("failed to load maninifest at %s (%w)", path, err)
}
manifests = append(manifests, ms...)
}
return manifests, nil
}
func LoadManifestsFromYAMLFile(path string) ([]Manifest, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return ParseManifests(string(data))
}
func ParseManifests(data string) ([]Manifest, error) {
const separator = "\n---"
var (
parts = strings.Split(data, separator)
manifests = make([]Manifest, 0, len(parts))
)
for _, part := range parts {
// Ignore all the cases where there is no content between separators.
part = strings.TrimSpace(part)
if len(part) == 0 {
continue
}
var obj unstructured.Unstructured
if err := yaml.Unmarshal([]byte(part), &obj); err != nil {
return nil, err
}
manifests = append(manifests, Manifest{
Key: MakeResourceKey(&obj),
u: &obj,
})
}
return manifests, nil
}
| 1 | 8,973 | `ctx` is unused in LoadPlainYAMLManifests | pipe-cd-pipe | go |
@@ -87,7 +87,7 @@ public class BlockHeaderBuilder {
}
public static BlockHeaderBuilder fromBuilder(final BlockHeaderBuilder fromBuilder) {
- BlockHeaderBuilder toBuilder =
+ final BlockHeaderBuilder toBuilder =
create()
.parentHash(fromBuilder.parentHash)
.ommersHash(fromBuilder.ommersHash) | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.core;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import java.time.Instant;
import java.util.OptionalLong;
import org.apache.tuweni.bytes.Bytes;
/** A utility class for building block headers. */
public class BlockHeaderBuilder {
private Hash parentHash;
private Hash ommersHash;
private Address coinbase;
private Hash stateRoot;
private Hash transactionsRoot;
private Hash receiptsRoot;
private LogsBloomFilter logsBloom;
private Difficulty difficulty;
private long number = -1L;
private long gasLimit = -1L;
private long gasUsed = -1L;
private long timestamp = -1L;
private Bytes extraData;
private Long baseFee = null;
private Hash mixHash;
private BlockHeaderFunctions blockHeaderFunctions;
// A nonce can be any value so we use the OptionalLong
// instead of an invalid identifier such as -1.
private OptionalLong nonce = OptionalLong.empty();
public static BlockHeaderBuilder create() {
return new BlockHeaderBuilder();
}
public static BlockHeaderBuilder fromHeader(final BlockHeader header) {
return create()
.parentHash(header.getParentHash())
.ommersHash(header.getOmmersHash())
.coinbase(header.getCoinbase())
.stateRoot(header.getStateRoot())
.transactionsRoot(header.getTransactionsRoot())
.receiptsRoot(header.getReceiptsRoot())
.logsBloom(header.getLogsBloom())
.difficulty(header.getDifficulty())
.number(header.getNumber())
.gasLimit(header.getGasLimit())
.gasUsed(header.getGasUsed())
.timestamp(header.getTimestamp())
.extraData(header.getExtraData())
.baseFee(header.getBaseFee().orElse(null))
.mixHash(header.getMixHash())
.nonce(header.getNonce());
}
public static BlockHeaderBuilder fromBuilder(final BlockHeaderBuilder fromBuilder) {
BlockHeaderBuilder toBuilder =
create()
.parentHash(fromBuilder.parentHash)
.ommersHash(fromBuilder.ommersHash)
.coinbase(fromBuilder.coinbase)
.stateRoot(fromBuilder.stateRoot)
.transactionsRoot(fromBuilder.transactionsRoot)
.receiptsRoot(fromBuilder.receiptsRoot)
.logsBloom(fromBuilder.logsBloom)
.difficulty(fromBuilder.difficulty)
.number(fromBuilder.number)
.gasLimit(fromBuilder.gasLimit)
.gasUsed(fromBuilder.gasUsed)
.timestamp(fromBuilder.timestamp)
.extraData(fromBuilder.extraData)
.mixHash(fromBuilder.mixHash)
.baseFee(fromBuilder.baseFee)
.blockHeaderFunctions(fromBuilder.blockHeaderFunctions);
toBuilder.nonce = fromBuilder.nonce;
return toBuilder;
}
public BlockHeader buildBlockHeader() {
validateBlockHeader();
return new BlockHeader(
parentHash,
ommersHash,
coinbase,
stateRoot,
transactionsRoot,
receiptsRoot,
logsBloom,
difficulty,
number,
gasLimit,
gasUsed,
timestamp < 0 ? Instant.now().getEpochSecond() : timestamp,
extraData,
baseFee,
mixHash,
nonce.getAsLong(),
blockHeaderFunctions);
}
public ProcessableBlockHeader buildProcessableBlockHeader() {
validateProcessableBlockHeader();
return new ProcessableBlockHeader(
parentHash, coinbase, difficulty, number, gasLimit, timestamp, baseFee);
}
public SealableBlockHeader buildSealableBlockHeader() {
validateSealableBlockHeader();
return new SealableBlockHeader(
parentHash,
ommersHash,
coinbase,
stateRoot,
transactionsRoot,
receiptsRoot,
logsBloom,
difficulty,
number,
gasLimit,
gasUsed,
timestamp,
extraData,
baseFee);
}
private void validateBlockHeader() {
validateSealableBlockHeader();
checkState(this.mixHash != null, "Missing mixHash");
checkState(this.nonce.isPresent(), "Missing nonce");
checkState(this.blockHeaderFunctions != null, "Missing blockHeaderFunctions");
}
private void validateProcessableBlockHeader() {
checkState(this.parentHash != null, "Missing parent hash");
checkState(this.coinbase != null, "Missing coinbase");
checkState(this.difficulty != null, "Missing block difficulty");
checkState(this.number > -1L, "Missing block number");
checkState(this.gasLimit > -1L, "Missing gas limit");
checkState(this.timestamp > -1L, "Missing timestamp");
}
private void validateSealableBlockHeader() {
validateProcessableBlockHeader();
checkState(this.ommersHash != null, "Missing ommers hash");
checkState(this.stateRoot != null, "Missing state root");
checkState(this.transactionsRoot != null, "Missing transaction root");
checkState(this.receiptsRoot != null, "Missing receipts root");
checkState(this.logsBloom != null, "Missing logs bloom filter");
checkState(this.gasUsed > -1L, "Missing gas used");
checkState(this.extraData != null, "Missing extra data field");
}
public BlockHeaderBuilder populateFrom(final ProcessableBlockHeader processableBlockHeader) {
checkNotNull(processableBlockHeader);
parentHash(processableBlockHeader.getParentHash());
coinbase(processableBlockHeader.getCoinbase());
difficulty(processableBlockHeader.getDifficulty());
number(processableBlockHeader.getNumber());
gasLimit(processableBlockHeader.getGasLimit());
timestamp(processableBlockHeader.getTimestamp());
baseFee(processableBlockHeader.getBaseFee().orElse(null));
return this;
}
public BlockHeaderBuilder populateFrom(final SealableBlockHeader sealableBlockHeader) {
checkNotNull(sealableBlockHeader);
parentHash(sealableBlockHeader.getParentHash());
ommersHash(sealableBlockHeader.getOmmersHash());
coinbase(sealableBlockHeader.getCoinbase());
stateRoot(sealableBlockHeader.getStateRoot());
transactionsRoot(sealableBlockHeader.getTransactionsRoot());
receiptsRoot(sealableBlockHeader.getReceiptsRoot());
logsBloom(sealableBlockHeader.getLogsBloom());
difficulty(sealableBlockHeader.getDifficulty());
number(sealableBlockHeader.getNumber());
gasLimit(sealableBlockHeader.getGasLimit());
gasUsed(sealableBlockHeader.getGasUsed());
timestamp(sealableBlockHeader.getTimestamp());
extraData(sealableBlockHeader.getExtraData());
baseFee(sealableBlockHeader.getBaseFee().orElse(null));
return this;
}
public BlockHeaderBuilder parentHash(final Hash hash) {
checkNotNull(hash);
this.parentHash = hash;
return this;
}
public BlockHeaderBuilder ommersHash(final Hash hash) {
checkNotNull(hash);
this.ommersHash = hash;
return this;
}
public BlockHeaderBuilder coinbase(final Address address) {
checkNotNull(address);
this.coinbase = address;
return this;
}
public BlockHeaderBuilder stateRoot(final Hash hash) {
checkNotNull(hash);
this.stateRoot = hash;
return this;
}
public BlockHeaderBuilder transactionsRoot(final Hash hash) {
checkNotNull(hash);
this.transactionsRoot = hash;
return this;
}
public BlockHeaderBuilder receiptsRoot(final Hash hash) {
checkNotNull(hash);
this.receiptsRoot = hash;
return this;
}
public BlockHeaderBuilder logsBloom(final LogsBloomFilter filter) {
checkNotNull(filter);
this.logsBloom = filter;
return this;
}
public BlockHeaderBuilder difficulty(final Difficulty difficulty) {
checkNotNull(difficulty);
this.difficulty = difficulty;
return this;
}
public BlockHeaderBuilder number(final long number) {
checkArgument(number >= 0L);
this.number = number;
return this;
}
public BlockHeaderBuilder gasLimit(final long gasLimit) {
checkArgument(gasLimit >= 0L);
this.gasLimit = gasLimit;
return this;
}
public BlockHeaderBuilder gasUsed(final long gasUsed) {
checkArgument(gasUsed > -1L);
this.gasUsed = gasUsed;
return this;
}
public BlockHeaderBuilder timestamp(final long timestamp) {
checkArgument(timestamp >= 0);
this.timestamp = timestamp;
return this;
}
public BlockHeaderBuilder extraData(final Bytes data) {
checkNotNull(data);
this.extraData = data;
return this;
}
public BlockHeaderBuilder mixHash(final Hash mixHash) {
checkNotNull(mixHash);
this.mixHash = mixHash;
return this;
}
public BlockHeaderBuilder nonce(final long nonce) {
this.nonce = OptionalLong.of(nonce);
return this;
}
public BlockHeaderBuilder blockHeaderFunctions(final BlockHeaderFunctions blockHeaderFunctions) {
this.blockHeaderFunctions = blockHeaderFunctions;
return this;
}
public BlockHeaderBuilder baseFee(final Long baseFee) {
this.baseFee = baseFee;
return this;
}
}
| 1 | 25,130 | why is this called toBuilder when the method is called fromBuilder? (I realise you did not change this...) | hyperledger-besu | java |
@@ -40,4 +40,10 @@ public class FlinkConfigOptions {
.intType()
.defaultValue(100)
.withDescription("Sets max infer parallelism for source operator.");
+
+ public static final ConfigOption<Integer> SOURCE_READER_FETCH_BATCH_SIZE = ConfigOptions
+ .key("source.iceberg.reader.fetch-batch-size")
+ .intType()
+ .defaultValue(2048)
+ .withDescription("The target batch size for split reader fetch.");
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink;
import org.apache.flink.configuration.ConfigOption;
import org.apache.flink.configuration.ConfigOptions;
public class FlinkConfigOptions {
private FlinkConfigOptions() {
}
public static final ConfigOption<Boolean> TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM =
ConfigOptions.key("table.exec.iceberg.infer-source-parallelism")
.booleanType()
.defaultValue(true)
.withDescription("If is false, parallelism of source are set by config.\n" +
"If is true, source parallelism is inferred according to splits number.\n");
public static final ConfigOption<Integer> TABLE_EXEC_ICEBERG_INFER_SOURCE_PARALLELISM_MAX =
ConfigOptions.key("table.exec.iceberg.infer-source-parallelism.max")
.intType()
.defaultValue(100)
.withDescription("Sets max infer parallelism for source operator.");
}
| 1 | 34,644 | nit: seems rather large. | apache-iceberg | java |
@@ -119,7 +119,7 @@ def get_listens(user_name):
latest_listen = db_conn.fetch_listens(
user_name,
limit=1,
- to_ts=max_ts,
+ to_ts=int(time.time()),
)
latest_listen_ts = latest_listen[0].ts_since_epoch if len(latest_listen) > 0 else 0
| 1 | import ujson
from flask import Blueprint, request, jsonify, current_app
from listenbrainz.webserver.errors import APIBadRequest, APIInternalServerError, APIUnauthorized, APINotFound, APIServiceUnavailable
from listenbrainz.db.exceptions import DatabaseException
from listenbrainz.webserver.decorators import crossdomain
from listenbrainz.webserver.views.follow import parse_user_list
from listenbrainz import webserver
import listenbrainz.db.user as db_user
from listenbrainz.webserver.rate_limiter import ratelimit
import listenbrainz.webserver.redis_connection as redis_connection
from listenbrainz.webserver.views.api_tools import insert_payload, log_raise_400, validate_listen, MAX_LISTEN_SIZE, MAX_ITEMS_PER_GET,\
DEFAULT_ITEMS_PER_GET, LISTEN_TYPE_SINGLE, LISTEN_TYPE_IMPORT, LISTEN_TYPE_PLAYING_NOW
import time
api_bp = Blueprint('api_v1', __name__)
@api_bp.route("/submit-listens", methods=["POST", "OPTIONS"])
@crossdomain(headers="Authorization, Content-Type")
@ratelimit()
def submit_listen():
"""
Submit listens to the server. A user token (found on https://listenbrainz.org/profile/ ) must
be provided in the Authorization header!
Listens should be submitted for tracks when the user has listened to half the track or 4 minutes of
the track, whichever is lower. If the user hasn't listened to 4 minutes or half the track, it doesn't
fully count as a listen and should not be submitted.
For complete details on the format of the JSON to be POSTed to this endpoint, see :ref:`json-doc`.
:reqheader Authorization: Token <user token>
:statuscode 200: listen(s) accepted.
:statuscode 400: invalid JSON sent, see error message for details.
:statuscode 401: invalid authorization. See error message for details.
:resheader Content-Type: *application/json*
"""
user = _validate_auth_header()
raw_data = request.get_data()
try:
data = ujson.loads(raw_data.decode("utf-8"))
except ValueError as e:
log_raise_400("Cannot parse JSON document: %s" % e, raw_data)
try:
payload = data['payload']
if len(payload) == 0:
return "success"
if len(raw_data) > len(payload) * MAX_LISTEN_SIZE:
log_raise_400("JSON document is too large. In aggregate, listens may not "
"be larger than %d characters." % MAX_LISTEN_SIZE, payload)
if data['listen_type'] not in ('playing_now', 'single', 'import'):
log_raise_400("JSON document requires a valid listen_type key.", payload)
listen_type = _get_listen_type(data['listen_type'])
if (listen_type == LISTEN_TYPE_SINGLE or listen_type == LISTEN_TYPE_PLAYING_NOW) and len(payload) > 1:
log_raise_400("JSON document contains more than listen for a single/playing_now. "
"It should contain only one.", payload)
except KeyError:
log_raise_400("Invalid JSON document submitted.", raw_data)
# validate listens to make sure json is okay
for listen in payload:
validate_listen(listen, listen_type)
try:
insert_payload(payload, user, listen_type=_get_listen_type(data['listen_type']))
except APIServiceUnavailable as e:
raise
except Exception as e:
raise APIInternalServerError("Something went wrong. Please try again.")
return jsonify({'status': 'ok'})
@api_bp.route("/user/<user_name>/listens")
@ratelimit()
def get_listens(user_name):
"""
Get listens for user ``user_name``. The format for the JSON returned is defined in our :ref:`json-doc`.
If none of the optional arguments are given, this endpoint will return the :data:`~webserver.views.api.DEFAULT_ITEMS_PER_GET` most recent listens.
The optional ``max_ts`` and ``min_ts`` UNIX epoch timestamps control at which point in time to start returning listens. You may specify max_ts or
min_ts, but not both in one call. Listens are always returned in descending timestamp order.
:param max_ts: If you specify a ``max_ts`` timestamp, listens with listened_at less than (but not including) this value will be returned.
:param min_ts: If you specify a ``min_ts`` timestamp, listens with listened_at greater than (but not including) this value will be returned.
:param count: Optional, number of listens to return. Default: :data:`~webserver.views.api.DEFAULT_ITEMS_PER_GET` . Max: :data:`~webserver.views.api.MAX_ITEMS_PER_GET`
:statuscode 200: Yay, you have data!
:resheader Content-Type: *application/json*
"""
max_ts = _parse_int_arg("max_ts")
min_ts = _parse_int_arg("min_ts")
# if no max given, use now()
if max_ts and min_ts:
log_raise_400("You may only specify max_ts or min_ts, not both.")
# If none are given, start with now and go down
if max_ts == None and min_ts == None:
max_ts = int(time.time())
db_conn = webserver.create_influx(current_app)
listens = db_conn.fetch_listens(
user_name,
limit=min(_parse_int_arg("count", DEFAULT_ITEMS_PER_GET), MAX_ITEMS_PER_GET),
from_ts=min_ts,
to_ts=max_ts,
)
listen_data = []
for listen in listens:
listen_data.append(listen.to_api())
latest_listen = db_conn.fetch_listens(
user_name,
limit=1,
to_ts=max_ts,
)
latest_listen_ts = latest_listen[0].ts_since_epoch if len(latest_listen) > 0 else 0
if min_ts:
listen_data = listen_data[::-1]
return jsonify({'payload': {
'user_id': user_name,
'count': len(listen_data),
'listens': listen_data,
'latest_listen_ts': latest_listen_ts,
}})
@api_bp.route("/user/<user_name>/playing-now")
@ratelimit()
def get_playing_now(user_name):
"""
Get the listen being played right now for user ``user_name``.
This endpoint returns a JSON document with a single listen in the same format as the ``/user/<user_name>/listens`` endpoint,
with one key difference, there will only be one listen returned at maximum and the listen will not contain a ``listened_at`` element.
The format for the JSON returned is defined in our :ref:`json-doc`.
:statuscode 200: Yay, you have data!
:resheader Content-Type: *application/json*
"""
user = db_user.get_by_mb_id(user_name)
if user is None:
raise APINotFound("Cannot find user: %s" % user_name)
playing_now_listen = redis_connection._redis.get_playing_now(user['id'])
listen_data = []
count = 0
if playing_now_listen:
count += 1
listen_data = [{
'track_metadata': playing_now_listen.data,
}]
return jsonify({
'payload': {
'count': count,
'user_id': user_name,
'playing_now': True,
'listens': listen_data,
},
})
@api_bp.route("/users/<user_list>/recent-listens")
@crossdomain(headers='Authorization, Content-Type')
@ratelimit()
def get_recent_listens_for_user_list(user_list):
"""
Fetch the most recent listens for a comma separated list of users. Take care to properly HTTP escape
user names that contain commas!
:statuscode 200: Fetched listens successfully.
:statuscode 400: Your user list was incomplete or otherwise invalid.
:resheader Content-Type: *application/json*
"""
limit = _parse_int_arg("limit", 2)
users = parse_user_list(user_list)
if not len(users):
raise APIBadRequest("user_list is empty or invalid.")
db_conn = webserver.create_influx(current_app)
listens = db_conn.fetch_recent_listens_for_users(
users,
limit=limit
)
listen_data = []
for listen in listens:
listen_data.append(listen.to_api())
return jsonify({'payload': {
'user_list': user_list,
'count': len(listen_data),
'listens': listen_data,
}})
@api_bp.route('/latest-import', methods=['GET', 'POST', 'OPTIONS'])
@crossdomain(headers='Authorization, Content-Type')
@ratelimit()
def latest_import():
"""
Get and update the timestamp of the newest listen submitted in previous imports to ListenBrainz.
In order to get the timestamp for a user, make a GET request to this endpoint. The data returned will
be JSON of the following format:
{
'musicbrainz_id': the MusicBrainz ID of the user,
'latest_import': the timestamp of the newest listen submitted in previous imports. Defaults to 0
}
:param user_name: the MusicBrainz ID of the user whose data is needed
:statuscode 200: Yay, you have data!
:resheader Content-Type: *application/json*
In order to update the timestamp of a user, you'll have to provide a user token in the Authorization
Header. User tokens can be found on https://listenbrainz.org/profile/ .
The JSON that needs to be posted must contain a field named `ts` in the root with a valid unix timestamp.
:reqheader Authorization: Token <user token>
:statuscode 200: latest import timestamp updated
:statuscode 400: invalid JSON sent, see error message for details.
:statuscode 401: invalid authorization. See error message for details.
"""
if request.method == 'GET':
user_name = request.args.get('user_name', '')
user = db_user.get_by_mb_id(user_name)
if user is None:
raise APINotFound("Cannot find user: {user_name}".format(user_name=user_name))
return jsonify({
'musicbrainz_id': user['musicbrainz_id'],
'latest_import': 0 if not user['latest_import'] else int(user['latest_import'].strftime('%s'))
})
elif request.method == 'POST':
user = _validate_auth_header()
try:
ts = ujson.loads(request.get_data()).get('ts', 0)
except ValueError:
raise APIBadRequest('Invalid data sent')
try:
db_user.increase_latest_import(user['musicbrainz_id'], int(ts))
except DatabaseException as e:
current_app.logger.error("Error while updating latest import: {}".format(e))
raise APIInternalServerError('Could not update latest_import, try again')
return jsonify({'status': 'ok'})
@api_bp.route('/validate-token', methods=['GET'])
@ratelimit()
def validate_token():
"""
Check whether a User Token is a valid entry in the database.
In order to query this endpoint, send a GET request.
A JSON response will be returned, with one of three codes.
:statuscode 200: The user token is valid/invalid.
:statuscode 400: No token was sent to the endpoint.
"""
auth_token = request.args.get('token', '')
if not auth_token:
raise APIBadRequest("You need to provide an Authorization token.")
user = db_user.get_by_token(auth_token)
if user is None:
return jsonify({
'code': 200,
'message': 'Token invalid.'
})
else:
return jsonify({
'code': 200,
'message': 'Token valid.'
})
def _parse_int_arg(name, default=None):
value = request.args.get(name)
if value:
try:
return int(value)
except ValueError:
raise APIBadRequest("Invalid %s argument: %s" % (name, value))
else:
return default
def _validate_auth_header():
auth_token = request.headers.get('Authorization')
if not auth_token:
raise APIUnauthorized("You need to provide an Authorization header.")
try:
auth_token = auth_token.split(" ")[1]
except IndexError:
raise APIUnauthorized("Provided Authorization header is invalid.")
user = db_user.get_by_token(auth_token)
if user is None:
raise APIUnauthorized("Invalid authorization token.")
return user
def _get_listen_type(listen_type):
return {
'single': LISTEN_TYPE_SINGLE,
'import': LISTEN_TYPE_IMPORT,
'playing_now': LISTEN_TYPE_PLAYING_NOW
}.get(listen_type)
| 1 | 15,362 | I think it'd make sense to only calculate time.time() once (it's also used if max_ts and min_ts aren't set) | metabrainz-listenbrainz-server | py |
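The comment above points out that get_listens evaluates int(time.time()) in one place while the latest-listen query gets its own upper bound; computing "now" once and reusing it keeps the two queries consistent and avoids the duplicate call. A simplified sketch of that shape follows; it is not the actual view code and the helper name is made up.

```python
import time


def resolve_listen_bounds(max_ts=None, min_ts=None):
    """Compute 'now' a single time and reuse it wherever a current-time bound is needed."""
    now = int(time.time())
    if max_ts is None and min_ts is None:
        max_ts = now  # default upper bound for the main listen query
    latest_listen_upper_bound = now  # reused instead of calling time.time() again
    return max_ts, min_ts, latest_listen_upper_bound


print(resolve_listen_bounds())
```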
@@ -53,6 +53,8 @@ public abstract class TestCaseView {
public abstract GrpcStreamingType grpcStreamingType();
+ public abstract String grpcStubTypeName();
+
public abstract String mockGrpcStubTypeName();
public abstract String createStubFunctionName(); | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.viewmodel.testing;
import com.google.api.codegen.config.GrpcStreamingConfig.GrpcStreamingType;
import com.google.api.codegen.viewmodel.ClientMethodType;
import com.google.api.codegen.viewmodel.InitCodeView;
import com.google.auto.value.AutoValue;
import java.util.List;
@AutoValue
public abstract class TestCaseView {
public abstract String clientMethodName();
public abstract InitCodeView initCode();
public abstract ClientMethodType clientMethodType();
public abstract MockGrpcResponseView mockResponse();
public abstract List<ClientTestAssertView> asserts();
public abstract String requestTypeName();
public abstract String responseTypeName();
public abstract List<PageStreamingResponseView> pageStreamingResponseViews();
public abstract String name();
public abstract String nameWithException();
public abstract String serviceConstructorName();
public abstract String mockServiceVarName();
public abstract boolean hasRequestParameters();
public abstract boolean hasReturnValue();
public abstract GrpcStreamingType grpcStreamingType();
public abstract String mockGrpcStubTypeName();
public abstract String createStubFunctionName();
public abstract String grpcStubCallString();
public static Builder newBuilder() {
return new AutoValue_TestCaseView.Builder();
}
@AutoValue.Builder
public abstract static class Builder {
public abstract Builder clientMethodName(String val);
public abstract Builder name(String val);
public abstract Builder nameWithException(String val);
public abstract Builder serviceConstructorName(String val);
public abstract Builder mockServiceVarName(String val);
public abstract Builder initCode(InitCodeView val);
public abstract Builder clientMethodType(ClientMethodType val);
public abstract Builder mockResponse(MockGrpcResponseView val);
public abstract Builder asserts(List<ClientTestAssertView> val);
public abstract Builder requestTypeName(String val);
public abstract Builder responseTypeName(String val);
public abstract Builder pageStreamingResponseViews(List<PageStreamingResponseView> val);
public abstract Builder hasRequestParameters(boolean val);
public abstract Builder hasReturnValue(boolean val);
public abstract Builder grpcStreamingType(GrpcStreamingType val);
public abstract Builder mockGrpcStubTypeName(String val);
public abstract Builder createStubFunctionName(String val);
public abstract Builder grpcStubCallString(String val);
public abstract TestCaseView build();
}
}
| 1 | 21,982 | Use existing `grpcStubCallString` instead | googleapis-gapic-generator | java |
@@ -0,0 +1,7 @@
+namespace Datadog.Trace
+{
+ internal static class TraceConstants
+ {
+ public const ulong MaxTraceId = 9_223_372_036_854_775_807; // 2^63-1
+ }
+} | 1 | 1 | 15,933 | In a recent PR, Bob added a `TracerConstants` class. Do you think this makes sense to put in that class instead so that we can consolidate? | DataDog-dd-trace-dotnet | .cs |
|
@@ -73,6 +73,10 @@ type Service struct {
auth BlockAuthenticator
parallelBlocks uint64
deadlineTimeout time.Duration
+ // catchpointWriting defines whether we've run into a state where the ledger is currently busy writing the
+ // catchpoint file. If so, we want to postpone the entire catchup process until the catchpoint file writing is complete,
+ // and resume from there without stopping the catchup timer.
+ catchpointWriting bool
// The channel gets closed when the initial sync is complete. This allows for other services to avoid
// the overhead of starting prematurely (before this node is caught-up and can validate messages for example). | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package catchup
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/rpcs"
)
const catchupPeersForSync = 10
const blockQueryPeerLimit = 10
// this should be at least the number of relays
const catchupRetryLimit = 500
// PendingUnmatchedCertificate is a single certificate that is being waited upon to have its corresponding block fetched.
type PendingUnmatchedCertificate struct {
Cert agreement.Certificate
VoteVerifier *agreement.AsyncVoteVerifier
}
// Ledger represents the interface of a block database which the
// catchup server should interact with.
type Ledger interface {
agreement.LedgerReader
AddBlock(bookkeeping.Block, agreement.Certificate) error
EnsureBlock(block *bookkeeping.Block, c agreement.Certificate)
LastRound() basics.Round
Block(basics.Round) (bookkeeping.Block, error)
IsWritingCatchpointFile() bool
}
// Service represents the catchup service. Once started and until it is stopped, it ensures that the ledger is up to date with network.
type Service struct {
syncStartNS int64 // at top of struct to keep 64 bit aligned for atomic.* ops
cfg config.Local
ledger Ledger
fetcherFactory FetcherFactory
ctx context.Context
cancel func()
done chan struct{}
log logging.Logger
net network.GossipNode
auth BlockAuthenticator
parallelBlocks uint64
deadlineTimeout time.Duration
// The channel gets closed when the initial sync is complete. This allows for other services to avoid
// the overhead of starting prematurely (before this node is caught-up and can validate messages for example).
InitialSyncDone chan struct{}
initialSyncNotified uint32
protocolErrorLogged bool
lastSupportedRound basics.Round
unmatchedPendingCertificates <-chan PendingUnmatchedCertificate
latestRoundFetcherFactory FetcherFactory
}
// A BlockAuthenticator authenticates blocks given a certificate.
//
// Note that Authenticate does not check if the block contents match
// their header as it only checks the block header. If the contents
// have not been checked yet, callers should also call
// block.ContentsMatchHeader and reject blocks that do not pass this
// check.
type BlockAuthenticator interface {
Authenticate(*bookkeeping.Block, *agreement.Certificate) error
Quit()
}
// MakeService creates a catchup service instance from its constituent components
// If wsf is nil, then fetch over gossip is disabled.
func MakeService(log logging.Logger, config config.Local, net network.GossipNode, ledger Ledger, wsf *rpcs.WsFetcherService, auth BlockAuthenticator, unmatchedPendingCertificates <-chan PendingUnmatchedCertificate) (s *Service) {
s = &Service{}
s.cfg = config
s.fetcherFactory = MakeNetworkFetcherFactory(net, catchupPeersForSync, wsf, &config)
s.ledger = ledger
s.net = net
s.auth = auth
s.unmatchedPendingCertificates = unmatchedPendingCertificates
s.latestRoundFetcherFactory = MakeNetworkFetcherFactory(net, blockQueryPeerLimit, wsf, &config)
s.log = log.With("Context", "sync")
s.parallelBlocks = config.CatchupParallelBlocks
s.deadlineTimeout = agreement.DeadlineTimeout()
return s
}
// Start the catchup service
func (s *Service) Start() {
s.done = make(chan struct{})
s.ctx, s.cancel = context.WithCancel(context.Background())
s.InitialSyncDone = make(chan struct{})
go s.periodicSync()
}
// Stop informs the catchup service that it should stop, and waits for it to stop (when periodicSync() exits)
func (s *Service) Stop() {
s.cancel()
<-s.done
if atomic.CompareAndSwapUint32(&s.initialSyncNotified, 0, 1) {
close(s.InitialSyncDone)
}
}
// IsSynchronizing returns true if we're currently executing a sync() call - either initial catchup
// or attempting to catchup after too-long waiting for next block.
// Also returns a 2nd bool indicating if this is our initial sync
func (s *Service) IsSynchronizing() (synchronizing bool, initialSync bool) {
synchronizing = atomic.LoadInt64(&s.syncStartNS) != 0
initialSync = atomic.LoadUint32(&s.initialSyncNotified) == 0
return
}
// SynchronizingTime returns the time we've been performing a catchup operation (0 if not currently catching up)
func (s *Service) SynchronizingTime() time.Duration {
startNS := atomic.LoadInt64(&s.syncStartNS)
if startNS == 0 {
return time.Duration(0)
}
timeInNS := time.Now().UnixNano()
return time.Duration(timeInNS - startNS)
}
// function scope to make a bunch of defer statements better
func (s *Service) innerFetch(fetcher Fetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc FetcherClient, err error) {
ctx, cf := context.WithCancel(s.ctx)
defer cf()
stopWaitingForLedgerRound := make(chan struct{})
defer close(stopWaitingForLedgerRound)
go func() {
select {
case <-stopWaitingForLedgerRound:
case <-s.ledger.Wait(r):
cf()
}
}()
return fetcher.FetchBlock(ctx, r)
}
// fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary.
// Returns false if we couldn't fetch or write (i.e., if we failed even after a given number of retries or if we were told to abort.)
func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool {
i := 0
hasLookback := false
for !fetcher.OutOfPeers(r) {
i++
select {
case <-s.ctx.Done():
s.log.Debugf("fetchAndWrite(%v): Aborted", r)
return false
default:
}
// Stop retrying after a while.
if i > catchupRetryLimit {
s.log.Errorf("fetchAndWrite: block retrieval exceeded retry limit")
return false
}
// Try to fetch, timing out after retryInterval
block, cert, client, err := s.innerFetch(fetcher, r)
if err != nil {
s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i)
// we've just failed to retrieve a block; wait until the previous block is fetched before trying again
// to avoid the use case where the first block doesn't exist and we're making many requests down the chain
// for no reason.
if !hasLookback {
select {
case <-s.ctx.Done():
s.log.Debugf("fetchAndWrite(%v): Aborted while waiting for lookback block to ledger after failing once", r)
return false
case hasLookback = <-lookbackComplete:
if !hasLookback {
s.log.Debugf("fetchAndWrite(%v): lookback block doesn't exist, won't try to retrieve block again", r)
return false
}
}
}
continue // retry the fetch
} else if block == nil || cert == nil {
// someone already wrote the block to the ledger, we should stop syncing
return false
}
s.log.Debugf("fetchAndWrite(%v): Got block and cert contents: %v %v", r, block, cert)
// Check that the block's contents match the block header (necessary with an untrusted block because b.Hash() only hashes the header)
if !block.ContentsMatchHeader() {
// Check if this mismatch is due to an unsupported protocol version
if _, ok := config.Consensus[block.BlockHeader.CurrentProtocol]; !ok {
s.log.Errorf("fetchAndWrite(%v): unsupported protocol version detected: '%v'", r, block.BlockHeader.CurrentProtocol)
client.Close()
return false
}
s.log.Warnf("fetchAndWrite(%v): block contents do not match header (attempt %d)", r, i)
client.Close()
continue // retry the fetch
}
// make sure that we have the lookBack block that's required for authenticating this block
if !hasLookback {
select {
case <-s.ctx.Done():
s.log.Debugf("fetchAndWrite(%v): Aborted while waiting for lookback block to ledger", r)
return false
case hasLookback = <-lookbackComplete:
if !hasLookback {
s.log.Warnf("fetchAndWrite(%v): lookback block doesn't exist, cannot authenticate new block", r)
return false
}
}
}
err = s.auth.Authenticate(block, cert)
if err != nil {
s.log.Warnf("fetchAndWrite(%v): cert did not authenticate block (attempt %d): %v", r, i, err)
client.Close()
continue // retry the fetch
}
// Write to ledger, noting that ledger writes must be in order
select {
case <-s.ctx.Done():
s.log.Debugf("fetchAndWrite(%v): Aborted while waiting to write to ledger", r)
return false
case prevFetchSuccess := <-prevFetchCompleteChan:
if prevFetchSuccess {
err := s.ledger.AddBlock(*block, *cert)
if err != nil {
switch err.(type) {
case ledger.BlockInLedgerError:
s.log.Debugf("fetchAndWrite(%v): block already in ledger", r)
return true
case protocol.Error:
if !s.protocolErrorLogged {
logging.Base().Errorf("fetchAndWrite(%v): unrecoverable protocol error detected: %v", r, err)
s.protocolErrorLogged = true
}
default:
s.log.Errorf("fetchAndWrite(%v): ledger write failed: %v", r, err)
}
return false
}
s.log.Debugf("fetchAndWrite(%v): Wrote block to ledger", r)
return true
}
s.log.Warnf("fetchAndWrite(%v): previous block doesn't exist (perhaps fetching block %v failed)", r, r-1)
return false
}
}
return false
}
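// task is a unit of pipelined catchup work; it reports the round it completed, or 0 on failure.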
type task func() basics.Round
func (s *Service) pipelineCallback(fetcher Fetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) func() basics.Round {
return func() basics.Round {
fetchResult := s.fetchAndWrite(fetcher, r, prevFetchCompleteChan, lookbackChan)
		// the fetch result will be read at most twice (once as the lookback block and once as the prev block), so we write the result twice
thisFetchComplete <- fetchResult
thisFetchComplete <- fetchResult
if !fetchResult {
s.log.Infof("failed to fetch block %v", r)
return 0
}
return r
}
}
// TODO: the following code does not handle the case where seedLookback upgrades during the fetch
func (s *Service) pipelinedFetch(seedLookback uint64) {
fetcher := s.fetcherFactory.NewOverGossip(protocol.UniCatchupReqTag)
defer fetcher.Close()
// make sure that we have at least one peer
if fetcher.NumPeers() == 0 {
return
}
parallelRequests := s.parallelBlocks
if parallelRequests < seedLookback {
parallelRequests = seedLookback
}
completed := make(chan basics.Round, parallelRequests)
taskCh := make(chan task, parallelRequests)
var wg sync.WaitGroup
defer func() {
close(taskCh)
wg.Wait()
close(completed)
}()
	// Invariant: len(taskCh) + (# pending writes to completed) <= parallelRequests
wg.Add(int(parallelRequests))
for i := uint64(0); i < parallelRequests; i++ {
go func() {
defer wg.Done()
for t := range taskCh {
completed <- t() // This write to completed comes after a read from taskCh, so the invariant is preserved.
}
}()
}
recentReqs := make([]chan bool, 0)
for i := 0; i < int(seedLookback); i++ {
		// the fetch result will be read at most twice (once as the lookback block and once as the prev block), so we write the result twice
reqComplete := make(chan bool, 2)
reqComplete <- true
reqComplete <- true
recentReqs = append(recentReqs, reqComplete)
}
from := s.ledger.NextRound()
nextRound := from
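	// Prime the pipeline with the first parallelRequests rounds; later rounds are
	// scheduled as earlier ones complete.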
for ; nextRound < from+basics.Round(parallelRequests); nextRound++ {
// If the next round is not supported
if s.nextRoundIsNotSupported(nextRound) {
// We may get here when (1) The service starts
// and gets to an unsupported round. Since in
// this loop we do not wait for the requests
// to be written to the ledger, there is no
// guarantee that the unsupported round will be
// stopped in this case.
// (2) The unsupported round is detected in the
// "the rest" loop, but did not cancel because
// the last supported round was not yet written
// to the ledger.
			// It is sufficient to check only in the first
			// iteration; however, we check in every iteration
			// in favor of code simplicity.
s.handleUnsupportedRound(nextRound)
break
}
currentRoundComplete := make(chan bool, 2)
// len(taskCh) + (# pending writes to completed) increases by 1
taskCh <- s.pipelineCallback(fetcher, nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[len(recentReqs)-int(seedLookback)])
recentReqs = append(recentReqs[1:], currentRoundComplete)
}
completedRounds := make(map[basics.Round]bool)
// the rest
for {
select {
case round := <-completed:
if round == 0 {
// there was an error
return
}
// if we're writing a catchpoint file, stop catching up to reduce the memory pressure. Once we finish writing the file we
// could resume with the catchup.
if s.ledger.IsWritingCatchpointFile() {
s.log.Info("Catchup is stopping due to catchpoint file being written")
return
}
completedRounds[round] = true
// fetch rounds we can validate
for completedRounds[nextRound-basics.Round(parallelRequests)] {
// If the next round is not supported
if s.nextRoundIsNotSupported(nextRound) {
s.handleUnsupportedRound(nextRound)
return
}
delete(completedRounds, nextRound)
currentRoundComplete := make(chan bool, 2)
// len(taskCh) + (# pending writes to completed) increases by 1
taskCh <- s.pipelineCallback(fetcher, nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[0])
recentReqs = append(recentReqs[1:], currentRoundComplete)
nextRound++
}
case <-s.ctx.Done():
return
}
}
}
// periodicSync periodically asks the network for its latest round and syncs if we've fallen behind (also if our ledger stops advancing)
func (s *Service) periodicSync() {
defer close(s.done)
// wait until network is ready, or until we're told to quit
select {
case <-s.net.Ready():
s.log.Info("network ready")
case <-s.ctx.Done():
return
}
s.sync(nil)
stuckInARow := 0
sleepDuration := s.deadlineTimeout
for {
currBlock := s.ledger.LastRound()
select {
case <-s.ctx.Done():
return
case <-s.ledger.Wait(currBlock + 1):
// Ledger moved forward; likely to be by the agreement service.
stuckInARow = 0
// go to sleep for a short while, for a random duration.
			// we want to sleep for a random duration since it would "de-synchronize" us from the ledger advance sync
sleepDuration = time.Duration(crypto.RandUint63()) % s.deadlineTimeout
continue
case <-time.After(sleepDuration):
if sleepDuration < s.deadlineTimeout {
sleepDuration = s.deadlineTimeout
continue
}
// check to see if we're currently writing a catchpoint file. If so, wait longer before attempting again.
if s.ledger.IsWritingCatchpointFile() {
// keep the existing sleep duration and try again later.
continue
}
s.log.Info("It's been too long since our ledger advanced; resyncing")
s.sync(nil)
case cert := <-s.unmatchedPendingCertificates:
// the agreement service has a valid certificate for a block, but not the block itself.
s.sync(&cert)
}
if currBlock == s.ledger.LastRound() {
stuckInARow++
} else {
stuckInARow = 0
}
if stuckInARow == s.cfg.CatchupFailurePeerRefreshRate {
stuckInARow = 0
// TODO: RequestConnectOutgoing in terms of Context
s.net.RequestConnectOutgoing(true, s.ctx.Done())
}
}
}
// Syncs the client with the network. sync asks the network for the last known block and tries to sync the system
// up to the highest number it gets. When a certificate is provided, the sync function attempts to keep trying
// to fetch the matching block or abort when the catchup service exits.
func (s *Service) sync(cert *PendingUnmatchedCertificate) {
// Only run sync once at a time
// Store start time of sync - in NS so we can compute time.Duration (which is based on NS)
start := time.Now()
timeInNS := start.UnixNano()
if !atomic.CompareAndSwapInt64(&s.syncStartNS, 0, timeInNS) {
s.log.Infof("previous sync from %d still running (now=%d)", atomic.LoadInt64(&s.syncStartNS), timeInNS)
return
}
defer atomic.StoreInt64(&s.syncStartNS, 0)
pr := s.ledger.LastRound()
s.log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.CatchupStartEvent, telemetryspec.CatchupStartEventDetails{
StartRound: uint64(pr),
})
if cert == nil {
seedLookback := uint64(2)
proto, err := s.ledger.ConsensusParams(pr)
if err != nil {
s.log.Errorf("catchup: could not get consensus parameters for round %v: %v", pr, err)
} else {
seedLookback = proto.SeedLookback
}
s.pipelinedFetch(seedLookback)
} else {
// we want to fetch a single round. no need to be concerned about lookback.
s.fetchRound(cert.Cert, cert.VoteVerifier)
}
initSync := false
	// close the initial sync channel if not already closed
if atomic.CompareAndSwapUint32(&s.initialSyncNotified, 0, 1) {
close(s.InitialSyncDone)
initSync = true
}
elapsedTime := time.Now().Sub(start)
s.log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.CatchupStopEvent, telemetryspec.CatchupStopEventDetails{
StartRound: uint64(pr),
EndRound: uint64(s.ledger.LastRound()),
Time: elapsedTime,
InitSync: initSync,
})
s.log.Infof("Catchup Service: finished catching up, now at round %v (previously %v). Total time catching up %v.", s.ledger.LastRound(), pr, elapsedTime)
}
// TODO this doesn't actually use the digest from cert!
func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) {
blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest
fetcher := s.latestRoundFetcherFactory.NewOverGossip(protocol.UniEnsBlockReqTag)
defer func() {
fetcher.Close()
}()
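	// Keep retrying until the certified round is in the ledger or the service is told to stop.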
for s.ledger.LastRound() < cert.Round {
if fetcher.OutOfPeers(cert.Round) {
fetcher.Close()
// refresh peers and try again
logging.Base().Warn("fetchRound found no outgoing peers")
s.net.RequestConnectOutgoing(true, s.ctx.Done())
fetcher = s.latestRoundFetcherFactory.NewOverGossip(protocol.UniEnsBlockReqTag)
}
// Ask the fetcher to get the block somehow
block, fetchedCert, rpcc, err := s.innerFetch(fetcher, cert.Round)
if err != nil {
select {
case <-s.ctx.Done():
logging.Base().Debugf("fetchRound was asked to quit before we could acquire the block")
return
default:
}
logging.Base().Warnf("fetchRound could not acquire block, fetcher errored out: %v", err)
continue
}
rpcc.Close()
if block.Hash() == blockHash && block.ContentsMatchHeader() {
s.ledger.EnsureBlock(block, cert)
return
}
// Otherwise, fetcher gave us the wrong block
logging.Base().Warnf("fetcher gave us bad/wrong block (for round %d): fetched hash %v; want hash %v", cert.Round, block.Hash(), blockHash)
// As a failsafe, if the cert we fetched is valid but for the wrong block, panic as loudly as possible
if cert.Round == fetchedCert.Round &&
cert.Proposal.BlockDigest != fetchedCert.Proposal.BlockDigest &&
fetchedCert.Authenticate(*block, s.ledger, verifier) == nil {
s := "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
s += "!!!!!!!!!! FORK DETECTED !!!!!!!!!!!\n"
s += "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
s += "fetchRound called with a cert authenticating block with hash %v.\n"
s += "We fetched a valid cert authenticating a different block, %v. This indicates a fork.\n\n"
s += "Cert from our agreement service:\n%#v\n\n"
s += "Cert from the fetcher:\n%#v\n\n"
s += "Block from the fetcher:\n%#v\n\n"
s += "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
s += "!!!!!!!!!! FORK DETECTED !!!!!!!!!!!\n"
s += "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"
s = fmt.Sprintf(s, cert.Proposal.BlockDigest, fetchedCert.Proposal.BlockDigest, cert, fetchedCert, block)
fmt.Println(s)
logging.Base().Error(s)
}
}
}
// nextRoundIsNotSupported returns true if the next round upgrades to a protocol version
// which is not supported.
// In case of an error, it returns false
func (s *Service) nextRoundIsNotSupported(nextRound basics.Round) bool {
lastLedgerRound := s.ledger.LastRound()
supportedUpgrades := config.Consensus
block, err := s.ledger.Block(lastLedgerRound)
if err != nil {
s.log.Errorf("nextRoundIsNotSupported: could not retrieve last block (%d) from the ledger : %v", lastLedgerRound, err)
return false
}
bh := block.BlockHeader
_, isSupportedUpgrade := supportedUpgrades[bh.NextProtocol]
if bh.NextProtocolSwitchOn > 0 && !isSupportedUpgrade {
// Save the last supported round number
// It is not necessary to check bh.NextProtocolSwitchOn < s.lastSupportedRound
// since there cannot be two protocol updates scheduled.
s.lastSupportedRound = bh.NextProtocolSwitchOn - 1
if nextRound >= bh.NextProtocolSwitchOn {
return true
}
}
return false
}
// handleUnsupportedRound receives a verified unsupported round: nextUnsupportedRound
// Checks if the last supported round was added to the ledger, and stops the service.
func (s *Service) handleUnsupportedRound(nextUnsupportedRound basics.Round) {
s.log.Infof("Catchup Service: round %d is not approved. Service will stop once the last supported round is added to the ledger.",
nextUnsupportedRound)
// If the next round is an unsupported round, need to stop the
// catchup service. Should stop after the last supported round
// is added to the ledger.
lr := s.ledger.LastRound()
// Ledger writes are in order. >= guarantees last supported round is added to the ledger.
if lr >= s.lastSupportedRound {
s.log.Infof("Catchup Service: finished catching up to the last supported round %d. The subsequent rounds are not supported. Service is stopping.",
lr)
s.cancel()
}
}
| 1 | 40,588 | I want to propose couple of different names for catchpointWriting: syncInterruptedWaitingCatchpointWriting syncWaitingForCatchpointWriting catchpointWriting is lacking context, and I found it difficult to understand the logic without this context. | algorand-go-algorand | go |
@@ -92,7 +92,7 @@ class InventoryImporter(object):
Args:
session (object): Database session.
- model (str): Model name to create.
+ model (Model): Model name to create.
dao (object): Data Access Object from dao.py
service_config (ServiceConfig): Service configuration.
inventory_id (str): Inventory id to import from | 1 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Importer implementations. """
# pylint: disable=unused-argument,too-many-instance-attributes
# pylint: disable=no-self-use,not-callable,too-many-lines,too-many-locals
from StringIO import StringIO
import traceback
import json
from google.cloud.forseti.services.utils import get_sql_dialect
from google.cloud.forseti.services.utils import to_full_resource_name
from google.cloud.forseti.services.utils import to_type_name
from google.cloud.forseti.services.inventory.storage import Storage as Inventory
class ResourceCache(dict):
"""Resource cache."""
def __setitem__(self, key, value):
"""Overriding to assert the keys does not exist previously.
Args:
key (object): Key into the dict.
value (object): Value to set.
Raises:
Exception: If the key already exists in the dict.
"""
if key in self:
raise Exception('Key should not exist: {}'.format(key))
super(ResourceCache, self).__setitem__(key, value)
class EmptyImporter(object):
"""Imports an empty model."""
def __init__(self, session, model, dao, _, *args, **kwargs):
"""Create an EmptyImporter which creates an empty stub model.
Args:
session (object): Database session.
model (str): Model name to create.
dao (object): Data Access Object from dao.py.
_ (object): Unused.
*args (list): Unused.
**kwargs (dict): Unused.
"""
self.session = session
self.model = model
self.dao = dao
def run(self):
"""Runs the import."""
self.session.add(self.model)
self.model.add_description(
json.dumps(
{'source':'empty', 'pristine':True}
)
)
self.model.set_done()
self.session.commit()
class InventoryImporter(object):
"""Imports data from Inventory."""
def __init__(self,
session,
model,
dao,
service_config,
inventory_id,
*args,
**kwargs):
"""Create a Inventory importer which creates a model from the inventory.
Args:
session (object): Database session.
model (str): Model name to create.
dao (object): Data Access Object from dao.py
service_config (ServiceConfig): Service configuration.
inventory_id (str): Inventory id to import from
*args (list): Unused.
**kwargs (dict): Unused.
"""
self.session = session
self.model = model
self.dao = dao
self.service_config = service_config
self.inventory_id = inventory_id
self.session.add(self.model)
self.role_cache = {}
self.permission_cache = {}
self.resource_cache = ResourceCache()
self._membership_cache = []
self.member_cache = {}
self.member_cache_policies = {}
self.found_root = False
def run(self):
"""Runs the import.
Raises:
NotImplementedError: If the importer encounters an unknown
inventory type.
"""
gcp_type_list = [
'organization',
'folder',
'project',
'role',
'appengine_app',
'appengine_service',
'appengine_version',
'appengine_instance',
'serviceaccount',
'serviceaccount_key',
'bucket',
'dataset',
'compute_project',
'image',
'instancegroup',
'instancegroupmanager',
'instancetemplate',
'instance',
'firewall',
'backendservice',
'forwardingrule',
'network',
'subnetwork',
'cloudsqlinstance',
'kubernetes_cluster',
]
gsuite_type_list = [
'gsuite_group',
'gsuite_user',
]
member_type_list = [
'gsuite_user_member',
'gsuite_group_member',
]
autoflush = self.session.autoflush
try:
self.session.autoflush = False
item_counter = 0
last_res_type = None
with Inventory(self.session, self.inventory_id, True) as inventory:
root = inventory.get_root()
self.model.add_description(json.dumps({
'source': 'inventory',
'source_info': {'inventory_index_id': inventory.index.id},
'source_root': self._type_name(root),
'pristine': True,
'gsuite_enabled': inventory.type_exists(
['gsuite_group', 'gsuite_user'])
}))
if root.get_type() in ['organization']:
self.found_root = True
if not self.found_root:
raise Exception(
'Cannot import inventory without organization root')
for resource in inventory.iter(gcp_type_list):
item_counter += 1
last_res_type = self._store_resource(resource,
last_res_type)
self._store_resource(None, last_res_type)
self.session.flush()
for policy in inventory.iter(gcp_type_list,
fetch_dataset_policy=True):
item_counter += 1
self._convert_dataset_policy(policy)
self.session.flush()
for config in inventory.iter(
gcp_type_list, fetch_service_config=True):
item_counter += 1
self._convert_service_config(config)
self.session.flush()
for resource in inventory.iter(gsuite_type_list):
self._store_gsuite_principal(resource)
self.session.flush()
self._store_gsuite_membership_pre()
for child, parent in inventory.iter(member_type_list,
with_parent=True):
self._store_gsuite_membership(parent, child)
self._store_gsuite_membership_post()
self.dao.denorm_group_in_group(self.session)
self._store_iam_policy_pre()
for policy in inventory.iter(gcp_type_list,
fetch_iam_policy=True):
self._store_iam_policy(policy)
self._convert_iam_policy(policy)
self._store_iam_policy_post()
except Exception: # pylint: disable=broad-except
buf = StringIO()
traceback.print_exc(file=buf)
buf.seek(0)
message = buf.read()
self.model.set_error(message)
else:
self.model.add_warning(inventory.index.warnings)
self.model.set_done(item_counter)
finally:
self.session.commit()
self.session.autoflush = autoflush
def _store_gsuite_principal(self, principal):
"""Store a gsuite principal such as a group, user or member.
Args:
principal (object): object to store.
Raises:
Exception: if the principal type is unknown.
"""
gsuite_type = principal.get_type()
data = principal.get_data()
if gsuite_type == 'gsuite_user':
member = 'user/{}'.format(data['primaryEmail'])
elif gsuite_type == 'gsuite_group':
member = 'group/{}'.format(data['email'])
else:
raise Exception('Unknown gsuite principal: {}'.format(gsuite_type))
if member not in self.member_cache:
m_type, name = member.split('/', 1)
self.member_cache[member] = self.dao.TBL_MEMBER(
name=member,
type=m_type,
member_name=name)
def _store_gsuite_membership_pre(self):
"""Prepare storing gsuite memberships."""
pass
def _store_gsuite_membership_post(self):
"""Flush storing gsuite memberships."""
if not self.member_cache:
return
# Store all members before we flush the memberships
self.session.add_all(self.member_cache.values())
self.session.flush()
# session.execute automatically flushes
if self._membership_cache:
if get_sql_dialect(self.session) == 'sqlite':
# SQLite doesn't support bulk insert
for item in self._membership_cache:
stmt = self.dao.TBL_MEMBERSHIP.insert(
dict(group_name=item[0],
members_name=item[1]))
self.session.execute(stmt)
else:
dicts = [dict(group_name=item[0], members_name=item[1])
for item in self._membership_cache]
stmt = self.dao.TBL_MEMBERSHIP.insert(dicts)
self.session.execute(stmt)
def _store_gsuite_membership(self, parent, child):
"""Store a gsuite principal such as a group, user or member.
Args:
parent (object): parent part of membership.
child (object): member item
"""
def member_name(child):
"""Create the type:name representation for a non-group.
Args:
child (object): member to create representation from.
Returns:
str: type:name representation of the member.
"""
data = child.get_data()
return '{}/{}'.format(data['type'].lower(),
data['email'])
def group_name(parent):
"""Create the type:name representation for a group.
Args:
parent (object): group to create representation from.
Returns:
str: group:name representation of the group.
"""
data = parent.get_data()
return 'group/{}'.format(data['email'])
# Gsuite group members don't have to be part
# of this domain, so we might see them for
# the first time here.
member = member_name(child)
if member not in self.member_cache:
m_type, name = member.split('/', 1)
self.member_cache[member] = self.dao.TBL_MEMBER(
name=member,
type=m_type,
member_name=name)
self._membership_cache.append(
(group_name(parent), member))
def _store_iam_policy_pre(self):
"""Executed before iam policies are inserted."""
pass
def _store_iam_policy_post(self):
"""Executed after iam policies are inserted."""
# Store all members which are mentioned in policies
# that were not previously in groups or gsuite users.
self.session.add_all(self.member_cache_policies.values())
self.session.flush()
def _store_iam_policy(self, policy):
"""Store the iam policy of the resource.
Args:
policy (object): IAM policy to store.
Raises:
KeyError: if member could not be found in any cache.
"""
bindings = policy.get_data().get('bindings', [])
for binding in bindings:
role = binding['role']
if role not in self.role_cache:
msg = 'Role reference in iam policy not found: {}'.format(role)
self.model.add_warning(msg)
continue
#binding['members'] can have duplicate ids
members = set(binding['members'])
for member in members:
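                # Normalize 'type:email' bindings to 'type/email' so they match the member cache keys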
member = member.replace(':', '/', 1)
# We still might hit external users or groups
# that we haven't seen in gsuite.
if member not in self.member_cache and \
member not in self.member_cache_policies:
try:
# This is the default case, e.g. 'group/foobar'
m_type, name = member.split('/', 1)
except ValueError:
                    # Special groups like 'allUsers' don't specify a type
m_type, name = member, member
self.member_cache_policies[member] = self.dao.TBL_MEMBER(
name=member,
type=m_type,
member_name=name)
self.session.add(self.member_cache_policies[member])
# Get all the member objects to reference
# in the binding row
db_members = []
for member in members:
member = member.replace(':', '/', 1)
if member not in self.member_cache:
if member not in self.member_cache_policies:
raise KeyError(member)
db_members.append(self.member_cache_policies[member])
continue
db_members.append(self.member_cache[member])
self.session.add(
self.dao.TBL_BINDING(
resource_type_name=self._type_name(policy),
role_name=role,
members=db_members))
def _store_resource(self, resource, last_res_type=None):
"""Store an inventory resource in the database.
Args:
resource (object): Resource object to convert from.
last_res_type (str): Previously processed resource type used to
spot transition between types to execute pre/handler/post
accordingly.
Returns:
str: Resource type that was processed during the execution.
"""
handlers = {
'organization': (None,
self._convert_organization,
None),
'folder': (None,
self._convert_folder,
None),
'project': (None,
self._convert_project,
None),
'role': (self._convert_role_pre,
self._convert_role,
self._convert_role_post),
'appengine_app': (None,
self._convert_appengine_resource,
None),
'appengine_service': (None,
self._convert_appengine_resource,
None),
'appengine_version': (None,
self._convert_appengine_resource,
None),
'appengine_instance': (None,
self._convert_appengine_resource,
None),
'serviceaccount': (None,
self._convert_serviceaccount,
None),
'serviceaccount_key': (None,
self._convert_serviceaccount_key,
None),
'bucket': (None,
self._convert_bucket,
None),
'object': (None,
self._convert_object,
None),
'dataset': (None,
self._convert_dataset,
None),
'compute_project': (None,
self._convert_computeproject,
None),
'image': (None,
self._convert_image,
None),
'instancegroup': (None,
self._convert_instancegroup,
None),
'instancegroupmanager': (None,
self._convert_instancegroupmanager,
None),
'instancetemplate': (None,
self._convert_instancetemplate,
None),
'instance': (None,
self._convert_instance,
None),
'firewall': (None,
self._convert_firewall,
None),
'backendservice': (None,
self._convert_backendservice,
None),
'forwardingrule': (None,
self._convert_forwardingrule,
None),
'network': (None,
self._convert_network,
None),
'subnetwork': (None,
self._convert_subnetwork,
None),
'cloudsqlinstance': (None,
self._convert_cloudsqlinstance,
None),
'kubernetes_cluster': (None,
self._convert_kubernetes_cluster,
None),
None: (None, None, None),
}
res_type = resource.get_type() if resource else None
if res_type not in handlers:
self.model.add_warning('No handler for type "{}"'.format(res_type))
if res_type != last_res_type:
post = handlers[last_res_type][-1]
if post:
post()
pre = handlers[res_type][0]
if pre:
pre()
handler = handlers[res_type][1]
if handler:
handler(resource)
return res_type
return None
def _convert_object(self, gcsobject):
"""Not Implemented
Args:
gcsobject (object): Object to store.
"""
def _convert_appengine_resource(self, gae_resource):
"""Convert an AppEngine resource to a database object.
Args:
gae_resource (dict): An appengine resource to store.
"""
data = gae_resource.get_data()
parent, full_res_name, type_name = self._full_resource_name(
gae_resource)
resource = self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=gae_resource.get_key(),
type=gae_resource.get_type(),
display_name=data.get('name', ''),
data=gae_resource.get_data_raw(),
parent=parent)
self.session.add(resource)
self._add_to_cache(gae_resource, resource)
def _convert_bucket(self, bucket):
"""Convert a bucket to a database object.
Args:
bucket (object): Bucket to store.
"""
data = bucket.get_data()
parent, full_res_name, type_name = self._full_resource_name(
bucket)
resource = self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=bucket.get_key(),
type=bucket.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=bucket.get_data_raw(),
parent=parent)
self.session.add(resource)
self._add_to_cache(bucket, resource)
def _convert_kubernetes_cluster(self, cluster):
"""Convert an AppEngine resource to a database object.
Args:
cluster (dict): A Kubernetes cluster resource to store.
"""
data = cluster.get_data()
parent, full_res_name, type_name = self._full_resource_name(
cluster)
resource = self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=cluster.get_key(),
type=cluster.get_type(),
display_name=data.get('name', ''),
data=cluster.get_data_raw(),
parent=parent)
self.session.add(resource)
self._add_to_cache(cluster, resource)
def _convert_service_config(self, service_config):
"""Convert Kubernetes Service Config to a database object.
Args:
service_config (dict): A Service Config resource to store.
"""
parent, full_res_name = self._get_parent(service_config)
sc_type_name = to_type_name(
service_config.get_type_class(),
parent.type_name)
sc_res_name = to_full_resource_name(full_res_name, sc_type_name)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=sc_res_name,
type_name=sc_type_name,
name=service_config.get_key(),
type=service_config.get_type_class(),
data=service_config.get_data_raw(),
parent=parent))
def _convert_dataset(self, dataset):
"""Convert a dataset to a database object.
Args:
dataset (object): Dataset to store.
"""
parent, full_res_name, type_name = self._full_resource_name(
dataset)
resource = self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=dataset.get_key(),
type=dataset.get_type(),
data=dataset.get_data_raw(),
parent=parent)
self.session.add(resource)
self._add_to_cache(dataset, resource)
def _convert_dataset_policy(self, dataset_policy):
"""Convert a dataset policy to a database object.
Args:
dataset_policy (object): Dataset policy to store.
"""
# TODO: Dataset policies should be integrated in the model, not stored
# as a resource.
parent, full_res_name = self._get_parent(dataset_policy)
policy_type_name = to_type_name(
dataset_policy.get_type_class(),
parent.type_name)
policy_res_name = to_full_resource_name(full_res_name, policy_type_name)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=policy_res_name,
type_name=policy_type_name,
name=dataset_policy.get_key(),
type=dataset_policy.get_type_class(),
data=dataset_policy.get_data_raw(),
parent=parent))
def _convert_computeproject(self, computeproject):
"""Convert a computeproject to a database object.
Args:
computeproject (object): computeproject to store.
"""
data = computeproject.get_data()
parent, full_res_name, type_name = self._full_resource_name(
computeproject)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=computeproject.get_key(),
type=computeproject.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=computeproject.get_data_raw(),
parent=parent))
def _convert_iam_policy(self, iam_policy):
"""Convert an IAM policy to a database object.
Args:
iam_policy (object): IAM policy to store.
"""
parent, full_res_name = self._get_parent(iam_policy)
iam_policy_type_name = to_type_name(
iam_policy.get_type_class(),
':'.join(parent.type_name.split('/')))
iam_policy_full_res_name = to_full_resource_name(
full_res_name,
iam_policy_type_name)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=iam_policy_full_res_name,
type_name=iam_policy_type_name,
name=iam_policy.get_key(),
type=iam_policy.get_type_class(),
data=iam_policy.get_data_raw(),
parent=parent))
def _convert_image(self, image):
"""Convert a image to a database object.
Args:
image (object): Image to store.
"""
data = image.get_data()
parent, full_res_name, type_name = self._full_resource_name(
image)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=image.get_key(),
type=image.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=image.get_data_raw(),
parent=parent))
def _convert_instancegroup(self, instancegroup):
"""Convert a instancegroup to a database object.
Args:
instancegroup (object): Instancegroup to store.
"""
data = instancegroup.get_data()
parent, full_res_name, type_name = self._full_resource_name(
instancegroup)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=instancegroup.get_key(),
type=instancegroup.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=instancegroup.get_data_raw(),
parent=parent))
def _convert_instancegroupmanager(self, instancegroupmanager):
"""Convert a instancegroupmanager to a database object.
Args:
instancegroupmanager (object): InstanceGroupManager to store.
"""
data = instancegroupmanager.get_data()
parent, full_res_name, type_name = self._full_resource_name(
instancegroupmanager)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=instancegroupmanager.get_key(),
type=instancegroupmanager.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=instancegroupmanager.get_data_raw(),
parent=parent))
def _convert_instancetemplate(self, instancetemplate):
"""Convert a instancetemplate to a database object.
Args:
instancetemplate (object): InstanceTemplate to store.
"""
data = instancetemplate.get_data()
parent, full_res_name, type_name = self._full_resource_name(
instancetemplate)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=instancetemplate.get_key(),
type=instancetemplate.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=instancetemplate.get_data_raw(),
parent=parent))
def _convert_instance(self, instance):
"""Convert a instance to a database object.
Args:
instance (object): Instance to store.
"""
data = instance.get_data()
parent, full_res_name, type_name = self._full_resource_name(
instance)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=instance.get_key(),
type=instance.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=instance.get_data_raw(),
parent=parent))
def _convert_firewall(self, firewall):
"""Convert a firewall to a database object.
Args:
firewall (object): Firewall to store.
"""
data = firewall.get_data()
parent, full_res_name, type_name = self._full_resource_name(
firewall)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=firewall.get_key(),
type=firewall.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=firewall.get_data_raw(),
parent=parent))
def _convert_backendservice(self, backendservice):
"""Convert a backendservice to a database object.
Args:
backendservice (object): Backendservice to store.
"""
data = backendservice.get_data()
parent, full_res_name, type_name = self._full_resource_name(
backendservice)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=backendservice.get_key(),
type=backendservice.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=backendservice.get_data_raw(),
parent=parent))
def _convert_forwardingrule(self, forwardingrule):
"""Convert a forwarding rule to a database object.
Args:
forwardingrule (object): ForwardingRule to store.
"""
data = forwardingrule.get_data()
parent, full_res_name, type_name = self._full_resource_name(
forwardingrule)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=forwardingrule.get_key(),
type=forwardingrule.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=forwardingrule.get_data_raw(),
parent=parent))
def _convert_network(self, network):
"""Convert a network to a database object.
Args:
network (object): Network to store.
"""
data = network.get_data()
parent, full_res_name, type_name = self._full_resource_name(
network)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=network.get_key(),
type=network.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=network.get_data_raw(),
parent=parent))
def _convert_subnetwork(self, subnetwork):
"""Convert a subnetwork to a database object.
Args:
subnetwork (object): Subnetwork to store.
"""
data = subnetwork.get_data()
parent, full_res_name, type_name = self._full_resource_name(
subnetwork)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=subnetwork.get_key(),
type=subnetwork.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=subnetwork.get_data_raw(),
parent=parent))
def _convert_cloudsqlinstance(self, cloudsqlinstance):
"""Convert a cloudsqlinstance to a database object.
Args:
cloudsqlinstance (object): Cloudsql to store.
"""
data = cloudsqlinstance.get_data()
parent, full_res_name, type_name = self._full_resource_name(
cloudsqlinstance)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=cloudsqlinstance.get_key(),
type=cloudsqlinstance.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=cloudsqlinstance.get_data_raw(),
parent=parent))
def _convert_serviceaccount(self, service_account):
"""Convert a service account to a database object.
Args:
service_account (object): Service account to store.
"""
data = service_account.get_data()
parent, full_res_name, type_name = self._full_resource_name(
service_account)
resource = self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=service_account.get_key(),
type=service_account.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=service_account.get_data_raw(),
parent=parent)
self.session.add(resource)
self._add_to_cache(service_account, resource)
def _convert_serviceaccount_key(self, service_account_key):
"""Convert a service account key to a database object.
Args:
service_account_key (object): Service account key to store.
"""
data = service_account_key.get_data()
parent, full_res_name, type_name = self._full_resource_name(
service_account_key)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=service_account_key.get_key(),
type=service_account_key.get_type(),
display_name=data.get('displayName', ''),
email=data.get('email', ''),
data=service_account_key.get_data_raw(),
parent=parent))
def _convert_folder(self, folder):
"""Convert a folder to a database object.
Args:
folder (object): Folder to store.
"""
data = folder.get_data()
if self._is_root(folder):
parent, type_name = None, self._type_name(folder)
full_res_name = type_name
else:
parent, full_res_name, type_name = self._full_resource_name(
folder)
resource = self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=folder.get_key(),
type=folder.get_type(),
display_name=data.get('displayName', ''),
data=folder.get_data_raw(),
parent=parent)
self.session.add(resource)
self._add_to_cache(folder, resource)
def _convert_project(self, project):
"""Convert a project to a database object.
Args:
project (object): Project to store.
"""
data = project.get_data()
if self._is_root(project):
parent, type_name = None, self._type_name(project)
full_res_name = type_name
else:
parent, full_res_name, type_name = self._full_resource_name(
project)
resource = self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=project.get_key(),
type=project.get_type(),
display_name=data.get('name', ''),
data=project.get_data_raw(),
parent=parent)
self.session.add(resource)
self._add_to_cache(project, resource)
def _convert_role_pre(self):
"""Executed before roles are handled. Prepares for bulk insert."""
pass
def _convert_role_post(self):
"""Executed after all roles were handled. Performs bulk insert."""
self.session.add_all(self.permission_cache.values())
self.session.add_all(self.role_cache.values())
def _convert_role(self, role):
"""Convert a role to a database object.
Args:
role (object): Role to store.
"""
data = role.get_data()
is_custom = not data['name'].startswith('roles/')
db_permissions = []
if 'includedPermissions' not in data:
self.model.add_warning(
'Role missing permissions: {}'.format(
data.get('name', '<missing name>')))
else:
for perm_name in data['includedPermissions']:
if perm_name not in self.permission_cache:
permission = self.dao.TBL_PERMISSION(
name=perm_name)
self.permission_cache[perm_name] = permission
db_permissions.append(self.permission_cache[perm_name])
dbrole = self.dao.TBL_ROLE(
name=data['name'],
title=data.get('title', ''),
stage=data.get('stage', ''),
description=data.get('description', ''),
custom=is_custom,
permissions=db_permissions)
self.role_cache[data['name']] = dbrole
if is_custom:
parent, full_res_name, type_name = self._full_resource_name(role)
self.session.add(
self.dao.TBL_RESOURCE(
full_name=full_res_name,
type_name=type_name,
name=role.get_key(),
type=role.get_type(),
display_name=data.get('title'),
data=role.get_data_raw(),
parent=parent))
def _convert_organization(self, organization):
"""Convert an organization a database object.
Args:
organization (object): Organization to store.
"""
# Under current assumptions, organization is always root
self.found_root = True
data = organization.get_data()
type_name = self._type_name(organization)
org = self.dao.TBL_RESOURCE(
full_name=to_full_resource_name('', type_name),
type_name=type_name,
name=organization.get_key(),
type=organization.get_type(),
display_name=data.get('displayName', ''),
data=organization.get_data_raw(),
parent=None)
self._add_to_cache(organization, org)
self.session.add(org)
def _add_to_cache(self, resource, dbobj):
"""Add a resource to the cache for parent lookup.
Args:
resource (object): Resource to put in the cache.
dbobj (object): Database object.
"""
type_name = self._type_name(resource)
full_res_name = dbobj.full_name
self.resource_cache[type_name] = (dbobj, full_res_name)
def _get_parent(self, resource):
"""Return the parent object for a resource from cache.
Args:
resource (object): Resource whose parent to look for.
Returns:
tuple: cached object and full resource name
"""
return self.resource_cache[self._parent_type_name(resource)]
def _type_name(self, resource):
"""Return the type/name for that resource.
Args:
resource (object): Resource to retrieve type/name for.
Returns:
str: type/name representation of the resource.
"""
return to_type_name(
resource.get_type(),
resource.get_key())
def _parent_type_name(self, resource):
"""Return the type/name for a resource's parent.
Args:
resource (object): Resource whose parent should be returned.
Returns:
str: type/name representation of the resource's parent.
"""
return to_type_name(
resource.get_parent_type(),
resource.get_parent_key())
def _full_resource_name(self, resource):
"""Returns the parent object, full resource name and type name.
Args:
resource (object): Resource whose full resource name and parent
should be returned.
Returns:
str: full resource name for the provided resource.
"""
type_name = self._type_name(resource)
parent, full_res_name = self._get_parent(resource)
full_resource_name = to_full_resource_name(full_res_name, type_name)
return parent, full_resource_name, type_name
def _is_root(self, resource):
"""Checks if the resource is an inventory root. Result is cached.
Args:
resource (object): Resource to check.
Returns:
bool: Whether the resource is root or not
"""
if not self.found_root:
is_root = \
resource.get_type() == resource.get_parent_type() and \
resource.get_key() == resource.get_parent_key()
if is_root:
self.found_root = True
return is_root
return False
def by_source(source):
"""Helper to resolve client provided import sources.
Args:
source (str): Source to import from.
Returns:
Importer: Chosen by source.
"""
return {
'INVENTORY': InventoryImporter,
'EMPTY': EmptyImporter,
}[source.upper()]
| 1 | 29,446 | Do you still need the `name` in the arg description, if this is not `str` type anymore.? | forseti-security-forseti-security | py |
@@ -42,15 +42,5 @@ def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
class DistOptimizerHook(OptimizerHook):
-
- def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
- self.grad_clip = grad_clip
- self.coalesce = coalesce
- self.bucket_size_mb = bucket_size_mb
-
- def after_train_iter(self, runner):
- runner.optimizer.zero_grad()
- runner.outputs['loss'].backward()
- if self.grad_clip is not None:
- self.clip_grads(runner.model.parameters())
- runner.optimizer.step()
+ """Deprecated optimizer hook for distributed training"""
+ pass | 1 | from collections import OrderedDict
import torch.distributed as dist
from mmcv.runner import OptimizerHook
from torch._utils import (_flatten_dense_tensors, _take_tensors,
_unflatten_dense_tensors)
def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
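    """All-reduce a list of tensors using flattened buckets.
    Tensors are grouped by size limit (bucket_size_mb) or by dtype, flattened
    into one buffer per bucket, all-reduced once, divided by world_size and
    copied back into the original tensors in place.
    """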
if bucket_size_mb > 0:
bucket_size_bytes = bucket_size_mb * 1024 * 1024
buckets = _take_tensors(tensors, bucket_size_bytes)
else:
buckets = OrderedDict()
for tensor in tensors:
tp = tensor.type()
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(tensor)
buckets = buckets.values()
for bucket in buckets:
flat_tensors = _flatten_dense_tensors(bucket)
dist.all_reduce(flat_tensors)
flat_tensors.div_(world_size)
for tensor, synced in zip(
bucket, _unflatten_dense_tensors(flat_tensors, bucket)):
tensor.copy_(synced)
def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
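    """Average the gradients of params across all distributed workers.
    Only parameters that require grad and currently hold a gradient are
    reduced; coalesce enables the bucketed path implemented above.
    """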
grads = [
param.grad.data for param in params
if param.requires_grad and param.grad is not None
]
world_size = dist.get_world_size()
if coalesce:
_allreduce_coalesced(grads, world_size, bucket_size_mb)
else:
for tensor in grads:
dist.all_reduce(tensor.div_(world_size))
class DistOptimizerHook(OptimizerHook):
def __init__(self, grad_clip=None, coalesce=True, bucket_size_mb=-1):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
def after_train_iter(self, runner):
runner.optimizer.zero_grad()
runner.outputs['loss'].backward()
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
| 1 | 19,887 | We may raise a warning. | open-mmlab-mmdetection | py |
@@ -2,8 +2,10 @@ import AbstractModel from 'hospitalrun/models/abstract';
import DS from 'ember-data';
export default AbstractModel.extend({
- patient: DS.belongsTo('patient'),
+ // Attributes
name: DS.attr('string'),
icd9CMCode: DS.attr('string'),
- icd10Code: DS.attr('string')
+ icd10Code: DS.attr('string'),
+ // Associations
+ patient: DS.belongsTo('patient'),
}); | 1 | import AbstractModel from 'hospitalrun/models/abstract';
import DS from 'ember-data';
export default AbstractModel.extend({
patient: DS.belongsTo('patient'),
name: DS.attr('string'),
icd9CMCode: DS.attr('string'),
icd10Code: DS.attr('string')
});
| 1 | 13,292 | Trailing comma caused the eslint test to fail | HospitalRun-hospitalrun-frontend | js |
@@ -0,0 +1,6 @@
+from werkzeug.utils import import_string
+
+from .config import CFG_RELATIONSHIPS_NODE_ENGINE, CFG_RELATIONSHIPS_EDGE_ENGINE
+
+Node = import_string(CFG_RELATIONSHIPS_NODE_ENGINE)
+Edge = import_string(CFG_RELATIONSHIPS_EDGE_ENGINE) | 1 | 1 | 15,073 | I think you should use `app.config`. In the usual case, the config file is not overwritten, there is additional config file outside of the source of `Invenio`. | inveniosoftware-invenio | py |
|
@@ -86,6 +86,16 @@ public final class JwtIssuerAuthenticationManagerResolver implements Authenticat
new TrustedIssuerJwtAuthenticationManagerResolver(
Collections.unmodifiableCollection(trustedIssuers)::contains));
}
+
+ /**
+ * Construct a {@link JwtIssuerAuthenticationManagerResolver} using the provided
+ * parameters
+ * @param trustedIssuer a predicate to determine whether the issuer should be trusted or not
+ */
+ public JwtIssuerAuthenticationManagerResolver(Predicate<String> trustedIssuer) {
+ this.authenticationManager = new ResolvingAuthenticationManager(
+ new TrustedIssuerJwtAuthenticationManagerResolver(trustedIssuer));
+ }
/**
* Construct a {@link JwtIssuerAuthenticationManagerResolver} using the provided | 1 | /*
* Copyright 2002-2020 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.security.oauth2.server.resource.authentication;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Predicate;
import javax.servlet.http.HttpServletRequest;
import com.nimbusds.jwt.JWTParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.springframework.core.convert.converter.Converter;
import org.springframework.core.log.LogMessage;
import org.springframework.lang.NonNull;
import org.springframework.security.authentication.AuthenticationManager;
import org.springframework.security.authentication.AuthenticationManagerResolver;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.AuthenticationException;
import org.springframework.security.oauth2.core.OAuth2AuthenticationException;
import org.springframework.security.oauth2.jwt.JwtDecoder;
import org.springframework.security.oauth2.jwt.JwtDecoders;
import org.springframework.security.oauth2.server.resource.BearerTokenAuthenticationToken;
import org.springframework.security.oauth2.server.resource.InvalidBearerTokenException;
import org.springframework.util.Assert;
/**
* An implementation of {@link AuthenticationManagerResolver} that resolves a JWT-based
* {@link AuthenticationManager} based on the <a href=
* "https://openid.net/specs/openid-connect-core-1_0.html#IssuerIdentifier">Issuer</a> in
* a signed JWT (JWS).
*
* To use, this class must be able to determine whether or not the `iss` claim is trusted.
* Recall that anyone can stand up an authorization server and issue valid tokens to a
* resource server. The simplest way to achieve this is to supply a list of trusted
* issuers in the constructor.
*
* This class derives the Issuer from the `iss` claim found in the
* {@link HttpServletRequest}'s
* <a href="https://tools.ietf.org/html/rfc6750#section-1.2" target="_blank">Bearer
* Token</a>.
*
* @author Josh Cummings
* @since 5.3
*/
public final class JwtIssuerAuthenticationManagerResolver implements AuthenticationManagerResolver<HttpServletRequest> {
private final AuthenticationManager authenticationManager;
/**
* Construct a {@link JwtIssuerAuthenticationManagerResolver} using the provided
* parameters
* @param trustedIssuers a list of trusted issuers
*/
public JwtIssuerAuthenticationManagerResolver(String... trustedIssuers) {
this(Arrays.asList(trustedIssuers));
}
/**
* Construct a {@link JwtIssuerAuthenticationManagerResolver} using the provided
* parameters
* @param trustedIssuers a list of trusted issuers
*/
public JwtIssuerAuthenticationManagerResolver(Collection<String> trustedIssuers) {
Assert.notEmpty(trustedIssuers, "trustedIssuers cannot be empty");
this.authenticationManager = new ResolvingAuthenticationManager(
new TrustedIssuerJwtAuthenticationManagerResolver(
Collections.unmodifiableCollection(trustedIssuers)::contains));
}
/**
* Construct a {@link JwtIssuerAuthenticationManagerResolver} using the provided
* parameters
*
* Note that the {@link AuthenticationManagerResolver} provided in this constructor
* will need to verify that the issuer is trusted. This should be done via an
* allowlist.
*
* One way to achieve this is with a {@link Map} where the keys are the known issuers:
* <pre>
* Map<String, AuthenticationManager> authenticationManagers = new HashMap<>();
* authenticationManagers.put("https://issuerOne.example.org", managerOne);
* authenticationManagers.put("https://issuerTwo.example.org", managerTwo);
* JwtAuthenticationManagerResolver resolver = new JwtAuthenticationManagerResolver
* (authenticationManagers::get);
* </pre>
*
* The keys in the {@link Map} are the allowed issuers.
* @param issuerAuthenticationManagerResolver a strategy for resolving the
* {@link AuthenticationManager} by the issuer
*/
public JwtIssuerAuthenticationManagerResolver(
AuthenticationManagerResolver<String> issuerAuthenticationManagerResolver) {
Assert.notNull(issuerAuthenticationManagerResolver, "issuerAuthenticationManagerResolver cannot be null");
this.authenticationManager = new ResolvingAuthenticationManager(issuerAuthenticationManagerResolver);
}
/**
* Return an {@link AuthenticationManager} based off of the `iss` claim found in the
* request's bearer token
* @throws OAuth2AuthenticationException if the bearer token is malformed or an
* {@link AuthenticationManager} can't be derived from the issuer
*/
@Override
public AuthenticationManager resolve(HttpServletRequest request) {
return this.authenticationManager;
}
private static class ResolvingAuthenticationManager implements AuthenticationManager {
private final Converter<BearerTokenAuthenticationToken, String> issuerConverter = new JwtClaimIssuerConverter();
private final AuthenticationManagerResolver<String> issuerAuthenticationManagerResolver;
ResolvingAuthenticationManager(AuthenticationManagerResolver<String> issuerAuthenticationManagerResolver) {
this.issuerAuthenticationManagerResolver = issuerAuthenticationManagerResolver;
}
@Override
public Authentication authenticate(Authentication authentication) throws AuthenticationException {
Assert.isTrue(authentication instanceof BearerTokenAuthenticationToken,
"Authentication must be of type BearerTokenAuthenticationToken");
BearerTokenAuthenticationToken token = (BearerTokenAuthenticationToken) authentication;
String issuer = this.issuerConverter.convert(token);
AuthenticationManager authenticationManager = this.issuerAuthenticationManagerResolver.resolve(issuer);
if (authenticationManager == null) {
throw new InvalidBearerTokenException("Invalid issuer");
}
return authenticationManager.authenticate(authentication);
}
}
private static class JwtClaimIssuerConverter implements Converter<BearerTokenAuthenticationToken, String> {
@Override
public String convert(@NonNull BearerTokenAuthenticationToken authentication) {
String token = authentication.getToken();
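			// Only the issuer claim is read here to pick an AuthenticationManager;
			// signature validation is left to the resolved manager's JwtDecoder.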
try {
String issuer = JWTParser.parse(token).getJWTClaimsSet().getIssuer();
if (issuer != null) {
return issuer;
}
}
catch (Exception ex) {
throw new InvalidBearerTokenException(ex.getMessage(), ex);
}
throw new InvalidBearerTokenException("Missing issuer");
}
}
static class TrustedIssuerJwtAuthenticationManagerResolver implements AuthenticationManagerResolver<String> {
private final Log logger = LogFactory.getLog(getClass());
private final Map<String, AuthenticationManager> authenticationManagers = new ConcurrentHashMap<>();
private final Predicate<String> trustedIssuer;
TrustedIssuerJwtAuthenticationManagerResolver(Predicate<String> trustedIssuer) {
this.trustedIssuer = trustedIssuer;
}
@Override
public AuthenticationManager resolve(String issuer) {
if (this.trustedIssuer.test(issuer)) {
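				// Lazily create and cache one AuthenticationManager per trusted issuer,
				// building its JwtDecoder from the issuer's provider metadata.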
AuthenticationManager authenticationManager = this.authenticationManagers.computeIfAbsent(issuer,
(k) -> {
this.logger.debug("Constructing AuthenticationManager");
JwtDecoder jwtDecoder = JwtDecoders.fromIssuerLocation(issuer);
return new JwtAuthenticationProvider(jwtDecoder)::authenticate;
});
this.logger.debug(LogMessage.format("Resolved AuthenticationManager for issuer '%s'", issuer));
return authenticationManager;
}
else {
this.logger.debug("Did not resolve AuthenticationManager since issuer is not trusted");
}
return null;
}
}
}
| 1 | 17,748 | Will you please update the copyright message to now include `2021`? | spring-projects-spring-security | java |
@@ -38,8 +38,8 @@ void test_rw(const Offsets &offsets, const Data &data)
for (std::size_t index = 0; index < offsets.size() - 1; ++index)
{
- typename IndexedData::ResultType expected_result(&data[offsets[index]],
- &data[offsets[index + 1]]);
+ typename IndexedData::ResultType expected_result(data.begin() + offsets[index],
+ data.begin() + offsets[index + 1]);
BOOST_CHECK_EQUAL(expected_result, indexed_data.at(index));
}
} | 1 | #include "util/indexed_data.hpp"
#include "common/temporary_file.hpp"
#include "util/exception.hpp"
#include <boost/test/unit_test.hpp>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <typeinfo>
#include <vector>
BOOST_AUTO_TEST_SUITE(indexed_data)
using namespace osrm;
using namespace osrm::util;
BOOST_AUTO_TEST_CASE(check_variable_group_block_bitops)
{
VariableGroupBlock<16> variable_group_block;
BOOST_CHECK_EQUAL(variable_group_block.sum2bits(0xe4), 6);
BOOST_CHECK_EQUAL(variable_group_block.sum2bits(0x11111111), 8);
BOOST_CHECK_EQUAL(variable_group_block.sum2bits(0x55555555), 16);
BOOST_CHECK_EQUAL(variable_group_block.sum2bits(0xffffffff), 48);
BOOST_CHECK_EQUAL(variable_group_block.log256(0), 0);
BOOST_CHECK_EQUAL(variable_group_block.log256(1), 1);
BOOST_CHECK_EQUAL(variable_group_block.log256(255), 1);
BOOST_CHECK_EQUAL(variable_group_block.log256(256), 2);
BOOST_CHECK_EQUAL(variable_group_block.log256(1024), 2);
BOOST_CHECK_EQUAL(variable_group_block.log256(16777215), 3);
}
template <typename IndexedData, typename Offsets, typename Data>
void test_rw(const Offsets &offsets, const Data &data)
{
IndexedData indexed_data(offsets.begin(), offsets.end(), data.begin());
for (std::size_t index = 0; index < offsets.size() - 1; ++index)
{
typename IndexedData::ResultType expected_result(&data[offsets[index]],
&data[offsets[index + 1]]);
BOOST_CHECK_EQUAL(expected_result, indexed_data.at(index));
}
}
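// Note: for the last entry, offsets[index + 1] equals data.size(), so forming the end
// of the range as &data[offsets[index + 1]] subscripts one past the last element,
// which is out of range for std::vector::operator[] (checked/debug iterators assert
// on it). Iterator arithmetic expresses the same half-open range without that issue:
//   typename IndexedData::ResultType expected(data.begin() + offsets[index],
//                                             data.begin() + offsets[index + 1]);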
BOOST_AUTO_TEST_CASE(check_group_blocks_with_different_sizes)
{
std::vector<std::string> str = {
"", "A", "bb", "ccc", "dDDd", "E", "ff", "ggg", "hhhh", "I", "jj", "", "kkk",
"llll", "M", "nn", "ooo", "pppp", "q", "r", "S", "T", "", "u", "V", "W",
"X", "Y", "Z", "", "", "", "", "", "", "", "0", ""};
std::vector<unsigned char> name_char_data;
std::vector<std::uint32_t> name_offsets;
for (auto s : str)
{
name_offsets.push_back(name_char_data.size());
std::copy(s.begin(), s.end(), std::back_inserter(name_char_data));
}
name_offsets.push_back(name_char_data.size());
test_rw<IndexedData<VariableGroupBlock<0, std::string>>>(name_offsets, name_char_data);
test_rw<IndexedData<VariableGroupBlock<1, std::string>>>(name_offsets, name_char_data);
test_rw<IndexedData<VariableGroupBlock<16, std::string>>>(name_offsets, name_char_data);
test_rw<IndexedData<FixedGroupBlock<0, std::string>>>(name_offsets, name_char_data);
test_rw<IndexedData<FixedGroupBlock<1, std::string>>>(name_offsets, name_char_data);
test_rw<IndexedData<FixedGroupBlock<16, std::string>>>(name_offsets, name_char_data);
test_rw<IndexedData<FixedGroupBlock<32, std::string>>>(name_offsets, name_char_data);
test_rw<IndexedData<FixedGroupBlock<128, std::string>>>(name_offsets, name_char_data);
}
BOOST_AUTO_TEST_CASE(check_1001_pandas)
{
std::vector<unsigned char> name_char_data;
std::vector<std::uint32_t> name_offsets;
const std::string panda = "🐼";
name_offsets.push_back(0);
for (std::size_t i = 0; i < 1000; ++i)
std::copy(panda.begin(), panda.end(), std::back_inserter(name_char_data));
name_offsets.push_back(name_char_data.size());
std::copy(panda.begin(), panda.end(), std::back_inserter(name_char_data));
name_offsets.push_back(name_char_data.size());
test_rw<IndexedData<VariableGroupBlock<16, std::string>>>(name_offsets, name_char_data);
}
BOOST_AUTO_TEST_CASE(check_different_sizes)
{
for (std::size_t num_strings = 0; num_strings < 256; ++num_strings)
{
std::vector<unsigned char> name_char_data;
std::vector<std::uint32_t> name_offsets;
const std::string canoe = "🛶";
name_offsets.push_back(0);
for (std::size_t i = 0; i < num_strings; ++i)
{
std::copy(canoe.begin(), canoe.end(), std::back_inserter(name_char_data));
name_offsets.push_back(name_char_data.size());
}
test_rw<IndexedData<VariableGroupBlock<16, std::string>>>(name_offsets, name_char_data);
test_rw<IndexedData<FixedGroupBlock<16, std::string>>>(name_offsets, name_char_data);
}
}
BOOST_AUTO_TEST_CASE(check_max_size)
{
std::vector<unsigned char> name_data(0x1000000, '#');
std::vector<std::uint32_t> name_offsets;
auto test_variable = [&name_offsets, &name_data]() {
test_rw<IndexedData<VariableGroupBlock<16, std::string>>>(name_offsets, name_data);
};
auto test_fixed = [&name_offsets, &name_data]() {
test_rw<IndexedData<FixedGroupBlock<16, std::string>>>(name_offsets, name_data);
};
name_offsets = {0, 0x1000000};
BOOST_CHECK_THROW(test_variable(), osrm::util::exception);
name_offsets = {0, 0x1000000 - 1};
test_variable();
name_offsets = {0, 256};
BOOST_CHECK_THROW(test_fixed(), osrm::util::exception);
name_offsets = {0, 255};
BOOST_CHECK_NO_THROW(test_fixed());
}
BOOST_AUTO_TEST_CASE(check_string_view)
{
std::string name_data = "hellostringview";
std::vector<std::uint32_t> name_offsets = {0, 5, 11, 15};
IndexedData<VariableGroupBlock<16, StringView>> indexed_data(
name_offsets.begin(), name_offsets.end(), name_data.begin());
BOOST_CHECK_EQUAL(indexed_data.at(0), "hello");
BOOST_CHECK_EQUAL(indexed_data.at(1), "string");
BOOST_CHECK_EQUAL(indexed_data.at(2), "view");
}
BOOST_AUTO_TEST_SUITE_END()
| 1 | 24,186 | Potential subscript out of range. | Project-OSRM-osrm-backend | cpp |
@@ -185,7 +185,6 @@ func (ps *peerSelector) PeerDownloadDurationToRank(peer network.Peer, blockDownl
default: // i.e. peerRankInitialFourthPriority
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank3LowBlockTime, peerRank3HighBlockTime)
-
}
}
| 1 | // Copyright (C) 2019-2021 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package catchup
import (
"errors"
"sort"
"time"
"github.com/algorand/go-deadlock"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/network"
)
const (
// peerRankInitialFirstPriority is the high-priority peers group ( typically, archivers )
peerRankInitialFirstPriority = 0
peerRank0LowBlockTime = 1
peerRank0HighBlockTime = 199
// peerRankInitialSecondPriority is the second priority peers group ( typically, relays )
peerRankInitialSecondPriority = 200
peerRank1LowBlockTime = 201
peerRank1HighBlockTime = 399
peerRankInitialThirdPriority = 400
peerRank2LowBlockTime = 401
peerRank2HighBlockTime = 599
peerRankInitialFourthPriority = 600
peerRank3LowBlockTime = 601
peerRank3HighBlockTime = 799
// peerRankDownloadFailed is used for responses which could be temporary, such as missing files, or such that we don't
// have clear resolution
peerRankDownloadFailed = 900
// peerRankInvalidDownload is used for responses which are likely to be invalid - whether it's serving the wrong content
// or attempting to serve malicious content
peerRankInvalidDownload = 1000
// once a block is downloaded, the download duration is clamped into the range of [lowBlockDownloadThreshold..highBlockDownloadThreshold] and
// then mapped into a ranking range.
lowBlockDownloadThreshold = 50 * time.Millisecond
highBlockDownloadThreshold = 8 * time.Second
)
var errPeerSelectorNoPeerPoolsAvailable = errors.New("no peer pools available")
// peerClass defines the type of peer we want to have in a particular "class",
// and defines the network.PeerOption that would be used to retrieve that type of
// peer
type peerClass struct {
initialRank int
peerClass network.PeerOption
}
// the peersRetriever is a subset of the network.GossipNode used to ensure that we can create an instance of the peerSelector
// for testing purposes, providing just the above function.
type peersRetriever interface {
// Get a list of Peers we could potentially send a direct message to.
GetPeers(options ...network.PeerOption) []network.Peer
}
// peerPoolEntry represents a single peer entry in the pool. It contains
// the underlying network peer as well as the peer class.
type peerPoolEntry struct {
peer network.Peer
class peerClass
}
// peerPool is a single pool of peers that shares the same rank.
type peerPool struct {
rank int
peers []peerPoolEntry
}
// peerSelector is a helper struct used to select the next peer to try and connect to
// for various catchup purposes. Unlike the underlying network GetPeers(), it allows the
// client to provide feedback regarding the peer's performance, and to have the subsequent
// query(s) take advantage of that intel.
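//
// A minimal usage sketch (illustrative only; someClass stands in for a real peerClass
// value, node for a peersRetriever, and error handling is omitted):
//
//   ps := makePeerSelector(node, []peerClass{someClass})
//   peer, err := ps.GetNextPeer()
//   // ... attempt a block download from peer, timing it as downloadDuration ...
//   ps.RankPeer(peer, ps.PeerDownloadDurationToRank(peer, downloadDuration))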
type peerSelector struct {
mu deadlock.Mutex
net peersRetriever
peerClasses []peerClass
pools []peerPool
}
// makePeerSelector creates a peerSelector, given a peersRetriever and peerClass array.
func makePeerSelector(net peersRetriever, initialPeersClasses []peerClass) *peerSelector {
selector := &peerSelector{
net: net,
peerClasses: initialPeersClasses,
}
return selector
}
// GetNextPeer returns the next peer. It randomly selects a peer from the pool that has
// the lowest rank value. Grouping the peers by rank allows us to
// prioritize peers based on their class and/or performance.
func (ps *peerSelector) GetNextPeer() (peer network.Peer, err error) {
ps.mu.Lock()
defer ps.mu.Unlock()
ps.refreshAvailablePeers()
for _, pool := range ps.pools {
if len(pool.peers) > 0 {
// the previous call to refreshAvailablePeers ensures that this would always be the case;
// however, if we do have a zero length pool, we don't want to divide by zero, so this would
// provide the needed test.
// pick one of the peers from this pool at random
peerIdx := crypto.RandUint64() % uint64(len(pool.peers))
peer = pool.peers[peerIdx].peer
return
}
}
return nil, errPeerSelectorNoPeerPoolsAvailable
}
// RankPeer ranks a given peer.
// It returns true if the value was updated, or false otherwise.
func (ps *peerSelector) RankPeer(peer network.Peer, rank int) bool {
if peer == nil {
return false
}
ps.mu.Lock()
defer ps.mu.Unlock()
poolIdx, peerIdx := ps.findPeer(peer)
if poolIdx < 0 || peerIdx < 0 {
return false
}
// we need to remove the peer from the pool so we can place it in a different location.
pool := ps.pools[poolIdx]
if pool.rank != rank {
class := pool.peers[peerIdx].class
if len(pool.peers) > 1 {
pool.peers = append(pool.peers[:peerIdx], pool.peers[peerIdx+1:]...)
ps.pools[poolIdx] = pool
} else {
// the last peer was removed from the pool; delete this pool.
ps.pools = append(ps.pools[:poolIdx], ps.pools[poolIdx+1:]...)
}
sortNeeded := ps.addToPool(peer, rank, class)
if sortNeeded {
ps.sort()
}
}
return true
}
// PeerDownloadDurationToRank calculates the rank for a given peer based on its block download time.
func (ps *peerSelector) PeerDownloadDurationToRank(peer network.Peer, blockDownloadDuration time.Duration) (rank int) {
ps.mu.Lock()
defer ps.mu.Unlock()
poolIdx, peerIdx := ps.findPeer(peer)
if poolIdx < 0 || peerIdx < 0 {
return peerRankInvalidDownload
}
switch ps.pools[poolIdx].peers[peerIdx].class.initialRank {
case peerRankInitialFirstPriority:
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank0LowBlockTime, peerRank0HighBlockTime)
case peerRankInitialSecondPriority:
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank1LowBlockTime, peerRank1HighBlockTime)
case peerRankInitialThirdPriority:
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank2LowBlockTime, peerRank2HighBlockTime)
default: // i.e. peerRankInitialFourthPriority
return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank3LowBlockTime, peerRank3HighBlockTime)
}
}
// addToPool adds a given peer to the correct group. If no group exists for that peer's rank,
// a new group is created.
// The method returns true if a new group was created ( suggesting that the pools list would need to be re-ordered ), or false otherwise.
func (ps *peerSelector) addToPool(peer network.Peer, rank int, class peerClass) bool {
// see if we already have a list with that rank:
for i, pool := range ps.pools {
if pool.rank == rank {
// we found an existing group, add this peer to the list.
ps.pools[i].peers = append(pool.peers, peerPoolEntry{peer: peer, class: class})
return false
}
}
ps.pools = append(ps.pools, peerPool{rank: rank, peers: []peerPoolEntry{{peer: peer, class: class}}})
return true
}
// sort the pools array in ascending order according to the rank of each pool.
func (ps *peerSelector) sort() {
sort.SliceStable(ps.pools, func(i, j int) bool {
return ps.pools[i].rank < ps.pools[j].rank
})
}
// peerAddress returns the peer's underlying address. The network.Peer object cannot be compared
// to itself, since the network package dynamically creates a new instance on every network.GetPeers() call.
// The method returns the peer address or an empty string if the peer is not one of HTTPPeer/UnicastPeer
func peerAddress(peer network.Peer) string {
if httpPeer, ok := peer.(network.HTTPPeer); ok {
return httpPeer.GetAddress()
} else if unicastPeer, ok := peer.(network.UnicastPeer); ok {
return unicastPeer.GetAddress()
}
return ""
}
// refreshAvailablePeers reloads the available peers from the network package, adds new peers along with their
// corresponding initial rank, and deletes peers that have been dropped by the network package.
func (ps *peerSelector) refreshAvailablePeers() {
existingPeers := make(map[string]network.Peer)
for _, pool := range ps.pools {
for _, localPeer := range pool.peers {
if peerAddress := peerAddress(localPeer.peer); peerAddress != "" {
existingPeers[peerAddress] = localPeer.peer
}
}
}
sortNeeded := false
for _, initClass := range ps.peerClasses {
peers := ps.net.GetPeers(initClass.peerClass)
for _, peer := range peers {
peerAddress := peerAddress(peer)
if peerAddress == "" {
continue
}
if _, has := existingPeers[peerAddress]; has {
delete(existingPeers, peerAddress)
continue
}
// it's an entry which we did not have before.
sortNeeded = ps.addToPool(peer, initClass.initialRank, initClass) || sortNeeded
}
}
// delete from the pools array the peers that do not exist on the network anymore.
for poolIdx := len(ps.pools) - 1; poolIdx >= 0; poolIdx-- {
pool := ps.pools[poolIdx]
for peerIdx := len(pool.peers) - 1; peerIdx >= 0; peerIdx-- {
peer := pool.peers[peerIdx].peer
if peerAddress := peerAddress(peer); peerAddress != "" {
if _, has := existingPeers[peerAddress]; has {
// needs to be removed.
pool.peers = append(pool.peers[:peerIdx], pool.peers[peerIdx+1:]...)
}
}
}
if len(pool.peers) == 0 {
ps.pools = append(ps.pools[:poolIdx], ps.pools[poolIdx+1:]...)
sortNeeded = true
} else {
ps.pools[poolIdx] = pool
}
}
if sortNeeded {
ps.sort()
}
}
// findPeer looks into the peer pools and finds the given peer.
// The method returns the pool and peer indices if a peer was found, or (-1, -1) otherwise.
func (ps *peerSelector) findPeer(peer network.Peer) (poolIdx, peerIdx int) {
peerAddr := peerAddress(peer)
if peerAddr != "" {
for i, pool := range ps.pools {
for j, localPeerEntry := range pool.peers {
if peerAddress(localPeerEntry.peer) == peerAddr {
return i, j
}
}
}
}
return -1, -1
}
// calculate the duration rank by mapping the range of [minDownloadDuration..maxDownloadDuration] into the rank range of [minRank..maxRank]
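// For example, with the constants above, a second-priority peer (minRank 201, maxRank 399)
// that served a block in 4s is clamped against [50ms..8s] and maps to
// 201 + (3950ms/7950ms)*198, i.e. rank 299, while any download of 8s or more maps to 399.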
func downloadDurationToRank(downloadDuration, minDownloadDuration, maxDownloadDuration time.Duration, minRank, maxRank int) (rank int) {
// clamp the downloadDuration into the range of [minDownloadDuration .. maxDownloadDuration]
if downloadDuration < minDownloadDuration {
downloadDuration = minDownloadDuration
} else if downloadDuration > maxDownloadDuration {
downloadDuration = maxDownloadDuration
}
// the formula below maps an element in the range of [minDownloadDuration .. maxDownloadDuration] onto the range of [minRank .. maxRank]
rank = minRank + int((downloadDuration-minDownloadDuration).Nanoseconds()*int64(maxRank-minRank)/(maxDownloadDuration-minDownloadDuration).Nanoseconds())
return
}
| 1 | 42,330 | please undo this change. you didn't really meant to make it, right ? | algorand-go-algorand | go |
@@ -0,0 +1,13 @@
+import initStoryshots from '@storybook/addon-storyshots';
+import { puppeteerTest } from '@storybook/addon-storyshots-puppeteer';
+import path from 'path';
+
+initStoryshots( {
+ suite: 'Puppeteer storyshots',
+ test: puppeteerTest( {
+ // eslint-disable-next-line sitekit/acronym-case
+ storybookUrl: `file://${ path.resolve( __dirname, '../dist' ) }`,
+ setupTimeout: 5000,
+ testTimeout: 5000,
+ } ),
+} ); | 1 | 1 | 40,479 | To check! are these millseconds or seconds :thinking: The docs aren't clear | google-site-kit-wp | js |
|
@@ -508,6 +508,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
// Read and queue the rest of the parts
for u.geterr() == nil {
+ num++
// This upload exceeded maximum number of supported parts, error now.
if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
var msg string | 1 | package s3manager
import (
"bytes"
"fmt"
"io"
"sort"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3iface"
)
// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
// on Amazon S3.
const MaxUploadParts = 10000
// MinUploadPartSize is the minimum allowed part size when uploading a part to
// Amazon S3.
const MinUploadPartSize int64 = 1024 * 1024 * 5
// DefaultUploadPartSize is the default part size to buffer chunks of a
// payload into.
const DefaultUploadPartSize = MinUploadPartSize
// DefaultUploadConcurrency is the default number of goroutines to spin up when
// using Upload().
const DefaultUploadConcurrency = 5
// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned
// will satisfy this interface when a multi part upload failed to upload all
// chunks to S3. In the case of a failure the UploadID is needed to operate on
// the chunks, if any, which were uploaded.
//
// Example:
//
// u := s3manager.NewUploader(opts)
// output, err := u.upload(input)
// if err != nil {
// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
// // Process error and its associated uploadID
// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
// } else {
// // Process error generically
// fmt.Println("Error:", err.Error())
// }
// }
//
type MultiUploadFailure interface {
awserr.Error
// Returns the upload id for the S3 multipart upload that failed.
UploadID() string
}
// So that the Error interface type can be included as an anonymous field
// in the multiUploadError struct and not conflict with the error.Error() method.
type awsError awserr.Error
// A multiUploadError wraps the upload ID of a failed s3 multipart upload.
// Composed of BaseError for code, message, and original error
//
// Should be used for an error that occurred during a failed S3 multipart upload,
// when an upload ID is available. If an uploadID is not available, a more relevant
// error type should be used instead.
type multiUploadError struct {
awsError
// ID for multipart upload which failed.
uploadID string
}
// Error returns the string representation of the error.
//
// See apierr.BaseError ErrorWithExtra for output format
//
// Satisfies the error interface.
func (m multiUploadError) Error() string {
extra := fmt.Sprintf("upload id: %s", m.uploadID)
return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr())
}
// String returns the string representation of the error.
// Alias for Error to satisfy the stringer interface.
func (m multiUploadError) String() string {
return m.Error()
}
// UploadID returns the id of the S3 upload which failed.
func (m multiUploadError) UploadID() string {
return m.uploadID
}
// UploadInput contains all input for upload requests to Amazon S3.
type UploadInput struct {
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string"`
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Specifies caching behavior along the request/reply chain.
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
// Specifies presentational information for the object.
ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
// Specifies what content encodings have been applied to the object and thus
// what decoding mechanisms must be applied to obtain the media-type referenced
// by the Content-Type header field.
ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
// The language the content is in.
ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
// A standard MIME type describing the format of the object data.
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
// The date and time at which the object is no longer cacheable.
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to read the object data and its metadata.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the object ACL.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to write the ACL for the applicable object.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
Key *string `location:"uri" locationName:"Key" type:"string" required:"true"`
// A map of metadata to store with the object in S3.
Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
// Confirms that the requester knows that she or he will be charged for the
// request. Bucket owners need not specify this parameter in their requests.
// Documentation on downloading objects from requester pays buckets can be found
// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"`
// Specifies the algorithm to use to when encrypting the object (e.g., AES256,
// aws:kms).
SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
// does not store the encryption key. The key must be appropriate for use with
// the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure the encryption
// key was transmitted without error.
SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
// Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
// requests for an object protected by AWS KMS will fail if not made via SSL
// or using SigV4. Documentation on configuring any of the officially supported
// AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
// The Server-side encryption algorithm used when storing this object in S3
// (e.g., AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"`
// The type of storage to use for the object. Defaults to 'STANDARD'.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"`
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata.
WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
// The readable body payload to send to S3.
Body io.Reader
}
// UploadOutput represents a response from the Upload() call.
type UploadOutput struct {
// The URL where the object was uploaded to.
Location string
// The version of the object that was uploaded. Will only be populated if
// the S3 Bucket is versioned. If the bucket is not versioned this field
// will not be set.
VersionID *string
// The ID for a multipart upload to S3. In the case of an error the error
// can be cast to the MultiUploadFailure interface to extract the upload ID.
UploadID string
}
// The Uploader structure that calls Upload(). It is safe to call Upload()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Uploader's properties is not safe to be done concurrently.
type Uploader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
// if this value is set to zero, the DefaultUploadPartSize value will be used.
PartSize int64
// The number of goroutines to spin up in parallel when sending parts.
// If this is set to zero, the DefaultUploadConcurrency value will be used.
Concurrency int
// Setting this value to true will cause the SDK to avoid calling
// AbortMultipartUpload on a failure, leaving all successfully uploaded
// parts on S3 for manual recovery.
//
// Note that storing parts of an incomplete multipart upload counts towards
// space usage on S3 and will add additional costs if not cleaned up.
LeavePartsOnError bool
// MaxUploadParts is the max number of parts which will be uploaded to S3.
// Will be used to calculate the partsize of the object to be uploaded.
// E.g: 5GB file, with MaxUploadParts set to 100, will upload the file
// as 100, 50MB parts.
// This is limited to MaxUploadParts (10,000 parts).
MaxUploadParts int
// The client to use when uploading to S3.
S3 s3iface.S3API
}
// NewUploader creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires a
// client.ConfigProvider in order to create a S3 service client. The session.Session
// satisfies the client.ConfigProvider interface.
//
// Example:
// // The session the S3 Uploader will use
// sess := session.New()
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
//
// // Create an uploader with the session and custom options
// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
u := &Uploader{
S3: s3.New(c),
PartSize: DefaultUploadPartSize,
Concurrency: DefaultUploadConcurrency,
LeavePartsOnError: false,
MaxUploadParts: MaxUploadParts,
}
for _, option := range options {
option(u)
}
return u
}
// NewUploaderWithClient creates a new Uploader instance to upload objects to S3. Pass in
// additional functional options to customize the uploader's behavior. Requires
// a S3 service client to make S3 API calls.
//
// Example:
// // S3 service client the Upload manager will use.
// s3Svc := s3.New(session.New())
//
// // Create an uploader with S3 client and default options
// uploader := s3manager.NewUploaderWithClient(s3Svc)
//
// // Create an uploader with S3 client and custom options
// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
u := &Uploader{
S3: svc,
PartSize: DefaultUploadPartSize,
Concurrency: DefaultUploadConcurrency,
LeavePartsOnError: false,
MaxUploadParts: MaxUploadParts,
}
for _, option := range options {
option(u)
}
return u
}
// Upload uploads an object to S3, intelligently buffering large files into
// smaller chunks and sending them in parallel across multiple goroutines. You
// can configure the buffer size and concurrency through the Uploader's parameters.
//
// Additional functional options can be provided to configure the individual
// upload. These options are copies of the Uploader instance Upload is called from.
// Modifying the options will not impact the original Uploader instance.
//
// It is safe to call this method concurrently across goroutines.
//
// Example:
// // Upload input parameters
// upParams := &s3manager.UploadInput{
// Bucket: &bucketName,
// Key: &keyName,
// Body: file,
// }
//
// // Perform an upload.
// result, err := uploader.Upload(upParams)
//
// // Perform upload with options different than those in the Uploader.
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
// })
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
i := uploader{in: input, ctx: u}
for _, option := range options {
option(&i.ctx)
}
return i.upload()
}
// internal structure to manage an upload to S3.
type uploader struct {
ctx Uploader
in *UploadInput
readerPos int64 // current reader position
totalSize int64 // set to -1 if the size is not known
}
// internal logic for deciding whether to upload a single part or use a
// multipart upload.
func (u *uploader) upload() (*UploadOutput, error) {
u.init()
if u.ctx.PartSize < MinUploadPartSize {
msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize)
return nil, awserr.New("ConfigError", msg, nil)
}
// Do one read to determine if we have more than one part
buf, err := u.nextReader()
if err == io.EOF || err == io.ErrUnexpectedEOF { // single part
return u.singlePart(buf)
} else if err != nil {
return nil, awserr.New("ReadRequestBody", "read upload data failed", err)
}
mu := multiuploader{uploader: u}
return mu.upload(buf)
}
// init will initialize all default options.
func (u *uploader) init() {
if u.ctx.Concurrency == 0 {
u.ctx.Concurrency = DefaultUploadConcurrency
}
if u.ctx.PartSize == 0 {
u.ctx.PartSize = DefaultUploadPartSize
}
// Try to get the total size for some optimizations
u.initSize()
}
// initSize tries to detect the total stream size, setting u.totalSize. If
// the size is not known, totalSize is set to -1.
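// For example (illustrative numbers): a 100GiB seekable body with the default 5MiB
// PartSize would need 20,480 parts, exceeding the default MaxUploadParts of 10,000,
// so PartSize is raised to 100GiB/10,000 + 1 = 10,737,419 bytes (~10.24MiB), keeping
// the upload within the part-count limit.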
func (u *uploader) initSize() {
u.totalSize = -1
switch r := u.in.Body.(type) {
case io.Seeker:
pos, _ := r.Seek(0, 1)
defer r.Seek(pos, 0)
n, err := r.Seek(0, 2)
if err != nil {
return
}
u.totalSize = n
// Try to adjust partSize if it is too small and account for
// integer division truncation.
if u.totalSize/u.ctx.PartSize >= int64(u.ctx.MaxUploadParts) {
// Add one to the part size to account for remainders
// during the size calculation. e.g odd number of bytes.
u.ctx.PartSize = (u.totalSize / int64(u.ctx.MaxUploadParts)) + 1
}
}
}
// nextReader returns a seekable reader representing the next packet of data.
// This operation increases the shared u.readerPos counter, but note that it
// does not need to be wrapped in a mutex because nextReader is only called
// from the main thread.
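//
// When the body implements io.ReaderAt, a SectionReader windowing the underlying data
// is returned without copying; otherwise up to PartSize bytes are read into a fresh
// buffer and wrapped in a bytes.Reader.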
func (u *uploader) nextReader() (io.ReadSeeker, error) {
switch r := u.in.Body.(type) {
case io.ReaderAt:
var err error
n := u.ctx.PartSize
if u.totalSize >= 0 {
bytesLeft := u.totalSize - u.readerPos
if bytesLeft == 0 {
err = io.EOF
n = bytesLeft
} else if bytesLeft <= u.ctx.PartSize {
err = io.ErrUnexpectedEOF
n = bytesLeft
}
}
buf := io.NewSectionReader(r, u.readerPos, n)
u.readerPos += n
return buf, err
default:
packet := make([]byte, u.ctx.PartSize)
n, err := io.ReadFull(u.in.Body, packet)
u.readerPos += int64(n)
return bytes.NewReader(packet[0:n]), err
}
}
// singlePart contains upload logic for uploading a single chunk via
// a regular PutObject request. Multipart requests require at least two
// parts, or at least 5MB of data.
func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) {
params := &s3.PutObjectInput{}
awsutil.Copy(params, u.in)
params.Body = buf
req, out := u.ctx.S3.PutObjectRequest(params)
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
if err := req.Send(); err != nil {
return nil, err
}
url := req.HTTPRequest.URL.String()
return &UploadOutput{
Location: url,
VersionID: out.VersionId,
}, nil
}
// internal structure to manage a specific multipart upload to S3.
type multiuploader struct {
*uploader
wg sync.WaitGroup
m sync.Mutex
err error
uploadID string
parts completedParts
}
// keeps track of a single chunk of data being sent to S3.
type chunk struct {
buf io.ReadSeeker
num int64
}
// completedParts is a wrapper to make parts sortable by their part number,
// since S3 requires this list to be sent in sorted order.
type completedParts []*s3.CompletedPart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber }
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
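// The main goroutine below acts as the producer: it slices the body into parts with
// nextReader and feeds them to a buffered channel, while Concurrency worker goroutines
// (readChunk) consume the chunks and issue UploadPart requests until the channel is
// closed.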
func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) {
params := &s3.CreateMultipartUploadInput{}
awsutil.Copy(params, u.in)
// Create the multipart
req, resp := u.ctx.S3.CreateMultipartUploadRequest(params)
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
if err := req.Send(); err != nil {
return nil, err
}
u.uploadID = *resp.UploadId
// Create the workers
ch := make(chan chunk, u.ctx.Concurrency)
for i := 0; i < u.ctx.Concurrency; i++ {
u.wg.Add(1)
go u.readChunk(ch)
}
// Send part 1 to the workers
var num int64 = 1
ch <- chunk{buf: firstBuf, num: num}
// Read and queue the rest of the parts
for u.geterr() == nil {
// This upload exceeded maximum number of supported parts, error now.
if num > int64(u.ctx.MaxUploadParts) || num > int64(MaxUploadParts) {
var msg string
if num > int64(u.ctx.MaxUploadParts) {
msg = fmt.Sprintf("exceeded total allowed configured MaxUploadParts (%d). Adjust PartSize to fit in this limit",
u.ctx.MaxUploadParts)
} else {
msg = fmt.Sprintf("exceeded total allowed S3 limit MaxUploadParts (%d). Adjust PartSize to fit in this limit",
MaxUploadParts)
}
u.seterr(awserr.New("TotalPartsExceeded", msg, nil))
break
}
num++
buf, err := u.nextReader()
if err == io.EOF {
break
}
ch <- chunk{buf: buf, num: num}
if err != nil && err != io.ErrUnexpectedEOF {
u.seterr(awserr.New(
"ReadRequestBody",
"read multipart upload data failed",
err))
break
}
}
// Close the channel, wait for workers, and complete upload
close(ch)
u.wg.Wait()
complete := u.complete()
if err := u.geterr(); err != nil {
return nil, &multiUploadError{
awsError: awserr.New(
"MultipartUpload",
"upload multipart failed",
err),
uploadID: u.uploadID,
}
}
return &UploadOutput{
Location: *complete.Location,
VersionID: complete.VersionId,
UploadID: u.uploadID,
}, nil
}
// readChunk runs in worker goroutines to pull chunks off of the ch channel
// and send() them as UploadPart requests.
func (u *multiuploader) readChunk(ch chan chunk) {
defer u.wg.Done()
for {
data, ok := <-ch
if !ok {
break
}
if u.geterr() == nil {
if err := u.send(data); err != nil {
u.seterr(err)
}
}
}
}
// send performs an UploadPart request and keeps track of the completed
// part information.
func (u *multiuploader) send(c chunk) error {
req, resp := u.ctx.S3.UploadPartRequest(&s3.UploadPartInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
Body: c.buf,
UploadId: &u.uploadID,
PartNumber: &c.num,
})
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
if err := req.Send(); err != nil {
return err
}
n := c.num
completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n}
u.m.Lock()
u.parts = append(u.parts, completed)
u.m.Unlock()
return nil
}
// geterr is a thread-safe getter for the error object
func (u *multiuploader) geterr() error {
u.m.Lock()
defer u.m.Unlock()
return u.err
}
// seterr is a thread-safe setter for the error object
func (u *multiuploader) seterr(e error) {
u.m.Lock()
defer u.m.Unlock()
u.err = e
}
// fail will abort the multipart unless LeavePartsOnError is set to true.
func (u *multiuploader) fail() {
if u.ctx.LeavePartsOnError {
return
}
req, _ := u.ctx.S3.AbortMultipartUploadRequest(&s3.AbortMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
})
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
req.Send()
}
// complete successfully completes a multipart upload and returns the response.
func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
if u.geterr() != nil {
u.fail()
return nil
}
// Parts must be sorted in PartNumber order.
sort.Sort(u.parts)
req, resp := u.ctx.S3.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts},
})
req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("S3Manager"))
if err := req.Send(); err != nil {
u.seterr(err)
u.fail()
}
return resp
}
| 1 | 7,789 | Why was this moved? | aws-aws-sdk-go | go |
@@ -15,6 +15,10 @@ limitations under the License.
*/
package v1alpha1
+type CStorPoolExpansion interface{}
+
+type CStorVolumeReplicaExpansion interface{}
+
type StoragePoolExpansion interface{}
type StoragePoolClaimExpansion interface{} | 1 | /*
Copyright 2017 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
type StoragePoolExpansion interface{}
type StoragePoolClaimExpansion interface{}
type VolumePolicyExpansion interface{}
| 1 | 7,520 | What are these object with suffix Expansion? | openebs-maya | go |
@@ -831,6 +831,13 @@ public class BesuCommand implements DefaultCommandValues, Runnable {
arity = "1")
private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP;
+ @Option(
+ names = {"--rpc-allow-unprotected-txs"},
+ description =
+ "Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC (default: ${DEFAULT-VALUE})",
+ arity = "1")
+ private final Boolean unprotectedTransactionsAllowed = false;
+
@Option(
names = {"--min-block-occupancy-ratio"},
description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})", | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.cli;
import static com.google.common.base.Preconditions.checkNotNull;
import static java.nio.charset.StandardCharsets.UTF_8;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
import static org.hyperledger.besu.cli.DefaultCommandValues.getDefaultBesuDataPath;
import static org.hyperledger.besu.cli.config.NetworkName.MAINNET;
import static org.hyperledger.besu.cli.util.CommandLineUtils.DEPENDENCY_WARNING_MSG;
import static org.hyperledger.besu.controller.BesuController.DATABASE_PATH;
import static org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration.DEFAULT_GRAPHQL_HTTP_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration.DEFAULT_JSON_RPC_PORT;
import static org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis.DEFAULT_JSON_RPC_APIS;
import static org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration.DEFAULT_WEBSOCKET_PORT;
import static org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration.QIP714_DEFAULT_BLOCK;
import static org.hyperledger.besu.metrics.BesuMetricCategory.DEFAULT_METRIC_CATEGORIES;
import static org.hyperledger.besu.metrics.MetricsProtocol.PROMETHEUS;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PORT;
import static org.hyperledger.besu.metrics.prometheus.MetricsConfiguration.DEFAULT_METRICS_PUSH_PORT;
import static org.hyperledger.besu.nat.kubernetes.KubernetesNatManager.DEFAULT_BESU_SERVICE_NAME_FILTER;
import org.hyperledger.besu.BesuInfo;
import org.hyperledger.besu.Runner;
import org.hyperledger.besu.RunnerBuilder;
import org.hyperledger.besu.chainexport.RlpBlockExporter;
import org.hyperledger.besu.chainimport.JsonBlockImporter;
import org.hyperledger.besu.chainimport.RlpBlockImporter;
import org.hyperledger.besu.cli.config.EthNetworkConfig;
import org.hyperledger.besu.cli.config.NetworkName;
import org.hyperledger.besu.cli.converter.MetricCategoryConverter;
import org.hyperledger.besu.cli.converter.PercentageConverter;
import org.hyperledger.besu.cli.converter.RpcApisConverter;
import org.hyperledger.besu.cli.custom.CorsAllowedOriginsProperty;
import org.hyperledger.besu.cli.custom.JsonRPCAllowlistHostsProperty;
import org.hyperledger.besu.cli.custom.RpcAuthFileValidator;
import org.hyperledger.besu.cli.error.BesuExceptionHandler;
import org.hyperledger.besu.cli.options.unstable.DataStorageOptions;
import org.hyperledger.besu.cli.options.unstable.DnsOptions;
import org.hyperledger.besu.cli.options.unstable.EthProtocolOptions;
import org.hyperledger.besu.cli.options.unstable.EthstatsOptions;
import org.hyperledger.besu.cli.options.unstable.LauncherOptions;
import org.hyperledger.besu.cli.options.unstable.MetricsCLIOptions;
import org.hyperledger.besu.cli.options.unstable.MiningOptions;
import org.hyperledger.besu.cli.options.unstable.NatOptions;
import org.hyperledger.besu.cli.options.unstable.NativeLibraryOptions;
import org.hyperledger.besu.cli.options.unstable.NetworkingOptions;
import org.hyperledger.besu.cli.options.unstable.RPCOptions;
import org.hyperledger.besu.cli.options.unstable.SynchronizerOptions;
import org.hyperledger.besu.cli.options.unstable.TransactionPoolOptions;
import org.hyperledger.besu.cli.presynctasks.PreSynchronizationTaskRunner;
import org.hyperledger.besu.cli.presynctasks.PrivateDatabaseMigrationPreSyncTask;
import org.hyperledger.besu.cli.subcommands.PasswordSubCommand;
import org.hyperledger.besu.cli.subcommands.PublicKeySubCommand;
import org.hyperledger.besu.cli.subcommands.RetestethSubCommand;
import org.hyperledger.besu.cli.subcommands.blocks.BlocksSubCommand;
import org.hyperledger.besu.cli.subcommands.operator.OperatorSubCommand;
import org.hyperledger.besu.cli.subcommands.rlp.RLPSubCommand;
import org.hyperledger.besu.cli.util.BesuCommandCustomFactory;
import org.hyperledger.besu.cli.util.CommandLineUtils;
import org.hyperledger.besu.cli.util.ConfigOptionSearchAndRunHandler;
import org.hyperledger.besu.cli.util.VersionProvider;
import org.hyperledger.besu.config.GenesisConfigFile;
import org.hyperledger.besu.config.GenesisConfigOptions;
import org.hyperledger.besu.config.GoQuorumOptions;
import org.hyperledger.besu.config.experimental.ExperimentalEIPs;
import org.hyperledger.besu.controller.BesuController;
import org.hyperledger.besu.controller.BesuControllerBuilder;
import org.hyperledger.besu.controller.TargetingGasLimitCalculator;
import org.hyperledger.besu.crypto.KeyPair;
import org.hyperledger.besu.crypto.KeyPairSecurityModule;
import org.hyperledger.besu.crypto.KeyPairUtil;
import org.hyperledger.besu.crypto.NodeKey;
import org.hyperledger.besu.crypto.SignatureAlgorithmFactory;
import org.hyperledger.besu.enclave.EnclaveFactory;
import org.hyperledger.besu.enclave.GoQuorumEnclave;
import org.hyperledger.besu.ethereum.api.ApiConfiguration;
import org.hyperledger.besu.ethereum.api.ImmutableApiConfiguration;
import org.hyperledger.besu.ethereum.api.graphql.GraphQLConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.JsonRpcConfiguration;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApi;
import org.hyperledger.besu.ethereum.api.jsonrpc.RpcApis;
import org.hyperledger.besu.ethereum.api.jsonrpc.websocket.WebSocketConfiguration;
import org.hyperledger.besu.ethereum.api.tls.FileBasedPasswordProvider;
import org.hyperledger.besu.ethereum.api.tls.TlsClientAuthConfiguration;
import org.hyperledger.besu.ethereum.api.tls.TlsConfiguration;
import org.hyperledger.besu.ethereum.blockcreation.GasLimitCalculator;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.Address;
import org.hyperledger.besu.ethereum.core.GoQuorumPrivacyParameters;
import org.hyperledger.besu.ethereum.core.Hash;
import org.hyperledger.besu.ethereum.core.MiningParameters;
import org.hyperledger.besu.ethereum.core.PrivacyParameters;
import org.hyperledger.besu.ethereum.core.Wei;
import org.hyperledger.besu.ethereum.eth.sync.SyncMode;
import org.hyperledger.besu.ethereum.eth.sync.SynchronizerConfiguration;
import org.hyperledger.besu.ethereum.eth.transactions.TransactionPoolConfiguration;
import org.hyperledger.besu.ethereum.mainnet.precompiles.AbstractAltBnPrecompiledContract;
import org.hyperledger.besu.ethereum.p2p.config.DiscoveryConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeDnsConfiguration;
import org.hyperledger.besu.ethereum.p2p.peers.EnodeURL;
import org.hyperledger.besu.ethereum.p2p.peers.StaticNodesParser;
import org.hyperledger.besu.ethereum.permissioning.GoQuorumPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.LocalPermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfiguration;
import org.hyperledger.besu.ethereum.permissioning.PermissioningConfigurationBuilder;
import org.hyperledger.besu.ethereum.permissioning.SmartContractPermissioningConfiguration;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProvider;
import org.hyperledger.besu.ethereum.privacy.storage.keyvalue.PrivacyKeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.storage.StorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProvider;
import org.hyperledger.besu.ethereum.storage.keyvalue.KeyValueStorageProviderBuilder;
import org.hyperledger.besu.ethereum.worldstate.DefaultWorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.PrunerConfiguration;
import org.hyperledger.besu.ethereum.worldstate.WorldStateArchive;
import org.hyperledger.besu.ethereum.worldstate.WorldStatePreimageStorage;
import org.hyperledger.besu.ethereum.worldstate.WorldStateStorage;
import org.hyperledger.besu.metrics.BesuMetricCategory;
import org.hyperledger.besu.metrics.MetricCategoryRegistryImpl;
import org.hyperledger.besu.metrics.MetricsProtocol;
import org.hyperledger.besu.metrics.MetricsSystemFactory;
import org.hyperledger.besu.metrics.ObservableMetricsSystem;
import org.hyperledger.besu.metrics.StandardMetricCategory;
import org.hyperledger.besu.metrics.prometheus.MetricsConfiguration;
import org.hyperledger.besu.metrics.vertx.VertxMetricsAdapterFactory;
import org.hyperledger.besu.nat.NatMethod;
import org.hyperledger.besu.plugin.services.BesuConfiguration;
import org.hyperledger.besu.plugin.services.BesuEvents;
import org.hyperledger.besu.plugin.services.MetricsSystem;
import org.hyperledger.besu.plugin.services.PicoCLIOptions;
import org.hyperledger.besu.plugin.services.SecurityModuleService;
import org.hyperledger.besu.plugin.services.StorageService;
import org.hyperledger.besu.plugin.services.exception.StorageException;
import org.hyperledger.besu.plugin.services.metrics.MetricCategory;
import org.hyperledger.besu.plugin.services.metrics.MetricCategoryRegistry;
import org.hyperledger.besu.plugin.services.securitymodule.SecurityModule;
import org.hyperledger.besu.plugin.services.storage.PrivacyKeyValueStorageFactory;
import org.hyperledger.besu.plugin.services.storage.rocksdb.RocksDBPlugin;
import org.hyperledger.besu.services.BesuEventsImpl;
import org.hyperledger.besu.services.BesuPluginContextImpl;
import org.hyperledger.besu.services.PicoCLIOptionsImpl;
import org.hyperledger.besu.services.SecurityModuleServiceImpl;
import org.hyperledger.besu.services.StorageServiceImpl;
import org.hyperledger.besu.services.kvstore.InMemoryStoragePlugin;
import org.hyperledger.besu.util.NetworkUtility;
import org.hyperledger.besu.util.PermissioningConfigurationValidator;
import org.hyperledger.besu.util.number.Fraction;
import org.hyperledger.besu.util.number.Percentage;
import org.hyperledger.besu.util.number.PositiveNumber;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.math.BigInteger;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.UnknownHostException;
import java.nio.file.Path;
import java.time.Clock;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.OptionalLong;
import java.util.Set;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.base.Suppliers;
import com.google.common.collect.ImmutableMap;
import com.google.common.io.Files;
import com.google.common.io.Resources;
import io.vertx.core.Vertx;
import io.vertx.core.VertxOptions;
import io.vertx.core.json.DecodeException;
import io.vertx.core.metrics.MetricsOptions;
import net.consensys.quorum.mainnet.launcher.LauncherManager;
import net.consensys.quorum.mainnet.launcher.config.ImmutableLauncherConfig;
import net.consensys.quorum.mainnet.launcher.exception.LauncherException;
import net.consensys.quorum.mainnet.launcher.util.ParseArgsHelper;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.tuweni.bytes.Bytes;
import org.apache.tuweni.units.bigints.UInt256;
import picocli.CommandLine;
import picocli.CommandLine.AbstractParseResultHandler;
import picocli.CommandLine.Command;
import picocli.CommandLine.ExecutionException;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParameterException;
@SuppressWarnings("FieldCanBeLocal") // because Picocli injected fields report false positives
@Command(
description = "This command runs the Besu Ethereum client full node.",
abbreviateSynopsis = true,
name = "besu",
mixinStandardHelpOptions = true,
versionProvider = VersionProvider.class,
header = "Usage:",
synopsisHeading = "%n",
descriptionHeading = "%nDescription:%n%n",
optionListHeading = "%nOptions:%n",
footerHeading = "%n",
footer = "Besu is licensed under the Apache License 2.0")
public class BesuCommand implements DefaultCommandValues, Runnable {
@SuppressWarnings("PrivateStaticFinalLoggers")
// non-static for testing
private final Logger logger;
private CommandLine commandLine;
private final Supplier<RlpBlockImporter> rlpBlockImporter;
private final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory;
private final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory;
// Unstable CLI options
final NetworkingOptions unstableNetworkingOptions = NetworkingOptions.create();
final SynchronizerOptions unstableSynchronizerOptions = SynchronizerOptions.create();
final EthProtocolOptions unstableEthProtocolOptions = EthProtocolOptions.create();
final MetricsCLIOptions unstableMetricsCLIOptions = MetricsCLIOptions.create();
final TransactionPoolOptions unstableTransactionPoolOptions = TransactionPoolOptions.create();
private final EthstatsOptions unstableEthstatsOptions = EthstatsOptions.create();
private final DataStorageOptions unstableDataStorageOptions = DataStorageOptions.create();
private final DnsOptions unstableDnsOptions = DnsOptions.create();
private final MiningOptions unstableMiningOptions = MiningOptions.create();
private final NatOptions unstableNatOptions = NatOptions.create();
private final NativeLibraryOptions unstableNativeLibraryOptions = NativeLibraryOptions.create();
private final RPCOptions unstableRPCOptions = RPCOptions.create();
final LauncherOptions unstableLauncherOptions = LauncherOptions.create();
private final RunnerBuilder runnerBuilder;
private final BesuController.Builder controllerBuilderFactory;
private final BesuPluginContextImpl besuPluginContext;
private final StorageServiceImpl storageService;
private final SecurityModuleServiceImpl securityModuleService;
private final Map<String, String> environment;
private final MetricCategoryRegistryImpl metricCategoryRegistry =
new MetricCategoryRegistryImpl();
private final MetricCategoryConverter metricCategoryConverter = new MetricCategoryConverter();
// Public IP stored to prevent having to research it each time we need it.
private InetAddress autoDiscoveredDefaultIP = null;
private final PreSynchronizationTaskRunner preSynchronizationTaskRunner =
new PreSynchronizationTaskRunner();
private final Set<Integer> allocatedPorts = new HashSet<>();
// CLI options defined by user at runtime.
// Options parsing is done with CLI library Picocli https://picocli.info/
// While this variable is never read it is needed for the PicoCLI to create
// the config file option that is read elsewhere.
@SuppressWarnings("UnusedVariable")
@CommandLine.Option(
names = {CONFIG_FILE_OPTION_NAME},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "TOML config file (default: none)")
private final File configFile = null;
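// Illustrative only (hypothetical values): keys in the TOML config file mirror the CLI
// option names without the leading dashes, e.g.
//   data-path="/opt/besu/data"
//   rpc-http-enabled=true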
@CommandLine.Option(
names = {"--data-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "The path to Besu data directory (default: ${DEFAULT-VALUE})")
final Path dataPath = getDefaultBesuDataPath(this);
// Genesis file path with null default option if the option
// is not defined on command line as this default is handled by Runner
// to use mainnet json file from resources as indicated in the
// default network option
// Then we have no control over genesis default value here.
@CommandLine.Option(
names = {"--genesis-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Genesis file. Setting this option makes --network option ignored and requires --network-id to be set.")
private final File genesisFile = null;
@CommandLine.Option(
names = {"--node-private-key-file"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description =
"The node's private key file (default: a file named \"key\" in the Besu data folder)")
private final File nodePrivateKeyFile = null;
@Option(
names = "--identity",
paramLabel = "<String>",
description = "Identification for this node in the Client ID",
arity = "1")
private final Optional<String> identityString = Optional.empty();
// Completely disables P2P within Besu.
@Option(
names = {"--p2p-enabled"},
description = "Enable P2P functionality (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean p2pEnabled = true;
// Boolean option to indicate if peers should NOT be discovered; a default of
// false indicates that the peers should be discovered by default.
//
// This negative option is required because of the nature of an option that is
// true when added on the command line. You can't do --option=false, so false is
// set as the default and you simply do not set the option at all if you want it
// to remain false.
// This seems to be the only way it works with Picocli.
// Also, many other programs use the same negative-option scheme for false
// defaults, meaning that it's probably the right way to handle disabling options.
@Option(
names = {"--discovery-enabled"},
description = "Enable P2P discovery (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean peerDiscoveryEnabled = true;
  // A list of bootstrap nodes can be passed;
  // otherwise a hardcoded list will be used by the Runner.
  // NOTE: we have no control over the default value here.
@Option(
names = {"--bootnodes"},
paramLabel = "<enode://id@host:port>",
description =
"Comma separated enode URLs for P2P discovery bootstrap. "
+ "Default is a predefined list.",
split = ",",
arity = "0..*")
private final List<String> bootNodes = null;
@Option(
names = {"--max-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description = "Maximum P2P connections that can be established (default: ${DEFAULT-VALUE})")
private final Integer maxPeers = DEFAULT_MAX_PEERS;
@Option(
names = {"--remote-connections-limit-enabled"},
description =
"Whether to limit the number of P2P connections initiated remotely. (default: ${DEFAULT-VALUE})")
private final Boolean isLimitRemoteWireConnectionsEnabled = true;
@Option(
names = {"--remote-connections-max-percentage"},
paramLabel = MANDATORY_DOUBLE_FORMAT_HELP,
description =
"The maximum percentage of P2P connections that can be initiated remotely. Must be between 0 and 100 inclusive. (default: ${DEFAULT-VALUE})",
arity = "1",
converter = PercentageConverter.class)
private final Integer maxRemoteConnectionsPercentage =
Fraction.fromFloat(DEFAULT_FRACTION_REMOTE_WIRE_CONNECTIONS_ALLOWED)
.toPercentage()
.getValue();
@Option(
names = {"--random-peer-priority-enabled"},
description =
"Allow for incoming connections to be prioritized randomly. This will prevent (typically small, stable) networks from forming impenetrable peer cliques. (default: ${DEFAULT-VALUE})")
private final Boolean randomPeerPriority = false;
@Option(
names = {"--banned-node-ids", "--banned-node-id"},
paramLabel = MANDATORY_NODE_ID_FORMAT_HELP,
description = "A list of node IDs to ban from the P2P network.",
split = ",",
arity = "1..*")
void setBannedNodeIds(final List<String> values) {
try {
bannedNodeIds =
values.stream()
.filter(value -> !value.isEmpty())
.map(EnodeURL::parseNodeId)
.collect(Collectors.toList());
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Invalid ids supplied to '--banned-node-ids'. " + e.getMessage());
}
}
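  // Hypothetical invocation of --banned-node-ids (placeholder IDs, not real nodes):
  //   --banned-node-ids=0xc35c...aa01,0x7a8d...42ff
  // Each entry is parsed via EnodeURL::parseNodeId above and empty entries are skipped.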
private Collection<Bytes> bannedNodeIds = new ArrayList<>();
@Option(
names = {"--sync-mode"},
paramLabel = MANDATORY_MODE_FORMAT_HELP,
description =
"Synchronization mode, possible values are ${COMPLETION-CANDIDATES} (default: FAST if a --network is supplied and privacy isn't enabled. FULL otherwise.)")
private SyncMode syncMode = null;
@Option(
names = {"--fast-sync-min-peers"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Minimum number of peers required before starting fast sync. (default: ${DEFAULT-VALUE})")
private final Integer fastSyncMinPeerCount = FAST_SYNC_MIN_PEER_COUNT;
@Option(
names = {"--network"},
paramLabel = MANDATORY_NETWORK_FORMAT_HELP,
description =
"Synchronize against the indicated network, possible values are ${COMPLETION-CANDIDATES}."
+ " (default: MAINNET)")
private final NetworkName network = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Ip address this node advertises to its peers (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pHost = autoDiscoverDefaultIP().getHostAddress();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--p2p-interface"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description =
"The network interface address on which this node listens for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private String p2pInterface = NetworkUtility.INADDR_ANY;
@Option(
names = {"--p2p-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port on which to listen for P2P communication (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer p2pPort = EnodeURL.DEFAULT_LISTENING_PORT;
@Option(
names = {"--nat-method"},
description =
"Specify the NAT circumvention method to be used, possible values are ${COMPLETION-CANDIDATES}."
+ " NONE disables NAT functionality. (default: ${DEFAULT-VALUE})")
private final NatMethod natMethod = DEFAULT_NAT_METHOD;
@Option(
names = {"--network-id"},
paramLabel = "<BIG INTEGER>",
description =
"P2P network identifier. (default: the selected network chain ID or custom genesis chain ID)",
arity = "1")
private final BigInteger networkId = null;
@Option(
names = {"--graphql-http-enabled"},
description = "Set to start the GraphQL HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isGraphQLHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--graphql-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String graphQLHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--graphql-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for GraphQL HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer graphQLHttpPort = DEFAULT_GRAPHQL_HTTP_PORT;
@Option(
names = {"--graphql-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty graphQLHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-enabled"},
description = "Set to start the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-http-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-http-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC HTTP to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcHttpPort = DEFAULT_JSON_RPC_PORT;
@Option(
names = {"--rpc-http-max-active-connections"},
description =
"Maximum number of HTTP connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.",
arity = "1")
private final Integer rpcHttpMaxConnections = DEFAULT_HTTP_MAX_CONNECTIONS;
// A list of origins URLs that are accepted by the JsonRpcHttpServer (CORS)
@Option(
names = {"--rpc-http-cors-origins"},
description = "Comma separated origin domain URLs for CORS validation (default: none)")
private final CorsAllowedOriginsProperty rpcHttpCorsAllowedOrigins =
new CorsAllowedOriginsProperty();
@Option(
names = {"--rpc-http-api", "--rpc-http-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Collection<RpcApi> rpcHttpApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-http-authentication-enabled"},
description =
"Require authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-http-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC HTTP authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcHttpAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-http-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC HTTP authentication",
arity = "1")
private final File rpcHttpAuthenticationPublicKeyFile = null;
@Option(
names = {"--rpc-http-tls-enabled"},
description = "Enable TLS for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsEnabled = false;
@Option(
names = {"--rpc-http-tls-keystore-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Keystore (PKCS#12) containing key/certificate for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStoreFile = null;
@Option(
names = {"--rpc-http-tls-keystore-password-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"File containing password to unlock keystore for the JSON-RPC HTTP service. Required if TLS is enabled.")
private final Path rpcHttpTlsKeyStorePasswordFile = null;
@Option(
names = {"--rpc-http-tls-client-auth-enabled"},
description =
"Enable TLS client authentication for the JSON-RPC HTTP service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsClientAuthEnabled = false;
@Option(
names = {"--rpc-http-tls-known-clients-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to file containing clients certificate common name and fingerprint for client authentication")
private final Path rpcHttpTlsKnownClientsFile = null;
@Option(
names = {"--rpc-http-tls-ca-clients-enabled"},
description =
"Enable to accept clients certificate signed by a valid CA for client authentication (default: ${DEFAULT-VALUE})")
private final Boolean isRpcHttpTlsCAClientsEnabled = false;
@Option(
names = {"--rpc-ws-enabled"},
description = "Set to start the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--rpc-ws-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--rpc-ws-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for JSON-RPC WebSocket service to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer rpcWsPort = DEFAULT_WEBSOCKET_PORT;
@Option(
names = {"--rpc-ws-max-active-connections"},
description =
"Maximum number of WebSocket connections allowed for JSON-RPC (default: ${DEFAULT-VALUE}). Once this limit is reached, incoming connections will be rejected.",
arity = "1")
private final Integer rpcWsMaxConnections = DEFAULT_WS_MAX_CONNECTIONS;
@Option(
names = {"--rpc-ws-api", "--rpc-ws-apis"},
paramLabel = "<api name>",
split = ",",
arity = "1..*",
converter = RpcApisConverter.class,
description =
"Comma separated list of APIs to enable on JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final List<RpcApi> rpcWsApis = DEFAULT_JSON_RPC_APIS;
@Option(
names = {"--rpc-ws-authentication-enabled"},
description =
"Require authentication for the JSON-RPC WebSocket service (default: ${DEFAULT-VALUE})")
private final Boolean isRpcWsAuthenticationEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--rpc-ws-authentication-credentials-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Storage file for JSON-RPC WebSocket authentication credentials (default: ${DEFAULT-VALUE})",
arity = "1")
private String rpcWsAuthenticationCredentialsFile = null;
@CommandLine.Option(
names = {"--rpc-ws-authentication-jwt-public-key-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "JWT public key file for JSON-RPC WebSocket authentication",
arity = "1")
private final File rpcWsAuthenticationPublicKeyFile = null;
@Option(
names = {"--privacy-tls-enabled"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Enable TLS for connecting to privacy enclave (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyTlsEnabled = false;
@Option(
names = "--privacy-tls-keystore-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Path to a PKCS#12 formatted keystore; used to enable TLS on inbound connections.")
private final Path privacyKeyStoreFile = null;
@Option(
names = "--privacy-tls-keystore-password-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the password used to decrypt the keystore.")
private final Path privacyKeyStorePasswordFile = null;
@Option(
names = "--privacy-tls-known-enclave-file",
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description = "Path to a file containing the fingerprints of the authorized privacy enclave.")
private final Path privacyTlsKnownEnclaveFile = null;
@Option(
names = {"--metrics-enabled"},
description = "Set to start the metrics exporter (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-protocol"},
description =
"Metrics protocol, one of PROMETHEUS, OPENTELEMETRY or NONE. (default: ${DEFAULT-VALUE})")
private MetricsProtocol metricsProtocol = PROMETHEUS;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port for the metrics exporter to listen on (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPort = DEFAULT_METRICS_PORT;
@Option(
names = {"--metrics-category", "--metrics-categories"},
paramLabel = "<category name>",
split = ",",
arity = "1..*",
description =
"Comma separated list of categories to track metrics for (default: ${DEFAULT-VALUE})")
private final Set<MetricCategory> metricCategories = DEFAULT_METRIC_CATEGORIES;
@Option(
names = {"--metrics-push-enabled"},
description = "Enable the metrics push gateway integration (default: ${DEFAULT-VALUE})")
private final Boolean isMetricsPushEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-host"},
paramLabel = MANDATORY_HOST_FORMAT_HELP,
description = "Host of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPushHost = autoDiscoverDefaultIP().getHostAddress();
@Option(
names = {"--metrics-push-port"},
paramLabel = MANDATORY_PORT_FORMAT_HELP,
description = "Port of the Prometheus Push Gateway for push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushPort = DEFAULT_METRICS_PUSH_PORT;
@Option(
names = {"--metrics-push-interval"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Interval in seconds to push metrics when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer metricsPushInterval = 15;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--metrics-push-prometheus-job"},
description = "Job name to use when in push mode (default: ${DEFAULT-VALUE})",
arity = "1")
private String metricsPrometheusJob = "besu-client";
@Option(
names = {"--host-allowlist"},
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})",
defaultValue = "localhost,127.0.0.1")
private final JsonRPCAllowlistHostsProperty hostsAllowlist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--host-whitelist"},
hidden = true,
paramLabel = "<hostname>[,<hostname>...]... or * or all",
description =
"Deprecated in favor of --host-allowlist. Comma separated list of hostnames to allow for RPC access, or * to accept any host (default: ${DEFAULT-VALUE})")
private final JsonRPCAllowlistHostsProperty hostsWhitelist = new JsonRPCAllowlistHostsProperty();
@Option(
names = {"--logging", "-l"},
paramLabel = "<LOG VERBOSITY LEVEL>",
description = "Logging verbosity levels: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL")
private final Level logLevel = null;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--color-enabled"},
description =
"Force color output to be enabled/disabled (default: colorized only if printing to console)")
private static Boolean colorEnabled = null;
@Option(
names = {"--reorg-logging-threshold"},
description =
"How deep a chain reorganization must be in order for it to be logged (default: ${DEFAULT-VALUE})")
private final Long reorgLoggingThreshold = 6L;
@Option(
names = {"--miner-enabled"},
description = "Set if node will perform mining (default: ${DEFAULT-VALUE})")
private final Boolean isMiningEnabled = false;
@Option(
names = {"--miner-stratum-enabled"},
description = "Set if node will perform Stratum mining (default: ${DEFAULT-VALUE})")
private final Boolean iStratumMiningEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--miner-stratum-host"},
description = "Host for Stratum network mining service (default: ${DEFAULT-VALUE})")
private String stratumNetworkInterface = "0.0.0.0";
@Option(
names = {"--miner-stratum-port"},
description = "Stratum port binding (default: ${DEFAULT-VALUE})")
private final Integer stratumPort = 8008;
@Option(
names = {"--miner-coinbase"},
description =
"Account to which mining rewards are paid. You must specify a valid coinbase if "
+ "mining is enabled using --miner-enabled option",
arity = "1")
private final Address coinbase = null;
@Option(
names = {"--min-gas-price"},
description =
"Minimum price (in Wei) offered by a transaction for it to be included in a mined "
+ "block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei minTransactionGasPrice = DEFAULT_MIN_TRANSACTION_GAS_PRICE;
@Option(
names = {"--rpc-tx-feecap"},
description =
"Maximum transaction fees (in Wei) accepted for transaction submitted through RPC (default: ${DEFAULT-VALUE})",
arity = "1")
private final Wei txFeeCap = DEFAULT_RPC_TX_FEE_CAP;
@Option(
names = {"--min-block-occupancy-ratio"},
description = "Minimum occupancy ratio for a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Double minBlockOccupancyRatio = DEFAULT_MIN_BLOCK_OCCUPANCY_RATIO;
@Option(
names = {"--miner-extra-data"},
description =
"A hex string representing the (32) bytes to be included in the extra data "
+ "field of a mined block (default: ${DEFAULT-VALUE})",
arity = "1")
private final Bytes extraData = DEFAULT_EXTRA_DATA;
@Option(
names = {"--pruning-enabled"},
description =
"Enable disk-space saving optimization that removes old state that is unlikely to be required (default: ${DEFAULT-VALUE})")
private final Boolean pruningEnabled = false;
@Option(
names = {"--permissions-nodes-config-file-enabled"},
description = "Enable node level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-nodes-config-file"},
description =
"Node permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String nodePermissionsConfigFile = null;
@Option(
names = {"--permissions-accounts-config-file-enabled"},
description = "Enable account level permissions (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsEnabled = false;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@CommandLine.Option(
names = {"--permissions-accounts-config-file"},
description =
"Account permissioning config TOML file (default: a file named \"permissions_config.toml\" in the Besu data folder)")
private String accountPermissionsConfigFile = null;
@Option(
names = {"--permissions-nodes-contract-address"},
description = "Address of the node permissioning smart contract",
arity = "1")
private final Address permissionsNodesContractAddress = null;
@Option(
names = {"--permissions-nodes-contract-version"},
description = "Version of the EEA Node Permissioning interface (default: ${DEFAULT-VALUE})")
private final Integer permissionsNodesContractVersion = 1;
@Option(
names = {"--permissions-nodes-contract-enabled"},
description = "Enable node level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsNodesContractEnabled = false;
@Option(
names = {"--permissions-accounts-contract-address"},
description = "Address of the account permissioning smart contract",
arity = "1")
private final Address permissionsAccountsContractAddress = null;
@Option(
names = {"--permissions-accounts-contract-enabled"},
description =
"Enable account level permissions via smart contract (default: ${DEFAULT-VALUE})")
private final Boolean permissionsAccountsContractEnabled = false;
@Option(
names = {"--privacy-enabled"},
description = "Enable private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyEnabled = false;
@Option(
names = {"--privacy-multi-tenancy-enabled"},
description = "Enable multi-tenant private transactions (default: ${DEFAULT-VALUE})")
private final Boolean isPrivacyMultiTenancyEnabled = false;
@Option(
names = {"--revert-reason-enabled"},
description =
"Enable passing the revert reason back through TransactionReceipts (default: ${DEFAULT-VALUE})")
private final Boolean isRevertReasonEnabled = false;
@Option(
names = {"--required-blocks", "--required-block"},
paramLabel = "BLOCK=HASH",
description = "Block number and hash peers are required to have.",
arity = "*",
split = ",")
private final Map<Long, Hash> requiredBlocks = new HashMap<>();
@Option(
names = {"--privacy-url"},
description = "The URL on which the enclave is running")
private final URI privacyUrl = PrivacyParameters.DEFAULT_ENCLAVE_URL;
@Option(
names = {"--privacy-public-key-file"},
description = "The enclave's public key file")
private final File privacyPublicKeyFile = null;
@Option(
names = {"--privacy-precompiled-address"},
description =
"The address to which the privacy pre-compiled contract will be mapped (default: ${DEFAULT-VALUE})",
hidden = true)
private final Integer privacyPrecompiledAddress = Address.PRIVACY;
@Option(
names = {"--privacy-marker-transaction-signing-key-file"},
description =
"The name of a file containing the private key used to sign privacy marker transactions. If unset, each will be signed with a random key.")
private final Path privacyMarkerTransactionSigningKeyPath = null;
@Option(
names = {"--privacy-enable-database-migration"},
description = "Enable private database metadata migration (default: ${DEFAULT-VALUE})")
private final Boolean migratePrivateDatabase = false;
@Option(
names = {"--privacy-flexible-groups-enabled", "--privacy-onchain-groups-enabled"},
description = "Enable flexible (onchain) privacy groups (default: ${DEFAULT-VALUE})")
private final Boolean isFlexiblePrivacyGroupsEnabled = false;
@Option(
names = {"--target-gas-limit"},
description =
"Sets target gas limit per block. If set each block's gas limit will approach this setting over time if the current gas limit is different.")
private final Long targetGasLimit = null;
@Option(
names = {"--tx-pool-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transactions that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer txPoolMaxSize = TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS;
@Option(
names = {"--tx-pool-hashes-max-size"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum number of pending transaction hashes that will be kept in the transaction pool (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pooledTransactionHashesSize =
TransactionPoolConfiguration.MAX_PENDING_TRANSACTIONS_HASHES;
@Option(
names = {"--tx-pool-retention-hours"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
description =
"Maximum retention period of pending transactions in hours (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pendingTxRetentionPeriod =
TransactionPoolConfiguration.DEFAULT_TX_RETENTION_HOURS;
@Option(
names = {"--tx-pool-price-bump"},
paramLabel = MANDATORY_INTEGER_FORMAT_HELP,
converter = PercentageConverter.class,
description =
"Price bump percentage to replace an already existing transaction (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer priceBump = TransactionPoolConfiguration.DEFAULT_PRICE_BUMP.getValue();
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"}) // PicoCLI requires non-final Strings.
@Option(
names = {"--key-value-storage"},
description = "Identity for the key-value storage to be used.",
arity = "1")
private String keyValueStorageName = DEFAULT_KEY_VALUE_STORAGE_NAME;
@SuppressWarnings({"FieldCanBeFinal", "FieldMayBeFinal"})
@Option(
names = {"--security-module"},
paramLabel = "<NAME>",
description = "Identity for the Security Module to be used.",
arity = "1")
private String securityModuleName = DEFAULT_SECURITY_MODULE;
@Option(
names = {"--auto-log-bloom-caching-enabled"},
description = "Enable automatic log bloom caching (default: ${DEFAULT-VALUE})",
arity = "1")
private final Boolean autoLogBloomCachingEnabled = true;
@Option(
names = {"--override-genesis-config"},
paramLabel = "NAME=VALUE",
description = "Overrides configuration values in the genesis file. Use with care.",
arity = "*",
hidden = true,
split = ",")
private final Map<String, String> genesisConfigOverrides =
new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
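  // Hedged example of --override-genesis-config (key names such as chainId are
  // assumptions, not an authoritative list; keys are case-insensitive per the map above):
  //   --override-genesis-config=chainId=2021,londonBlock=0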
@Option(
names = {"--pruning-blocks-retained"},
defaultValue = "1024",
paramLabel = "<INTEGER>",
description =
"Minimum number of recent blocks for which to keep entire world state (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlocksRetained = PrunerConfiguration.DEFAULT_PRUNING_BLOCKS_RETAINED;
@Option(
names = {"--pruning-block-confirmations"},
defaultValue = "10",
paramLabel = "<INTEGER>",
description =
"Minimum number of confirmations on a block before marking begins (default: ${DEFAULT-VALUE})",
arity = "1")
private final Integer pruningBlockConfirmations =
PrunerConfiguration.DEFAULT_PRUNING_BLOCK_CONFIRMATIONS;
@CommandLine.Option(
names = {"--pid-path"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Path to PID file (optional)")
private final Path pidPath = null;
@CommandLine.Option(
names = {"--api-gas-price-blocks"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Number of blocks to consider for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceBlocks = 100L;
@CommandLine.Option(
names = {"--api-gas-price-percentile"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Percentile value to measure for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Double apiGasPricePercentile = 50.0;
@CommandLine.Option(
names = {"--api-gas-price-max"},
paramLabel = MANDATORY_PATH_FORMAT_HELP,
description = "Maximum gas price for eth_gasPrice (default: ${DEFAULT-VALUE})")
private final Long apiGasPriceMax = 500_000_000_000L;
@Option(
names = {"--goquorum-compatibility-enabled"},
hidden = true,
description = "Start Besu in GoQuorum compatibility mode (default: ${DEFAULT-VALUE})")
private final Boolean isGoQuorumCompatibilityMode = false;
@CommandLine.Option(
names = {"--static-nodes-file"},
paramLabel = MANDATORY_FILE_FORMAT_HELP,
description =
"Specifies the static node file containing the static nodes for this node to connect to")
private final Path staticNodesFile = null;
private EthNetworkConfig ethNetworkConfig;
private JsonRpcConfiguration jsonRpcConfiguration;
private GraphQLConfiguration graphQLConfiguration;
private WebSocketConfiguration webSocketConfiguration;
private ApiConfiguration apiConfiguration;
private MetricsConfiguration metricsConfiguration;
private Optional<PermissioningConfiguration> permissioningConfiguration;
private Collection<EnodeURL> staticNodes;
private BesuController besuController;
private BesuConfiguration pluginCommonConfiguration;
private final Supplier<ObservableMetricsSystem> metricsSystem =
Suppliers.memoize(() -> MetricsSystemFactory.create(metricsConfiguration()));
private Vertx vertx;
private EnodeDnsConfiguration enodeDnsConfiguration;
private KeyValueStorageProvider keyValueStorageProvider;
public BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment) {
this(
logger,
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
runnerBuilder,
controllerBuilderFactory,
besuPluginContext,
environment,
new StorageServiceImpl(),
new SecurityModuleServiceImpl());
}
@VisibleForTesting
protected BesuCommand(
final Logger logger,
final Supplier<RlpBlockImporter> rlpBlockImporter,
final Function<BesuController, JsonBlockImporter> jsonBlockImporterFactory,
final Function<Blockchain, RlpBlockExporter> rlpBlockExporterFactory,
final RunnerBuilder runnerBuilder,
final BesuController.Builder controllerBuilderFactory,
final BesuPluginContextImpl besuPluginContext,
final Map<String, String> environment,
final StorageServiceImpl storageService,
final SecurityModuleServiceImpl securityModuleService) {
this.logger = logger;
this.rlpBlockImporter = rlpBlockImporter;
this.rlpBlockExporterFactory = rlpBlockExporterFactory;
this.jsonBlockImporterFactory = jsonBlockImporterFactory;
this.runnerBuilder = runnerBuilder;
this.controllerBuilderFactory = controllerBuilderFactory;
this.besuPluginContext = besuPluginContext;
this.environment = environment;
this.storageService = storageService;
this.securityModuleService = securityModuleService;
pluginCommonConfiguration = new BesuCommandConfigurationService();
besuPluginContext.addService(BesuConfiguration.class, pluginCommonConfiguration);
}
public void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final InputStream in,
final String... args) {
commandLine =
new CommandLine(this, new BesuCommandCustomFactory(besuPluginContext))
.setCaseInsensitiveEnumValuesAllowed(true);
enableExperimentalEIPs();
addSubCommands(resultHandler, in);
registerConverters();
handleUnstableOptions();
preparePlugins();
parse(resultHandler, exceptionHandler, args);
}
@Override
public void run() {
try {
configureLogging(true);
configureNativeLibs();
logger.info("Starting Besu version: {}", BesuInfo.nodeName(identityString));
// Need to create vertx after cmdline has been parsed, such that metricsSystem is configurable
vertx = createVertx(createVertxOptions(metricsSystem.get()));
final BesuCommand controller = validateOptions().configure().controller();
preSynchronizationTaskRunner.runTasks(controller.besuController);
controller.startPlugins().startSynchronization();
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage(), e);
}
}
@VisibleForTesting
void setBesuConfiguration(final BesuConfiguration pluginCommonConfiguration) {
this.pluginCommonConfiguration = pluginCommonConfiguration;
}
private void enableExperimentalEIPs() {
// Usage of static command line flags is strictly reserved for experimental EIPs
commandLine.addMixin("experimentalEIPs", ExperimentalEIPs.class);
}
private void addSubCommands(
final AbstractParseResultHandler<List<Object>> resultHandler, final InputStream in) {
commandLine.addSubcommand(
BlocksSubCommand.COMMAND_NAME,
new BlocksSubCommand(
rlpBlockImporter,
jsonBlockImporterFactory,
rlpBlockExporterFactory,
resultHandler.out()));
commandLine.addSubcommand(
PublicKeySubCommand.COMMAND_NAME,
new PublicKeySubCommand(resultHandler.out(), this::buildNodeKey));
commandLine.addSubcommand(
PasswordSubCommand.COMMAND_NAME, new PasswordSubCommand(resultHandler.out()));
commandLine.addSubcommand(RetestethSubCommand.COMMAND_NAME, new RetestethSubCommand());
commandLine.addSubcommand(
RLPSubCommand.COMMAND_NAME, new RLPSubCommand(resultHandler.out(), in));
commandLine.addSubcommand(
OperatorSubCommand.COMMAND_NAME, new OperatorSubCommand(resultHandler.out()));
}
private void registerConverters() {
commandLine.registerConverter(Address.class, Address::fromHexStringStrict);
commandLine.registerConverter(Bytes.class, Bytes::fromHexString);
commandLine.registerConverter(Level.class, Level::valueOf);
commandLine.registerConverter(SyncMode.class, SyncMode::fromString);
commandLine.registerConverter(MetricsProtocol.class, MetricsProtocol::fromString);
commandLine.registerConverter(UInt256.class, (arg) -> UInt256.valueOf(new BigInteger(arg)));
commandLine.registerConverter(Wei.class, (arg) -> Wei.of(Long.parseUnsignedLong(arg)));
commandLine.registerConverter(PositiveNumber.class, PositiveNumber::fromString);
commandLine.registerConverter(Hash.class, Hash::fromHexString);
commandLine.registerConverter(Optional.class, Optional::of);
commandLine.registerConverter(Double.class, Double::parseDouble);
metricCategoryConverter.addCategories(BesuMetricCategory.class);
metricCategoryConverter.addCategories(StandardMetricCategory.class);
commandLine.registerConverter(MetricCategory.class, metricCategoryConverter);
}
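  // Sketch of how a registered converter is applied (illustrative, not part of this
  // class): for "--min-gas-price 1000000000" picocli invokes the Wei converter above,
  // i.e. Wei.of(Long.parseUnsignedLong("1000000000")), before assigning the field.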
private void handleUnstableOptions() {
// Add unstable options
final ImmutableMap.Builder<String, Object> unstableOptionsBuild = ImmutableMap.builder();
final ImmutableMap<String, Object> unstableOptions =
unstableOptionsBuild
.put("Ethereum Wire Protocol", unstableEthProtocolOptions)
.put("Metrics", unstableMetricsCLIOptions)
.put("P2P Network", unstableNetworkingOptions)
.put("RPC", unstableRPCOptions)
.put("DNS Configuration", unstableDnsOptions)
.put("NAT Configuration", unstableNatOptions)
.put("Synchronizer", unstableSynchronizerOptions)
.put("TransactionPool", unstableTransactionPoolOptions)
.put("Ethstats", unstableEthstatsOptions)
.put("Mining", unstableMiningOptions)
.put("Native Library", unstableNativeLibraryOptions)
.put("Data Storage Options", unstableDataStorageOptions)
.put("Launcher", unstableLauncherOptions)
.build();
UnstableOptionsSubCommand.createUnstableOptions(commandLine, unstableOptions);
}
private void preparePlugins() {
besuPluginContext.addService(PicoCLIOptions.class, new PicoCLIOptionsImpl(commandLine));
besuPluginContext.addService(SecurityModuleService.class, securityModuleService);
besuPluginContext.addService(StorageService.class, storageService);
besuPluginContext.addService(MetricCategoryRegistry.class, metricCategoryRegistry);
// register built-in plugins
new RocksDBPlugin().register(besuPluginContext);
new InMemoryStoragePlugin().register(besuPluginContext);
besuPluginContext.registerPlugins(pluginsDir());
metricCategoryRegistry
.getMetricCategories()
.forEach(metricCategoryConverter::addRegistryCategory);
// register default security module
securityModuleService.register(
DEFAULT_SECURITY_MODULE, Suppliers.memoize(this::defaultSecurityModule));
}
private SecurityModule defaultSecurityModule() {
return new KeyPairSecurityModule(loadKeyPair());
}
@VisibleForTesting
KeyPair loadKeyPair() {
return KeyPairUtil.loadKeyPair(nodePrivateKeyFile());
}
private void parse(
final AbstractParseResultHandler<List<Object>> resultHandler,
final BesuExceptionHandler exceptionHandler,
final String... args) {
    // Create a handler that will search for a config file option, use it for
    // default values, and then run regular parsing of the remaining options.
final ConfigOptionSearchAndRunHandler configParsingHandler =
new ConfigOptionSearchAndRunHandler(
resultHandler, exceptionHandler, CONFIG_FILE_OPTION_NAME, environment);
ParseArgsHelper.getLauncherOptions(unstableLauncherOptions, args);
if (unstableLauncherOptions.isLauncherMode()
|| unstableLauncherOptions.isLauncherModeForced()) {
try {
final ImmutableLauncherConfig launcherConfig =
ImmutableLauncherConfig.builder()
.launcherScript(BesuCommand.class.getResourceAsStream("launcher.json"))
.addCommandClasses(
this, unstableNatOptions, unstableEthstatsOptions, unstableMiningOptions)
.isLauncherForced(unstableLauncherOptions.isLauncherModeForced())
.build();
final File file = new LauncherManager(launcherConfig).run();
logger.info("Config file location : {}", file.getAbsolutePath());
commandLine.parseWithHandlers(
configParsingHandler,
exceptionHandler,
String.format("%s=%s", CONFIG_FILE_OPTION_NAME, file.getAbsolutePath()));
} catch (LauncherException e) {
logger.warn("Unable to run the launcher {}", e.getMessage());
}
} else {
commandLine.parseWithHandlers(configParsingHandler, exceptionHandler, args);
}
}
private void startSynchronization() {
synchronize(
besuController,
p2pEnabled,
peerDiscoveryEnabled,
ethNetworkConfig,
maxPeers,
p2pHost,
p2pInterface,
p2pPort,
graphQLConfiguration,
jsonRpcConfiguration,
webSocketConfiguration,
apiConfiguration,
metricsConfiguration,
permissioningConfiguration,
staticNodes,
pidPath);
}
private BesuCommand startPlugins() {
besuPluginContext.addService(
BesuEvents.class,
new BesuEventsImpl(
besuController.getProtocolContext().getBlockchain(),
besuController.getProtocolManager().getBlockBroadcaster(),
besuController.getTransactionPool(),
besuController.getSyncState()));
besuPluginContext.addService(MetricsSystem.class, getMetricsSystem());
besuController.getAdditionalPluginServices().appendPluginServices(besuPluginContext);
besuPluginContext.startPlugins();
return this;
}
public void configureLogging(final boolean announce) {
    // Reconfigure logging in case color output was enabled/disabled
Configurator.reconfigure();
// set log level per CLI flags
if (logLevel != null) {
if (announce) {
System.out.println("Setting logging level to " + logLevel.name());
}
Configurator.setAllLevels("", logLevel);
}
}
public static Optional<Boolean> getColorEnabled() {
return Optional.ofNullable(colorEnabled);
}
private void configureNativeLibs() {
if (unstableNativeLibraryOptions.getNativeAltbn128()) {
AbstractAltBnPrecompiledContract.enableNative();
}
if (unstableNativeLibraryOptions.getNativeSecp256k1()) {
SignatureAlgorithmFactory.getInstance().enableNative();
}
}
private BesuCommand validateOptions() {
issueOptionWarnings();
validateP2PInterface(p2pInterface);
validateMiningParams();
validateNatParams();
validateNetStatsParams();
validateDnsOptionsParams();
return this;
}
@SuppressWarnings("ConstantConditions")
private void validateMiningParams() {
if (isMiningEnabled && coinbase == null) {
throw new ParameterException(
this.commandLine,
"Unable to mine without a valid coinbase. Either disable mining (remove --miner-enabled) "
+ "or specify the beneficiary of mining (via --miner-coinbase <Address>)");
}
if (!isMiningEnabled && iStratumMiningEnabled) {
throw new ParameterException(
this.commandLine,
"Unable to mine with Stratum if mining is disabled. Either disable Stratum mining (remove --miner-stratum-enabled) "
+ "or specify mining is enabled (--miner-enabled)");
}
}
protected void validateP2PInterface(final String p2pInterface) {
final String failMessage = "The provided --p2p-interface is not available: " + p2pInterface;
try {
if (!NetworkUtility.isNetworkInterfaceAvailable(p2pInterface)) {
throw new ParameterException(commandLine, failMessage);
}
} catch (final UnknownHostException | SocketException e) {
throw new ParameterException(commandLine, failMessage, e);
}
}
@SuppressWarnings("ConstantConditions")
private void validateNatParams() {
if (!(natMethod.equals(NatMethod.AUTO) || natMethod.equals(NatMethod.KUBERNETES))
&& !unstableNatOptions
.getNatManagerServiceName()
.equals(DEFAULT_BESU_SERVICE_NAME_FILTER)) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-kube-service-name` parameter is only used in kubernetes mode. Either remove --Xnat-kube-service-name"
+ " or select the KUBERNETES mode (via --nat--method=KUBERNETES)");
}
if (natMethod.equals(NatMethod.AUTO) && !unstableNatOptions.getNatMethodFallbackEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xnat-method-fallback-enabled` parameter cannot be used in AUTO mode. Either remove --Xnat-method-fallback-enabled"
+ " or select another mode (via --nat--method=XXXX)");
}
}
private void validateNetStatsParams() {
if (Strings.isNullOrEmpty(unstableEthstatsOptions.getEthstatsUrl())
&& !unstableEthstatsOptions.getEthstatsContact().isEmpty()) {
throw new ParameterException(
this.commandLine,
"The `--Xethstats-contact` requires ethstats server URL to be provided. Either remove --Xethstats-contact"
+ " or provide an url (via --Xethstats=nodename:secret@host:port)");
}
}
private void validateDnsOptionsParams() {
if (!unstableDnsOptions.getDnsEnabled() && unstableDnsOptions.getDnsUpdateEnabled()) {
throw new ParameterException(
this.commandLine,
"The `--Xdns-update-enabled` requires dns to be enabled. Either remove --Xdns-update-enabled"
+ " or specify dns is enabled (--Xdns-enabled)");
}
}
private GenesisConfigOptions readGenesisConfigOptions() {
final GenesisConfigOptions genesisConfigOptions;
try {
final GenesisConfigFile genesisConfigFile = GenesisConfigFile.fromConfig(genesisConfig());
genesisConfigOptions = genesisConfigFile.getConfigOptions(genesisConfigOverrides);
} catch (final Exception e) {
throw new IllegalStateException("Unable to read genesis file for GoQuorum options", e);
}
return genesisConfigOptions;
}
private void issueOptionWarnings() {
// Check that P2P options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--p2p-enabled",
!p2pEnabled,
asList(
"--bootnodes",
"--discovery-enabled",
"--max-peers",
"--banned-node-id",
"--banned-node-ids",
"--p2p-host",
"--p2p-interface",
"--p2p-port",
"--remote-connections-max-percentage"));
// Check that mining options are able to work
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--miner-enabled",
!isMiningEnabled,
asList(
"--miner-coinbase",
"--min-gas-price",
"--min-block-occupancy-ratio",
"--miner-extra-data",
"--miner-stratum-enabled",
"--Xminer-remote-sealers-limit",
"--Xminer-remote-sealers-hashrate-ttl"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--sync-mode",
!SyncMode.FAST.equals(syncMode),
singletonList("--fast-sync-min-peers"));
if (!securityModuleName.equals(DEFAULT_SECURITY_MODULE) && nodePrivateKeyFile != null) {
logger.warn(
DEPENDENCY_WARNING_MSG,
"--node-private-key-file",
"--security-module=" + DEFAULT_SECURITY_MODULE);
}
}
private BesuCommand configure() throws Exception {
checkPortClash();
syncMode =
Optional.ofNullable(syncMode)
.orElse(
genesisFile == null && !isPrivacyEnabled && network != NetworkName.DEV
? SyncMode.FAST
: SyncMode.FULL);
ethNetworkConfig = updateNetworkConfig(getNetwork());
if (isGoQuorumCompatibilityMode) {
checkGoQuorumCompatibilityConfig(ethNetworkConfig);
}
jsonRpcConfiguration = jsonRpcConfiguration();
graphQLConfiguration = graphQLConfiguration();
webSocketConfiguration = webSocketConfiguration();
apiConfiguration = apiConfiguration();
    // hostsWhitelist is a hidden option. If it is specified, add the list to hostsAllowlist
if (!hostsWhitelist.isEmpty()) {
// if allowlist == default values, remove the default values
if (hostsAllowlist.size() == 2
&& hostsAllowlist.containsAll(List.of("localhost", "127.0.0.1"))) {
hostsAllowlist.removeAll(List.of("localhost", "127.0.0.1"));
}
hostsAllowlist.addAll(hostsWhitelist);
}
permissioningConfiguration = permissioningConfiguration();
staticNodes = loadStaticNodes();
logger.info("Connecting to {} static nodes.", staticNodes.size());
logger.trace("Static Nodes = {}", staticNodes);
final List<EnodeURL> enodeURIs = ethNetworkConfig.getBootNodes();
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(enodeURIs, p));
permissioningConfiguration
.flatMap(PermissioningConfiguration::getLocalConfig)
.ifPresent(p -> ensureAllNodesAreInAllowlist(staticNodes, p));
metricsConfiguration = metricsConfiguration();
logger.info("Security Module: {}", securityModuleName);
return this;
}
private GoQuorumPrivacyParameters configureGoQuorumPrivacy(
final KeyValueStorageProvider storageProvider) {
return new GoQuorumPrivacyParameters(
createGoQuorumEnclave(),
readEnclaveKey(),
storageProvider.createGoQuorumPrivateStorage(),
createPrivateWorldStateArchive(storageProvider));
}
private GoQuorumEnclave createGoQuorumEnclave() {
final EnclaveFactory enclaveFactory = new EnclaveFactory(Vertx.vertx());
if (privacyKeyStoreFile != null) {
return enclaveFactory.createGoQuorumEnclave(
privacyUrl, privacyKeyStoreFile, privacyKeyStorePasswordFile, privacyTlsKnownEnclaveFile);
} else {
return enclaveFactory.createGoQuorumEnclave(privacyUrl);
}
}
private String readEnclaveKey() {
final String key;
try {
key = Files.asCharSource(privacyPublicKeyFile, UTF_8).read();
} catch (final Exception e) {
throw new ParameterException(
this.commandLine,
"--privacy-public-key-file must be set when --goquorum-compatibility-enabled is set to true.",
e);
}
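    // 32 raw bytes base64-encode to exactly 44 characters (43 data characters plus one
    // '=' pad), so any other length cannot decode to a valid 32 byte public key.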
if (key.length() != 44) {
throw new IllegalArgumentException(
"Contents of enclave public key file needs to be 44 characters long to decode to a valid 32 byte public key.");
}
// throws exception if invalid base 64
Base64.getDecoder().decode(key);
return key;
}
private NetworkName getNetwork() {
    // noinspection ConstantConditions network is not always null but is injected by
    // PicoCLI when the option is used
return network == null ? MAINNET : network;
}
private void ensureAllNodesAreInAllowlist(
final Collection<EnodeURL> enodeAddresses,
final LocalPermissioningConfiguration permissioningConfiguration) {
try {
PermissioningConfigurationValidator.areAllNodesAreInAllowlist(
enodeAddresses, permissioningConfiguration);
} catch (final Exception e) {
throw new ParameterException(this.commandLine, e.getMessage());
}
}
private BesuCommand controller() {
besuController = buildController();
return this;
}
public BesuController buildController() {
try {
return getControllerBuilder().build();
} catch (final Exception e) {
throw new ExecutionException(this.commandLine, e.getMessage(), e);
}
}
public BesuControllerBuilder getControllerBuilder() {
final KeyValueStorageProvider storageProvider = keyValueStorageProvider(keyValueStorageName);
return controllerBuilderFactory
.fromEthNetworkConfig(updateNetworkConfig(getNetwork()), genesisConfigOverrides)
.synchronizerConfiguration(buildSyncConfig())
.ethProtocolConfiguration(unstableEthProtocolOptions.toDomainObject())
.dataDirectory(dataDir())
.miningParameters(
new MiningParameters(
coinbase,
minTransactionGasPrice,
extraData,
isMiningEnabled,
iStratumMiningEnabled,
stratumNetworkInterface,
stratumPort,
unstableMiningOptions.getStratumExtranonce(),
Optional.empty(),
minBlockOccupancyRatio,
unstableMiningOptions.getRemoteSealersLimit(),
unstableMiningOptions.getRemoteSealersTimeToLive()))
.transactionPoolConfiguration(buildTransactionPoolConfiguration())
.nodeKey(buildNodeKey())
.metricsSystem(metricsSystem.get())
.privacyParameters(privacyParameters(storageProvider))
.clock(Clock.systemUTC())
.isRevertReasonEnabled(isRevertReasonEnabled)
.storageProvider(storageProvider)
.isPruningEnabled(isPruningEnabled())
.pruningConfiguration(
new PrunerConfiguration(pruningBlockConfirmations, pruningBlocksRetained))
.genesisConfigOverrides(genesisConfigOverrides)
.gasLimitCalculator(
Optional.ofNullable(targetGasLimit)
.<GasLimitCalculator>map(TargetingGasLimitCalculator::new)
.orElse(GasLimitCalculator.constant()))
.requiredBlocks(requiredBlocks)
.reorgLoggingThreshold(reorgLoggingThreshold)
.dataStorageConfiguration(unstableDataStorageOptions.toDomainObject());
}
private GraphQLConfiguration graphQLConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--graphql-http-enabled",
!isGraphQLHttpEnabled,
asList("--graphql-http-cors-origins", "--graphql-http-host", "--graphql-http-port"));
final GraphQLConfiguration graphQLConfiguration = GraphQLConfiguration.createDefault();
graphQLConfiguration.setEnabled(isGraphQLHttpEnabled);
graphQLConfiguration.setHost(graphQLHttpHost);
graphQLConfiguration.setPort(graphQLHttpPort);
graphQLConfiguration.setHostsAllowlist(hostsAllowlist);
graphQLConfiguration.setCorsAllowedDomains(graphQLHttpCorsAllowedOrigins);
graphQLConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return graphQLConfiguration;
}
private JsonRpcConfiguration jsonRpcConfiguration() {
checkRpcTlsClientAuthOptionsDependencies();
checkRpcTlsOptionsDependencies();
checkRpcHttpOptionsDependencies();
if (isRpcHttpAuthenticationEnabled
&& rpcHttpAuthenticationCredentialsFile() == null
&& rpcHttpAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC HTTP endpoint without a supplied credentials file or authentication public key file");
}
final JsonRpcConfiguration jsonRpcConfiguration = JsonRpcConfiguration.createDefault();
jsonRpcConfiguration.setEnabled(isRpcHttpEnabled);
jsonRpcConfiguration.setHost(rpcHttpHost);
jsonRpcConfiguration.setPort(rpcHttpPort);
jsonRpcConfiguration.setMaxActiveConnections(rpcHttpMaxConnections);
jsonRpcConfiguration.setCorsAllowedDomains(rpcHttpCorsAllowedOrigins);
jsonRpcConfiguration.setRpcApis(rpcHttpApis.stream().distinct().collect(Collectors.toList()));
jsonRpcConfiguration.setHostsAllowlist(hostsAllowlist);
jsonRpcConfiguration.setAuthenticationEnabled(isRpcHttpAuthenticationEnabled);
jsonRpcConfiguration.setAuthenticationCredentialsFile(rpcHttpAuthenticationCredentialsFile());
jsonRpcConfiguration.setAuthenticationPublicKeyFile(rpcHttpAuthenticationPublicKeyFile);
jsonRpcConfiguration.setTlsConfiguration(rpcHttpTlsConfiguration());
jsonRpcConfiguration.setHttpTimeoutSec(unstableRPCOptions.getHttpTimeoutSec());
return jsonRpcConfiguration;
}
private void checkRpcHttpOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-enabled",
!isRpcHttpEnabled,
asList(
"--rpc-http-api",
"--rpc-http-apis",
"--rpc-http-cors-origins",
"--rpc-http-host",
"--rpc-http-port",
"--rpc-http-max-active-connections",
"--rpc-http-authentication-enabled",
"--rpc-http-authentication-credentials-file",
"--rpc-http-authentication-public-key-file",
"--rpc-http-tls-enabled",
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
}
private void checkRpcTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-enabled",
!isRpcHttpTlsEnabled,
asList(
"--rpc-http-tls-keystore-file",
"--rpc-http-tls-keystore-password-file",
"--rpc-http-tls-client-auth-enabled",
"--rpc-http-tls-known-clients-file",
"--rpc-http-tls-ca-clients-enabled"));
}
private void checkRpcTlsClientAuthOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-http-tls-client-auth-enabled",
!isRpcHttpTlsClientAuthEnabled,
asList("--rpc-http-tls-known-clients-file", "--rpc-http-tls-ca-clients-enabled"));
}
private void checkPrivacyTlsOptionsDependencies() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-tls-enabled",
!isPrivacyTlsEnabled,
asList(
"--privacy-tls-keystore-file",
"--privacy-tls-keystore-password-file",
"--privacy-tls-known-enclave-file"));
}
private Optional<TlsConfiguration> rpcHttpTlsConfiguration() {
if (!isRpcTlsConfigurationRequired()) {
return Optional.empty();
}
if (rpcHttpTlsKeyStoreFile == null) {
throw new ParameterException(
commandLine, "Keystore file is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (rpcHttpTlsKeyStorePasswordFile == null) {
throw new ParameterException(
commandLine,
"File containing password to unlock keystore is required when TLS is enabled for JSON-RPC HTTP endpoint");
}
if (isRpcHttpTlsClientAuthEnabled
&& !isRpcHttpTlsCAClientsEnabled
&& rpcHttpTlsKnownClientsFile == null) {
throw new ParameterException(
commandLine,
"Known-clients file must be specified or CA clients must be enabled when TLS client authentication is enabled for JSON-RPC HTTP endpoint");
}
return Optional.of(
TlsConfiguration.Builder.aTlsConfiguration()
.withKeyStorePath(rpcHttpTlsKeyStoreFile)
.withKeyStorePasswordSupplier(
new FileBasedPasswordProvider(rpcHttpTlsKeyStorePasswordFile))
.withClientAuthConfiguration(rpcHttpTlsClientAuthConfiguration())
.build());
}
private TlsClientAuthConfiguration rpcHttpTlsClientAuthConfiguration() {
if (isRpcHttpTlsClientAuthEnabled) {
return TlsClientAuthConfiguration.Builder.aTlsClientAuthConfiguration()
.withKnownClientsFile(rpcHttpTlsKnownClientsFile)
.withCaClientsEnabled(isRpcHttpTlsCAClientsEnabled)
.build();
}
return null;
}
private boolean isRpcTlsConfigurationRequired() {
return isRpcHttpEnabled && isRpcHttpTlsEnabled;
}
private WebSocketConfiguration webSocketConfiguration() {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--rpc-ws-enabled",
!isRpcWsEnabled,
asList(
"--rpc-ws-api",
"--rpc-ws-apis",
"--rpc-ws-host",
"--rpc-ws-port",
"--rpc-ws-max-active-connections",
"--rpc-ws-authentication-enabled",
"--rpc-ws-authentication-credentials-file",
"--rpc-ws-authentication-public-key-file"));
if (isRpcWsAuthenticationEnabled
&& rpcWsAuthenticationCredentialsFile() == null
&& rpcWsAuthenticationPublicKeyFile == null) {
throw new ParameterException(
commandLine,
"Unable to authenticate JSON-RPC WebSocket endpoint without a supplied credentials file or authentication public key file");
}
final WebSocketConfiguration webSocketConfiguration = WebSocketConfiguration.createDefault();
webSocketConfiguration.setEnabled(isRpcWsEnabled);
webSocketConfiguration.setHost(rpcWsHost);
webSocketConfiguration.setPort(rpcWsPort);
webSocketConfiguration.setMaxActiveConnections(rpcWsMaxConnections);
webSocketConfiguration.setRpcApis(rpcWsApis);
webSocketConfiguration.setAuthenticationEnabled(isRpcWsAuthenticationEnabled);
webSocketConfiguration.setAuthenticationCredentialsFile(rpcWsAuthenticationCredentialsFile());
webSocketConfiguration.setHostsAllowlist(hostsAllowlist);
webSocketConfiguration.setAuthenticationPublicKeyFile(rpcWsAuthenticationPublicKeyFile);
webSocketConfiguration.setTimeoutSec(unstableRPCOptions.getWsTimeoutSec());
return webSocketConfiguration;
}
private ApiConfiguration apiConfiguration() {
return ImmutableApiConfiguration.builder()
.gasPriceBlocks(apiGasPriceBlocks)
.gasPricePercentile(apiGasPricePercentile)
.gasPriceMin(minTransactionGasPrice.toLong())
.gasPriceMax(apiGasPriceMax)
.build();
}
public MetricsConfiguration metricsConfiguration() {
if (isMetricsEnabled && isMetricsPushEnabled) {
throw new ParameterException(
this.commandLine,
"--metrics-enabled option and --metrics-push-enabled option can't be used at the same "
+ "time. Please refer to CLI reference for more details about this constraint.");
}
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-enabled",
!isMetricsEnabled,
asList("--metrics-host", "--metrics-port"));
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--metrics-push-enabled",
!isMetricsPushEnabled,
asList(
"--metrics-push-host",
"--metrics-push-port",
"--metrics-push-interval",
"--metrics-push-prometheus-job"));
return unstableMetricsCLIOptions
.toDomainObject()
.enabled(isMetricsEnabled)
.host(metricsHost)
.port(metricsPort)
.protocol(metricsProtocol)
.metricCategories(metricCategories)
.pushEnabled(isMetricsPushEnabled)
.pushHost(metricsPushHost)
.pushPort(metricsPushPort)
.pushInterval(metricsPushInterval)
.hostsAllowlist(hostsAllowlist)
.prometheusJob(metricsPrometheusJob)
.build();
}
private Optional<PermissioningConfiguration> permissioningConfiguration() throws Exception {
if (!(localPermissionsEnabled() || contractPermissionsEnabled())) {
if (rpcHttpApis.contains(RpcApis.PERM) || rpcWsApis.contains(RpcApis.PERM)) {
logger.warn(
"Permissions are disabled. Cannot enable PERM APIs when not using Permissions.");
}
return Optional.empty();
}
final Optional<LocalPermissioningConfiguration> localPermissioningConfigurationOptional;
if (localPermissionsEnabled()) {
final Optional<String> nodePermissioningConfigFile =
Optional.ofNullable(nodePermissionsConfigFile);
final Optional<String> accountPermissioningConfigFile =
Optional.ofNullable(accountPermissionsConfigFile);
final LocalPermissioningConfiguration localPermissioningConfiguration =
PermissioningConfigurationBuilder.permissioningConfiguration(
permissionsNodesEnabled,
getEnodeDnsConfiguration(),
nodePermissioningConfigFile.orElse(getDefaultPermissioningFilePath()),
permissionsAccountsEnabled,
accountPermissioningConfigFile.orElse(getDefaultPermissioningFilePath()));
localPermissioningConfigurationOptional = Optional.of(localPermissioningConfiguration);
} else {
if (nodePermissionsConfigFile != null && !permissionsNodesEnabled) {
logger.warn(
"Node permissioning config file set {} but no permissions enabled",
nodePermissionsConfigFile);
}
if (accountPermissionsConfigFile != null && !permissionsAccountsEnabled) {
logger.warn(
"Account permissioning config file set {} but no permissions enabled",
accountPermissionsConfigFile);
}
localPermissioningConfigurationOptional = Optional.empty();
}
final SmartContractPermissioningConfiguration smartContractPermissioningConfiguration =
SmartContractPermissioningConfiguration.createDefault();
if (permissionsNodesContractEnabled) {
if (permissionsNodesContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No node permissioning contract address specified. Cannot enable smart contract based node permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractNodeAllowlistEnabled(
permissionsNodesContractEnabled);
smartContractPermissioningConfiguration.setNodeSmartContractAddress(
permissionsNodesContractAddress);
smartContractPermissioningConfiguration.setNodeSmartContractInterfaceVersion(
permissionsNodesContractVersion);
}
} else if (permissionsNodesContractAddress != null) {
logger.warn(
"Node permissioning smart contract address set {} but smart contract node permissioning is disabled.",
permissionsNodesContractAddress);
}
if (permissionsAccountsContractEnabled) {
if (permissionsAccountsContractAddress == null) {
throw new ParameterException(
this.commandLine,
"No account permissioning contract address specified. Cannot enable smart contract based account permissioning.");
} else {
smartContractPermissioningConfiguration.setSmartContractAccountAllowlistEnabled(
permissionsAccountsContractEnabled);
smartContractPermissioningConfiguration.setAccountSmartContractAddress(
permissionsAccountsContractAddress);
}
} else if (permissionsAccountsContractAddress != null) {
logger.warn(
"Account permissioning smart contract address set {} but smart contract account permissioning is disabled.",
permissionsAccountsContractAddress);
}
final PermissioningConfiguration permissioningConfiguration =
new PermissioningConfiguration(
localPermissioningConfigurationOptional,
Optional.of(smartContractPermissioningConfiguration),
quorumPermissioningConfig());
return Optional.of(permissioningConfiguration);
}
private Optional<GoQuorumPermissioningConfiguration> quorumPermissioningConfig() {
if (!isGoQuorumCompatibilityMode) {
return Optional.empty();
}
try {
final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions();
final OptionalLong qip714BlockNumber = genesisConfigOptions.getQip714BlockNumber();
return Optional.of(
GoQuorumPermissioningConfiguration.enabled(
qip714BlockNumber.orElse(QIP714_DEFAULT_BLOCK)));
} catch (final Exception e) {
throw new IllegalStateException("Error reading GoQuorum permissioning options", e);
}
}
private boolean localPermissionsEnabled() {
return permissionsAccountsEnabled || permissionsNodesEnabled;
}
private boolean contractPermissionsEnabled() {
return permissionsNodesContractEnabled || permissionsAccountsContractEnabled;
}
private PrivacyParameters privacyParameters(final KeyValueStorageProvider storageProvider) {
CommandLineUtils.checkOptionDependencies(
logger,
commandLine,
"--privacy-enabled",
!isPrivacyEnabled,
asList(
"--privacy-url",
"--privacy-public-key-file",
"--privacy-multi-tenancy-enabled",
"--privacy-tls-enabled"));
checkPrivacyTlsOptionsDependencies();
final PrivacyParameters.Builder privacyParametersBuilder = new PrivacyParameters.Builder();
if (isPrivacyEnabled) {
final String errorSuffix = "cannot be enabled with privacy.";
if (syncMode == SyncMode.FAST) {
throw new ParameterException(commandLine, String.format("%s %s", "Fast sync", errorSuffix));
}
if (isPruningEnabled()) {
throw new ParameterException(commandLine, String.format("%s %s", "Pruning", errorSuffix));
}
if (isGoQuorumCompatibilityMode) {
throw new ParameterException(
commandLine, String.format("%s %s", "GoQuorum mode", errorSuffix));
}
if (isPrivacyMultiTenancyEnabled
&& !jsonRpcConfiguration.isAuthenticationEnabled()
&& !webSocketConfiguration.isAuthenticationEnabled()) {
throw new ParameterException(
commandLine,
"Privacy multi-tenancy requires either http authentication to be enabled or WebSocket authentication to be enabled");
}
privacyParametersBuilder.setEnabled(true);
privacyParametersBuilder.setEnclaveUrl(privacyUrl);
privacyParametersBuilder.setMultiTenancyEnabled(isPrivacyMultiTenancyEnabled);
privacyParametersBuilder.setOnchainPrivacyGroupsEnabled(isFlexiblePrivacyGroupsEnabled);
final boolean hasPrivacyPublicKey = privacyPublicKeyFile != null;
if (hasPrivacyPublicKey && !isPrivacyMultiTenancyEnabled) {
try {
privacyParametersBuilder.setEnclavePublicKeyUsingFile(privacyPublicKeyFile);
} catch (final IOException e) {
throw new ParameterException(
commandLine, "Problem with privacy-public-key-file: " + e.getMessage(), e);
} catch (final IllegalArgumentException e) {
throw new ParameterException(
commandLine, "Contents of privacy-public-key-file invalid: " + e.getMessage(), e);
}
} else if (hasPrivacyPublicKey) {
throw new ParameterException(
commandLine, "Privacy multi-tenancy and privacy public key cannot be used together");
} else if (!isPrivacyMultiTenancyEnabled) {
throw new ParameterException(
commandLine, "Please specify Enclave public key file path to enable privacy");
}
if (Wei.ZERO.compareTo(minTransactionGasPrice) < 0) {
// if gas is required, cannot use random keys to sign private tx
// ie --privacy-marker-transaction-signing-key-file must be set
if (privacyMarkerTransactionSigningKeyPath == null) {
throw new ParameterException(
commandLine,
"Not a free gas network. --privacy-marker-transaction-signing-key-file must be specified and must be a funded account. Private transactions cannot be signed by random (non-funded) accounts in paid gas networks");
}
}
if (!Address.PRIVACY.equals(privacyPrecompiledAddress)) {
logger.warn(
"--privacy-precompiled-address option is deprecated. This address is derived, based on --privacy-onchain-groups-enabled.");
}
privacyParametersBuilder.setPrivateKeyPath(privacyMarkerTransactionSigningKeyPath);
privacyParametersBuilder.setStorageProvider(
privacyKeyStorageProvider(keyValueStorageName + "-privacy"));
if (isPrivacyTlsEnabled) {
privacyParametersBuilder.setPrivacyKeyStoreFile(privacyKeyStoreFile);
privacyParametersBuilder.setPrivacyKeyStorePasswordFile(privacyKeyStorePasswordFile);
privacyParametersBuilder.setPrivacyTlsKnownEnclaveFile(privacyTlsKnownEnclaveFile);
}
privacyParametersBuilder.setEnclaveFactory(new EnclaveFactory(vertx));
} else if (isGoQuorumCompatibilityMode) {
privacyParametersBuilder.setGoQuorumPrivacyParameters(
Optional.of(configureGoQuorumPrivacy(storageProvider)));
}
if (!isPrivacyEnabled && anyPrivacyApiEnabled()) {
logger.warn("Privacy is disabled. Cannot use EEA/PRIV API methods when not using Privacy.");
}
if (!isGoQuorumCompatibilityMode
&& (rpcHttpApis.contains(RpcApis.GOQUORUM) || rpcWsApis.contains(RpcApis.GOQUORUM))) {
logger.warn("Cannot use GOQUORUM API methods when not in GoQuorum mode.");
}
final PrivacyParameters privacyParameters = privacyParametersBuilder.build();
if (isPrivacyEnabled) {
preSynchronizationTaskRunner.addTask(
new PrivateDatabaseMigrationPreSyncTask(privacyParameters, migratePrivateDatabase));
}
return privacyParameters;
}
public WorldStateArchive createPrivateWorldStateArchive(final StorageProvider storageProvider) {
final WorldStateStorage privateWorldStateStorage =
storageProvider.createPrivateWorldStateStorage();
final WorldStatePreimageStorage preimageStorage =
storageProvider.createPrivateWorldStatePreimageStorage();
return new DefaultWorldStateArchive(privateWorldStateStorage, preimageStorage);
}
private boolean anyPrivacyApiEnabled() {
return rpcHttpApis.contains(RpcApis.EEA)
|| rpcWsApis.contains(RpcApis.EEA)
|| rpcHttpApis.contains(RpcApis.PRIV)
|| rpcWsApis.contains(RpcApis.PRIV);
}
private PrivacyKeyValueStorageProvider privacyKeyStorageProvider(final String name) {
return new PrivacyKeyValueStorageProviderBuilder()
.withStorageFactory(privacyKeyValueStorageFactory(name))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
private PrivacyKeyValueStorageFactory privacyKeyValueStorageFactory(final String name) {
return (PrivacyKeyValueStorageFactory)
storageService
.getByName(name)
.orElseThrow(
() -> new StorageException("No KeyValueStorageFactory found for key: " + name));
}
private KeyValueStorageProvider keyValueStorageProvider(final String name) {
if (this.keyValueStorageProvider == null) {
this.keyValueStorageProvider =
new KeyValueStorageProviderBuilder()
.withStorageFactory(
storageService
.getByName(name)
.orElseThrow(
() ->
new StorageException(
"No KeyValueStorageFactory found for key: " + name)))
.withCommonConfiguration(pluginCommonConfiguration)
.withMetricsSystem(getMetricsSystem())
.build();
}
return this.keyValueStorageProvider;
}
private SynchronizerConfiguration buildSyncConfig() {
return unstableSynchronizerOptions
.toDomainObject()
.syncMode(syncMode)
.fastSyncMinimumPeerCount(fastSyncMinPeerCount)
.build();
}
private TransactionPoolConfiguration buildTransactionPoolConfiguration() {
return unstableTransactionPoolOptions
.toDomainObject()
.txPoolMaxSize(txPoolMaxSize)
.pooledTransactionHashesSize(pooledTransactionHashesSize)
.pendingTxRetentionPeriod(pendingTxRetentionPeriod)
.priceBump(Percentage.fromInt(priceBump))
.txFeeCap(txFeeCap)
.build();
}
private boolean isPruningEnabled() {
return pruningEnabled;
}
// Blockchain synchronisation from peers.
private void synchronize(
final BesuController controller,
final boolean p2pEnabled,
final boolean peerDiscoveryEnabled,
final EthNetworkConfig ethNetworkConfig,
final int maxPeers,
final String p2pAdvertisedHost,
final String p2pListenInterface,
final int p2pListenPort,
final GraphQLConfiguration graphQLConfiguration,
final JsonRpcConfiguration jsonRpcConfiguration,
final WebSocketConfiguration webSocketConfiguration,
final ApiConfiguration apiConfiguration,
final MetricsConfiguration metricsConfiguration,
final Optional<PermissioningConfiguration> permissioningConfiguration,
final Collection<EnodeURL> staticNodes,
final Path pidPath) {
checkNotNull(runnerBuilder);
permissioningConfiguration.ifPresent(runnerBuilder::permissioningConfiguration);
final ObservableMetricsSystem metricsSystem = this.metricsSystem.get();
final Runner runner =
runnerBuilder
.vertx(vertx)
.besuController(controller)
.p2pEnabled(p2pEnabled)
.natMethod(natMethod)
.natManagerServiceName(unstableNatOptions.getNatManagerServiceName())
.natMethodFallbackEnabled(unstableNatOptions.getNatMethodFallbackEnabled())
.discovery(peerDiscoveryEnabled)
.ethNetworkConfig(ethNetworkConfig)
.p2pAdvertisedHost(p2pAdvertisedHost)
.p2pListenInterface(p2pListenInterface)
.p2pListenPort(p2pListenPort)
.maxPeers(maxPeers)
.limitRemoteWireConnectionsEnabled(isLimitRemoteWireConnectionsEnabled)
.fractionRemoteConnectionsAllowed(
Fraction.fromPercentage(maxRemoteConnectionsPercentage).getValue())
.randomPeerPriority(randomPeerPriority)
.networkingConfiguration(unstableNetworkingOptions.toDomainObject())
.graphQLConfiguration(graphQLConfiguration)
.jsonRpcConfiguration(jsonRpcConfiguration)
.webSocketConfiguration(webSocketConfiguration)
.apiConfiguration(apiConfiguration)
.pidPath(pidPath)
.dataDir(dataDir())
.bannedNodeIds(bannedNodeIds)
.metricsSystem(metricsSystem)
.metricsConfiguration(metricsConfiguration)
.staticNodes(staticNodes)
.identityString(identityString)
.besuPluginContext(besuPluginContext)
.autoLogBloomCaching(autoLogBloomCachingEnabled)
.ethstatsUrl(unstableEthstatsOptions.getEthstatsUrl())
.ethstatsContact(unstableEthstatsOptions.getEthstatsContact())
.storageProvider(keyValueStorageProvider(keyValueStorageName))
.forkIdSupplier(() -> besuController.getProtocolManager().getForkIdAsBytesList())
.build();
addShutdownHook(runner);
runner.start();
runner.awaitStop();
}
protected Vertx createVertx(final VertxOptions vertxOptions) {
return Vertx.vertx(vertxOptions);
}
private VertxOptions createVertxOptions(final MetricsSystem metricsSystem) {
return new VertxOptions()
.setMetricsOptions(
new MetricsOptions()
.setEnabled(true)
.setFactory(new VertxMetricsAdapterFactory(metricsSystem)));
}
private void addShutdownHook(final Runner runner) {
Runtime.getRuntime()
.addShutdownHook(
new Thread(
() -> {
try {
besuPluginContext.stopPlugins();
runner.close();
LogManager.shutdown();
} catch (final Exception e) {
logger.error("Failed to stop Besu");
}
}));
}
// Used to discover the default IP of the client.
// Loopback IP is used by default as this is how smokeTests require it to be
// and it's probably a good security behaviour to default only on the localhost.
private InetAddress autoDiscoverDefaultIP() {
if (autoDiscoveredDefaultIP != null) {
return autoDiscoveredDefaultIP;
}
autoDiscoveredDefaultIP = InetAddress.getLoopbackAddress();
return autoDiscoveredDefaultIP;
}
private EthNetworkConfig updateNetworkConfig(final NetworkName network) {
final EthNetworkConfig.Builder builder =
new EthNetworkConfig.Builder(EthNetworkConfig.getNetworkConfig(network));
// custom genesis file use comes with specific default values for the genesis
// file itself
// but also for the network id and the bootnodes list.
if (genesisFile != null) {
// noinspection ConstantConditions network is not always null but injected by
// PicoCLI if used
if (this.network != null) {
// We check if network option was really provided by user and not only looking
// at the
// default value.
// if user provided it and provided the genesis file option at the same time, it
// raises a
// conflict error
throw new ParameterException(
this.commandLine,
"--network option and --genesis-file option can't be used at the same time. Please "
+ "refer to CLI reference for more details about this constraint.");
}
builder.setGenesisConfig(genesisConfig());
if (networkId == null) {
// if no network id option is defined on the CLI we have to set a default value
// from the
// genesis file.
// We do the genesis parsing only in this case as we already have network id
// constants
// for known networks to speed up the process.
// Also we have to parse the genesis as we don't already have a parsed version
// at this
// stage.
// If no chain id is found in the genesis as it's an optional, we use mainnet
// network id.
try {
builder.setNetworkId(
getGenesisConfigFile()
.getConfigOptions(genesisConfigOverrides)
.getChainId()
.orElse(EthNetworkConfig.getNetworkConfig(MAINNET).getNetworkId()));
} catch (final DecodeException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to parse genesis file %s.", genesisFile), e);
} catch (final ArithmeticException e) {
throw new ParameterException(
this.commandLine,
"No networkId specified and chainId in "
+ "genesis file is too large to be used as a networkId");
}
}
if (bootNodes == null) {
// We default to an empty bootnodes list if the option is not provided on CLI
// because
// mainnet bootnodes won't work as the default value for a custom genesis,
// so it's better to have an empty list as default value that forces to create a
// custom one
// than a useless one that may make user think that it can work when it can't.
builder.setBootNodes(new ArrayList<>());
}
builder.setDnsDiscoveryUrl(null);
}
if (networkId != null) {
builder.setNetworkId(networkId);
}
if (bootNodes != null) {
if (!peerDiscoveryEnabled) {
logger.warn("Discovery disabled: bootnodes will be ignored.");
}
try {
final List<EnodeURL> listBootNodes =
bootNodes.stream()
.filter(value -> !value.isEmpty())
.map(url -> EnodeURL.fromString(url, getEnodeDnsConfiguration()))
.collect(Collectors.toList());
DiscoveryConfiguration.assertValidBootnodes(listBootNodes);
builder.setBootNodes(listBootNodes);
} catch (final IllegalArgumentException e) {
throw new ParameterException(commandLine, e.getMessage());
}
}
return builder.build();
}
private GenesisConfigFile getGenesisConfigFile() {
return GenesisConfigFile.fromConfig(genesisConfig());
}
private String genesisConfig() {
try {
return Resources.toString(genesisFile.toURI().toURL(), UTF_8);
} catch (final IOException e) {
throw new ParameterException(
this.commandLine, String.format("Unable to load genesis file %s.", genesisFile), e);
}
}
// dataDir() is public because it is accessed by subcommands
public Path dataDir() {
return dataPath.toAbsolutePath();
}
private Path pluginsDir() {
final String pluginsDir = System.getProperty("besu.plugins.dir");
if (pluginsDir == null) {
return new File(System.getProperty("besu.home", "."), "plugins").toPath();
} else {
return new File(pluginsDir).toPath();
}
}
@VisibleForTesting
NodeKey buildNodeKey() {
return new NodeKey(securityModule());
}
private SecurityModule securityModule() {
return securityModuleService
.getByName(securityModuleName)
.orElseThrow(() -> new RuntimeException("Security Module not found: " + securityModuleName))
.get();
}
private File nodePrivateKeyFile() {
return Optional.ofNullable(nodePrivateKeyFile)
.orElseGet(() -> KeyPairUtil.getDefaultKeyFile(dataDir()));
}
private String rpcHttpAuthenticationCredentialsFile() {
final String filename = rpcHttpAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "HTTP");
}
return filename;
}
private String rpcWsAuthenticationCredentialsFile() {
final String filename = rpcWsAuthenticationCredentialsFile;
if (filename != null) {
RpcAuthFileValidator.validate(commandLine, filename, "WS");
}
return filename;
}
private String getDefaultPermissioningFilePath() {
return dataDir()
+ System.getProperty("file.separator")
+ DefaultCommandValues.PERMISSIONING_CONFIG_LOCATION;
}
public MetricsSystem getMetricsSystem() {
return metricsSystem.get();
}
private Set<EnodeURL> loadStaticNodes() throws IOException {
final Path staticNodesPath;
if (staticNodesFile != null) {
staticNodesPath = staticNodesFile.toAbsolutePath();
if (!staticNodesPath.toFile().exists()) {
throw new ParameterException(
commandLine, String.format("Static nodes file %s does not exist", staticNodesPath));
}
} else {
final String staticNodesFilename = "static-nodes.json";
staticNodesPath = dataDir().resolve(staticNodesFilename);
}
logger.info("Static Nodes file = {}", staticNodesPath);
return StaticNodesParser.fromPath(staticNodesPath, getEnodeDnsConfiguration());
}
public BesuExceptionHandler exceptionHandler() {
return new BesuExceptionHandler(this::getLogLevel);
}
public EnodeDnsConfiguration getEnodeDnsConfiguration() {
if (enodeDnsConfiguration == null) {
enodeDnsConfiguration = unstableDnsOptions.toDomainObject();
}
return enodeDnsConfiguration;
}
private void checkPortClash() {
getEffectivePorts().stream()
.filter(Objects::nonNull)
.filter(port -> port > 0)
.forEach(
port -> {
if (!allocatedPorts.add(port)) {
throw new ParameterException(
commandLine,
"Port number '"
+ port
+ "' has been specified multiple times. Please review the supplied configuration.");
}
});
}
/**
* Gets the list of effective ports (ports that are enabled).
*
* @return The list of effective ports
*/
private List<Integer> getEffectivePorts() {
final List<Integer> effectivePorts = new ArrayList<>();
addPortIfEnabled(effectivePorts, p2pPort, p2pEnabled);
addPortIfEnabled(effectivePorts, graphQLHttpPort, isGraphQLHttpEnabled);
addPortIfEnabled(effectivePorts, rpcHttpPort, isRpcHttpEnabled);
addPortIfEnabled(effectivePorts, rpcWsPort, isRpcWsEnabled);
addPortIfEnabled(effectivePorts, metricsPort, isMetricsEnabled);
addPortIfEnabled(effectivePorts, metricsPushPort, isMetricsPushEnabled);
addPortIfEnabled(effectivePorts, stratumPort, iStratumMiningEnabled);
return effectivePorts;
}
/**
* Adds port in the passed list only if enabled.
*
* @param ports The list of ports
* @param port The port value
* @param enabled true if enabled, false otherwise
*/
private void addPortIfEnabled(
final List<Integer> ports, final Integer port, final boolean enabled) {
if (enabled) {
ports.add(port);
}
}
private void checkGoQuorumCompatibilityConfig(final EthNetworkConfig ethNetworkConfig) {
if (isGoQuorumCompatibilityMode) {
final GenesisConfigOptions genesisConfigOptions = readGenesisConfigOptions();
// this static flag is read by the RLP decoder
GoQuorumOptions.goQuorumCompatibilityMode = true;
if (!genesisConfigOptions.isQuorum()) {
throw new IllegalStateException(
"GoQuorum compatibility mode (enabled) can only be used if genesis file has 'isQuorum' flag set to true.");
}
genesisConfigOptions
.getChainId()
.ifPresent(
chainId ->
ensureGoQuorumCompatibilityModeNotUsedOnMainnet(
chainId, isGoQuorumCompatibilityMode));
if (genesisFile != null
&& getGenesisConfigFile().getConfigOptions().isQuorum()
&& !minTransactionGasPrice.isZero()) {
throw new ParameterException(
this.commandLine,
"--min-gas-price must be set to zero if GoQuorum compatibility is enabled in the genesis config.");
}
if (ethNetworkConfig.getNetworkId().equals(EthNetworkConfig.MAINNET_NETWORK_ID)) {
throw new ParameterException(
this.commandLine, "GoQuorum compatibility mode (enabled) cannot be used on Mainnet.");
}
}
}
private void ensureGoQuorumCompatibilityModeNotUsedOnMainnet(
final BigInteger chainId, final boolean isGoQuorumCompatibilityMode) {
if (isGoQuorumCompatibilityMode && chainId.equals(EthNetworkConfig.MAINNET_NETWORK_ID)) {
throw new IllegalStateException(
"GoQuorum compatibility mode (enabled) cannot be used on Mainnet.");
}
}
@VisibleForTesting
Level getLogLevel() {
return logLevel;
}
private class BesuCommandConfigurationService implements BesuConfiguration {
@Override
public Path getStoragePath() {
return dataDir().resolve(DATABASE_PATH);
}
@Override
public Path getDataPath() {
return dataDir();
}
@Override
public int getDatabaseVersion() {
return unstableDataStorageOptions
.toDomainObject()
.getDataStorageFormat()
.getDatabaseVersion();
}
}
}
| 1 | 24,732 | This breaks backwards compatibility, so it will have to wait for the next quarterly release if the default is to deny. I would recommend adding the flag with the default set to allow, and then flipping the flag to deny at the next quarterly release RC cycle. | hyperledger-besu | java |
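To illustrate the recommendation above (ship the new behaviour behind a flag whose default keeps the old, permissive behaviour, then flip the default in a later release): a minimal picocli sketch. The flag name, class, and field below are hypothetical, not the actual Besu option.

```java
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;

// Hypothetical example: the option keeps the backwards-compatible behaviour
// (allow) as its default today; the initializer would be flipped to TRUE in a
// later quarterly release to make "deny" the default.
@Command(name = "example")
class ExampleCommand implements Runnable {

  @Option(
      names = {"--Xdeny-by-default"},
      description = "Deny by default (planned to become the default in a future release)",
      arity = "1")
  private Boolean denyByDefault = Boolean.FALSE; // flip to TRUE at the next quarterly release

  @Override
  public void run() {
    System.out.println("deny by default: " + denyByDefault);
  }

  public static void main(final String[] args) {
    new CommandLine(new ExampleCommand()).execute(args);
  }
}
```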
@@ -57,12 +57,12 @@
// league_size) is not limited by physical constraints. Its a pure logical
// number.
-typedef Kokkos::TeamPolicy<> team_policy;
-typedef team_policy::member_type team_member;
+using team_policy = Kokkos::TeamPolicy<>;
+using team_member = team_policy::member_type;
// Define a functor which can be launched using the TeamPolicy
struct hello_world {
- typedef int value_type; // Specify value type for reduction target, sum
+ using value_type = int; // Specify value type for reduction target, sum
// This is a reduction operator which now takes as first argument the
// TeamPolicy member_type. Every member of the team contributes to the | 1 | /*
//@HEADER
// ************************************************************************
//
// Kokkos v. 3.0
// Copyright (2020) National Technology & Engineering
// Solutions of Sandia, LLC (NTESS).
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY NTESS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NTESS OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Christian R. Trott ([email protected])
//
// ************************************************************************
//@HEADER
*/
#include <Kokkos_Core.hpp>
#include <cstdio>
// Using default execution space define a TeamPolicy and its member_type
// The member_type is what the operator of a functor or Lambda gets, for
// a simple RangePolicy the member_type is simply an integer
// For a TeamPolicy it's a much richer object, since it provides all information
// to identify a thread uniquely and some team related function calls such as a
// barrier (which will be used in a subsequent example).
// A ThreadTeam consists of 1 to n threads where the maximum value of n is
// determined by the hardware. On a dual socket CPU machine with 8 cores per
// socket the maximum size of a team is 8. The number of teams (i.e. the
// league_size) is not limited by physical constraints. Its a pure logical
// number.
typedef Kokkos::TeamPolicy<> team_policy;
typedef team_policy::member_type team_member;
// Define a functor which can be launched using the TeamPolicy
struct hello_world {
typedef int value_type; // Specify value type for reduction target, sum
// This is a reduction operator which now takes as first argument the
// TeamPolicy member_type. Every member of the team contributes to the
// total sum.
// It is helpful to think of this operator as a parallel region for a team
// (i.e. every team member is active and will execute the code).
KOKKOS_INLINE_FUNCTION
void operator()(const team_member& thread, int& sum) const {
sum += 1;
// The TeamPolicy<>::member_type provides functions to query the multi
// dimensional index of a thread as well as the number of thread-teams and
// the size of each team.
printf("Hello World: %i %i // %i %i\n", thread.league_rank(),
thread.team_rank(), thread.league_size(), thread.team_size());
}
};
int main(int narg, char* args[]) {
Kokkos::initialize(narg, args);
// Launch 12 teams of the maximum number of threads per team
const int team_size_max = team_policy(1, 1).team_size_max(
hello_world(), Kokkos::ParallelReduceTag());
const team_policy policy_a(12, team_size_max);
int sum = 0;
Kokkos::parallel_reduce(policy_a, hello_world(), sum);
// The result will be 12*team_size_max
printf("Result A: %i == %i\n", sum, team_size_max * 12);
// In practice it is often better to let Kokkos decide on the team_size
const team_policy policy_b(12, Kokkos::AUTO);
Kokkos::parallel_reduce(policy_b, hello_world(), sum);
// The result will be 12*policy_b.team_size_recommended( hello_world(),
// Kokkos::ParallelReduceTag())
const int team_size_recommended = policy_b.team_size_recommended(
hello_world(), Kokkos::ParallelReduceTag());
printf("Result B: %i %i\n", sum, team_size_recommended * 12);
Kokkos::finalize();
}
| 1 | 24,405 | I'm kind of surprised this doesn't require `typename`? | kokkos-kokkos | cpp |
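On the `typename` question: `Kokkos::TeamPolicy<>` is a concrete specialization, so `team_policy::member_type` is not a dependent name and compiles without `typename`; the keyword only becomes mandatory once the nested type depends on a template parameter. A minimal, self-contained sketch (a stand-in `policy` type is used here instead of Kokkos itself):

```cpp
// Stand-in for a concrete policy type such as Kokkos::TeamPolicy<>.
struct policy {
  using member_type = int;
};

// Alias of a non-dependent nested type: no 'typename' needed, which is why
// the aliases in the patch above compile as written.
using member = policy::member_type;

// Inside a template the same nested name depends on P, so 'typename'
// is required.
template <class P>
struct launcher {
  using member = typename P::member_type;
};

int main() {
  launcher<policy>::member m = 0;
  return m;
}
```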
@@ -38,8 +38,7 @@ GLIB_TESTS
AC_CHECK_HEADER([sys/xattr.h],,[AC_MSG_ERROR([You must have sys/xattr.h from glibc])])
-AC_CHECK_PROGS(YACC, 'bison -y', :)
-AS_IF([test "$YACC" = :], [AC_MSG_ERROR([bison not found but required])])
+AS_IF([test "$YACC" != "bison -y"], [AC_MSG_ERROR([bison not found but required])])
PKG_PROG_PKG_CONFIG
| 1 | AC_PREREQ([2.63])
AC_INIT([ostree], [2016.5], [[email protected]])
AC_CONFIG_HEADER([config.h])
AC_CONFIG_MACRO_DIR([buildutil])
AC_CONFIG_AUX_DIR([build-aux])
AM_INIT_AUTOMAKE([1.13 -Wno-portability foreign no-define tar-ustar no-dist-gzip dist-xz
color-tests subdir-objects])
AM_MAINTAINER_MODE([enable])
AM_SILENT_RULES([yes])
AC_USE_SYSTEM_EXTENSIONS
AC_SYS_LARGEFILE
AC_PROG_CC
AM_PROG_CC_C_O
AC_PROG_YACC
changequote(,)dnl
if test "x$GCC" = "xyes"; then
WARN_CFLAGS="-Wall -Wstrict-prototypes -Werror=missing-prototypes \
-Werror=implicit-function-declaration \
-Werror=pointer-arith -Werror=init-self -Werror=format=2 \
-Werror=format-security \
-Werror=missing-include-dirs -Werror=aggregate-return \
-Werror=declaration-after-statement"
fi
changequote([,])dnl
AC_SUBST(WARN_CFLAGS)
# Initialize libtool
LT_PREREQ([2.2.4])
LT_INIT([disable-static])
OSTREE_FEATURES=""
AC_SUBST([OSTREE_FEATURES])
GLIB_TESTS
AC_CHECK_HEADER([sys/xattr.h],,[AC_MSG_ERROR([You must have sys/xattr.h from glibc])])
AC_CHECK_PROGS(YACC, 'bison -y', :)
AS_IF([test "$YACC" = :], [AC_MSG_ERROR([bison not found but required])])
PKG_PROG_PKG_CONFIG
AM_PATH_GLIB_2_0
dnl When bumping the gio-unix-2.0 dependency (or glib-2.0 in general),
dnl remember to bump GLIB_VERSION_MIN_REQUIRED and
dnl GLIB_VERSION_MAX_ALLOWED in Makefile.am
GIO_DEPENDENCY="gio-unix-2.0 >= 2.40.0 libgsystem >= 2015.1"
PKG_CHECK_MODULES(OT_DEP_GIO_UNIX, $GIO_DEPENDENCY)
dnl 5.1.0 is an arbitrary version here
PKG_CHECK_MODULES(OT_DEP_LZMA, liblzma >= 5.0.5)
dnl Needed for rollsum
PKG_CHECK_MODULES(OT_DEP_ZLIB, zlib)
dnl We're not actually linking to this, just using the header
PKG_CHECK_MODULES(OT_DEP_E2P, e2p)
dnl When bumping the libsoup-2.4 dependency, remember to bump
dnl SOUP_VERSION_MIN_REQUIRED and SOUP_VERSION_MAX_ALLOWED in
dnl Makefile.am
SOUP_DEPENDENCY="libsoup-2.4 >= 2.39.1"
AC_ARG_WITH(soup,
AS_HELP_STRING([--with-soup], [Use libsoup @<:@default=yes@:>@]),
[], [with_soup=check])
AS_IF([test x$with_soup != xno ], [
AC_ARG_ENABLE(libsoup_client_certs,
AS_HELP_STRING([--enable-libsoup-client-certs],
[Require availability of new enough libsoup TLS client cert API (default: auto)]),,
[enable_libsoup_client_certs=auto])
AC_MSG_CHECKING([for $SOUP_DEPENDENCY])
PKG_CHECK_EXISTS($SOUP_DEPENDENCY, have_soup=yes, have_soup=no)
AC_MSG_RESULT([$have_soup])
AS_IF([ test x$have_soup = xno && test x$with_soup != xcheck], [
AC_MSG_ERROR([libsoup is enabled but could not be found])
])
AS_IF([test x$have_soup = xyes], [
PKG_CHECK_MODULES(OT_DEP_SOUP, $SOUP_DEPENDENCY)
AC_DEFINE([HAVE_LIBSOUP], 1, [Define if we have libsoup.pc])
with_soup=yes
save_CFLAGS=$CFLAGS
CFLAGS=$OT_DEP_SOUP_CFLAGS
have_libsoup_client_certs=no
AC_CHECK_DECL([SOUP_SESSION_TLS_INTERACTION], [
AC_DEFINE([HAVE_LIBSOUP_CLIENT_CERTS], 1, [Define if we have libsoup client certs])
have_libsoup_client_certs=yes
], [], [#include <libsoup/soup.h>])
AS_IF([test x$enable_libsoup_client_certs = xyes && test x$have_libsoup_client_certs != xyes], [
AC_MSG_ERROR([libsoup client certs explicitly requested but not found])
])
CFLAGS=$save_CFLAGS
], [
with_soup=no
])
], [ with_soup=no ])
if test x$with_soup != xno; then OSTREE_FEATURES="$OSTREE_FEATURES +libsoup"; fi
AM_CONDITIONAL(USE_LIBSOUP, test x$with_soup != xno)
AM_CONDITIONAL(HAVE_LIBSOUP_CLIENT_CERTS, test x$have_libsoup_client_certs = xyes)
m4_ifdef([GOBJECT_INTROSPECTION_CHECK], [
GOBJECT_INTROSPECTION_CHECK([1.34.0])
])
AM_CONDITIONAL(BUILDOPT_INTROSPECTION, test "x$found_introspection" = xyes)
LIBGPGME_DEPENDENCY="1.1.8"
PKG_CHECK_MODULES(OT_DEP_GPGME, gpgme-pthread >= $LIBGPGME_DEPENDENCY, have_gpgme=yes, [
m4_ifdef([AM_PATH_GPGME_PTHREAD], [
AM_PATH_GPGME_PTHREAD($LIBGPGME_DEPENDENCY, have_gpgme=yes, have_gpgme=no)
],[ have_gpgme=no ])
])
AS_IF([ test x$have_gpgme = xno ], [
AC_MSG_ERROR([Need GPGME_PTHREAD version $LIBGPGME_DEPENDENCY or later])
])
OSTREE_FEATURES="$OSTREE_FEATURES +gpgme"
LIBARCHIVE_DEPENDENCY="libarchive >= 2.8.0"
# What's in RHEL7.2.
FUSE_DEPENDENCY="fuse >= 2.9.2"
# check for gtk-doc
m4_ifdef([GTK_DOC_CHECK], [
GTK_DOC_CHECK([1.15], [--flavour no-tmpl])
],[
enable_gtk_doc=no
AM_CONDITIONAL([ENABLE_GTK_DOC], false)
])
AC_ARG_ENABLE(man,
[AS_HELP_STRING([--enable-man],
[generate man pages [default=auto]])],,
enable_man=maybe)
AS_IF([test "$enable_man" != no], [
AC_PATH_PROG([XSLTPROC], [xsltproc])
AS_IF([test -z "$XSLTPROC"], [
AS_IF([test "$enable_man" = yes], [
AC_MSG_ERROR([xsltproc is required for --enable-man])
])
enable_man=no
])
enable_man=yes
])
AM_CONDITIONAL(ENABLE_MAN, test "$enable_man" != no)
AC_ARG_WITH(libarchive,
AS_HELP_STRING([--without-libarchive], [Do not use libarchive]),
:, with_libarchive=maybe)
AS_IF([ test x$with_libarchive != xno ], [
AC_MSG_CHECKING([for $LIBARCHIVE_DEPENDENCY])
PKG_CHECK_EXISTS($LIBARCHIVE_DEPENDENCY, have_libarchive=yes, have_libarchive=no)
AC_MSG_RESULT([$have_libarchive])
AS_IF([ test x$have_libarchive = xno && test x$with_libarchive != xmaybe ], [
AC_MSG_ERROR([libarchive is enabled but could not be found])
])
AS_IF([ test x$have_libarchive = xyes], [
AC_DEFINE([HAVE_LIBARCHIVE], 1, [Define if we have libarchive.pc])
PKG_CHECK_MODULES(OT_DEP_LIBARCHIVE, $LIBARCHIVE_DEPENDENCY)
save_LIBS=$LIBS
LIBS=$OT_DEP_LIBARCHIVE_LIBS
AC_CHECK_FUNCS(archive_read_support_filter_all)
LIBS=$save_LIBS
with_libarchive=yes
], [
with_libarchive=no
])
], [ with_libarchive=no ])
if test x$with_libarchive != xno; then OSTREE_FEATURES="$OSTREE_FEATURES +libarchive"; fi
AM_CONDITIONAL(USE_LIBARCHIVE, test $with_libarchive != no)
dnl This is what is in RHEL7 anyways
SELINUX_DEPENDENCY="libselinux >= 2.1.13"
AC_ARG_WITH(selinux,
AS_HELP_STRING([--without-selinux], [Do not use SELinux]),
:, with_selinux=maybe)
AS_IF([ test x$with_selinux != xno ], [
AC_MSG_CHECKING([for $SELINUX_DEPENDENCY])
PKG_CHECK_EXISTS($SELINUX_DEPENDENCY, have_selinux=yes, have_selinux=no)
AC_MSG_RESULT([$have_selinux])
AS_IF([ test x$have_selinux = xno && test x$with_selinux != xmaybe ], [
AC_MSG_ERROR([SELinux is enabled but could not be found])
])
AS_IF([ test x$have_selinux = xyes], [
AC_DEFINE([HAVE_SELINUX], 1, [Define if we have libselinux.pc])
PKG_CHECK_MODULES(OT_DEP_SELINUX, $SELINUX_DEPENDENCY)
with_selinux=yes
], [
with_selinux=no
])
], [ with_selinux=no ])
if test x$with_selinux != xno; then OSTREE_FEATURES="$OSTREE_FEATURES +selinux"; fi
AM_CONDITIONAL(USE_SELINUX, test $with_selinux != no)
dnl This is what is in RHEL7.2 right now, picking it arbitrarily
LIBMOUNT_DEPENDENCY="mount >= 2.23.0"
AC_ARG_WITH(libmount,
AS_HELP_STRING([--without-libmount], [Do not use libmount]),
:, with_libmount=maybe)
AS_IF([ test x$with_libmount != xno ], [
AC_MSG_CHECKING([for $LIBMOUNT_DEPENDENCY])
PKG_CHECK_EXISTS($LIBMOUNT_DEPENDENCY, have_libmount=yes, have_libmount=no)
AC_MSG_RESULT([$have_libmount])
AS_IF([ test x$have_libmount = xno && test x$with_libmount != xmaybe ], [
AC_MSG_ERROR([libmount is enabled but could not be found])
])
AS_IF([ test x$have_libmount = xyes], [
AC_DEFINE([HAVE_LIBMOUNT], 1, [Define if we have libmount.pc])
PKG_CHECK_MODULES(OT_DEP_LIBMOUNT, $LIBMOUNT_DEPENDENCY)
with_libmount=yes
], [
with_libmount=no
])
], [ with_libmount=no ])
if test x$with_libmount != xno; then OSTREE_FEATURES="$OSTREE_FEATURES +libmount"; fi
AM_CONDITIONAL(USE_LIBMOUNT, test $with_libmount != no)
# Enabled by default because I think people should use it.
AC_ARG_ENABLE(rofiles-fuse,
[AS_HELP_STRING([--enable-rofiles-fuse],
[generate rofiles-fuse helper [default=yes]])],,
enable_rofiles_fuse=yes)
AS_IF([ test x$enable_rofiles_fuse != xno ], [
PKG_CHECK_MODULES(BUILDOPT_FUSE, $FUSE_DEPENDENCY)
], [enable_rofiles_fuse=no])
AM_CONDITIONAL(BUILDOPT_FUSE, test x$enable_rofiles_fuse = xyes)
AC_ARG_WITH(dracut,
AS_HELP_STRING([--with-dracut],
[Install dracut module (default: no)]),,
[with_dracut=no])
case x$with_dracut in
xno) ;;
xyes) ;;
xyesbutnoconf) ;;
*) AC_MSG_ERROR([Unknown --with-dracut value $with_dracut])
esac
AM_CONDITIONAL(BUILDOPT_DRACUT, test x$with_dracut = xyes || test x$with_dracut = xyesbutnoconf)
AM_CONDITIONAL(BUILDOPT_DRACUT_CONF, test x$with_dracut = xyes)
AC_ARG_WITH(mkinitcpio,
AS_HELP_STRING([--with-mkinitcpio],
[Install mkinitcpio module (default: no)]),,
[with_mkinitcpio=no])
AM_CONDITIONAL(BUILDOPT_MKINITCPIO, test x$with_mkinitcpio = xyes)
AS_IF([test "x$with_dracut" = "xyes" || test "x$with_dracut" = "xyesbutnoconf" || test "x$with_mkinitcpio" = "xyes"], [
with_systemd=yes
AC_ARG_WITH([systemdsystemunitdir],
AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [Directory for systemd service files]),
[],
[with_systemdsystemunitdir=$($PKG_CONFIG --variable=systemdsystemunitdir systemd)])
AS_IF([test "x$with_systemdsystemunitdir" != "xno"], [
AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])
])
])
AM_CONDITIONAL(BUILDOPT_SYSTEMD, test x$with_systemd = xyes)
AC_ARG_WITH(builtin-grub2-mkconfig,
AS_HELP_STRING([--with-builtin-grub2-mkconfig],
[Use a builtin minimal grub2-mkconfig to generate a GRUB2 configuration file (default: no)]),,
[with_builtin_grub2_mkconfig=no])
AM_CONDITIONAL(BUILDOPT_BUILTIN_GRUB2_MKCONFIG, test x$with_builtin_grub2_mkconfig = xyes)
AM_COND_IF(BUILDOPT_BUILTIN_GRUB2_MKCONFIG,
AC_DEFINE([USE_BUILTIN_GRUB2_MKCONFIG], 1, [Define if using internal ostree-grub-generator]))
AC_ARG_WITH(grub2-mkconfig-path,
AS_HELP_STRING([--with-grub2-mkconfig-path],
[Path to grub2-mkconfig]))
AS_IF([test x$with_grub2_mkconfig_path = x], [
dnl Otherwise, look for the path to the system generator. On some
dnl distributions GRUB2 *-mkconfig executable has 'grub2' prefix and
dnl on some 'grub'. We default to grub2-mkconfig.
AC_CHECK_PROGS(GRUB2_MKCONFIG, [grub2-mkconfig grub-mkconfig], [grub2-mkconfig])
],[GRUB2_MKCONFIG=$with_grub2_mkconfig_path])
AC_DEFINE_UNQUOTED([GRUB2_MKCONFIG_PATH], ["$GRUB2_MKCONFIG"], [The system grub2-mkconfig executable name])
dnl for tests
AS_IF([test "x$found_introspection" = xyes], [
AC_PATH_PROG(GJS, [gjs])
if test -n "$GJS"; then
have_gjs=yes
else
have_gjs=no
fi
], [have_gjs=no])
AM_CONDITIONAL(BUILDOPT_GJS, test x$have_gjs = xyes)
AC_CONFIG_FILES([
Makefile
apidoc/Makefile
src/libostree/ostree-1.pc
])
AC_OUTPUT
echo "
OSTree $VERSION
===============
introspection: $found_introspection
rofiles-fuse: $enable_rofiles_fuse
libsoup (retrieve remote HTTP repositories): $with_soup
libsoup TLS client certs: $have_libsoup_client_certs
SELinux: $with_selinux
libmount: $with_libmount
libarchive (parse tar files directly): $with_libarchive
static deltas: yes (always enabled now)
man pages (xsltproc): $enable_man
api docs (gtk-doc): $enable_gtk_doc
gjs-based tests: $have_gjs
dracut: $with_dracut
mkinitcpio: $with_mkinitcpio"
AS_IF([test x$with_builtin_grub2_mkconfig = xyes], [
echo " builtin grub2-mkconfig (instead of system): $with_builtin_grub2_mkconfig"
], [
echo " grub2-mkconfig path: $GRUB2_MKCONFIG"
])
AS_IF([test "x$with_systemd" = "xyes"], [
echo " systemd unit dir: $with_systemdsystemunitdir"
])
echo ""
| 1 | 7,729 | I think this will break the case where YACC is overridden (you can try it even with /usr/bin/bison as the value): `YACC="/path/to/bison -y" ./configure`. I wonder if we should use AC_PROG_YACC at all, or simply use AC_CHECK_PROGS, since we want bison specifically and not another yacc. | ostreedev-ostree | c |
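A sketch only (not the actual configure.ac change) of the AC_CHECK_PROGS-based shape the comment suggests: keep the program search, so a user-supplied $YACC is respected, and then reject anything that is not bison rather than comparing against the literal string "bison -y".

```m4
dnl Sketch: AC_CHECK_PROGS leaves a pre-set $YACC untouched ("Let the user
dnl override the test"), so YACC="/path/to/bison -y" ./configure keeps
dnl working; afterwards we only insist that whatever was found or supplied
dnl is some form of bison.
AC_CHECK_PROGS([YACC], ['bison -y' bison], [:])
AS_CASE([$YACC],
  [*bison*], [],
  [AC_MSG_ERROR([bison not found but required])])
```

If bison is absent, the fallback value `:` does not match the pattern and configure stops with the same error as before; a byacc supplied by the user is rejected as well, which matches the intent of requiring bison specifically.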
@@ -66,8 +66,14 @@ namespace Nethermind.KeyStore.Config
[ConfigItem(Description = "Plain private key to be used in test scenarios")]
string TestNodeKey { get; set; }
- [ConfigItem(Description = "Account to be used by the block author / coinbase")]
+ [ConfigItem(Description = "Account to be used by the block author / coinbase, to be loaded from keystore")]
string BlockAuthorAccount { get; set; }
+
+ [ConfigItem(Description = "Account to be used by the node for network communication (enode), to be loaded from keystore")]
+ string EnodeAccount { get; set; }
+
+ [ConfigItem(Description = "Path to key file to be used by the node for network communication (enode)")]
+ string EnodeKeyFile { get; set; }
[ConfigItem(Description = "Passwords to use to unlock accounts from the UnlockAccounts configuration item. Only used when no PasswordFiles provided.", DefaultValue = "System.String[]")]
string[] Passwords { get; set; } | 1 | // Copyright (c) 2018 Demerzel Solutions Limited
// This file is part of the Nethermind library.
//
// The Nethermind library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The Nethermind library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the Nethermind. If not, see <http://www.gnu.org/licenses/>.
using System;
using System.Linq;
using Nethermind.Config;
using Nethermind.Core;
namespace Nethermind.KeyStore.Config
{
/// <summary>
/// https://medium.com/@julien.maffre/what-is-an-ethereum-keystore-file-86c8c5917b97
/// https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition
/// </summary>
public interface IKeyStoreConfig : IConfig
{
[ConfigItem(Description = "Directory to store keys in.", DefaultValue = "keystore")]
string KeyStoreDirectory { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "UTF-8")]
string KeyStoreEncoding { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "scrypt")]
string Kdf { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "aes-128-ctr")]
string Cipher { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "32")]
int KdfparamsDklen { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "262144")]
int KdfparamsN { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "1")]
int KdfparamsP { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "8")]
int KdfparamsR { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "32")]
int KdfparamsSaltLen { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "128")]
int SymmetricEncrypterBlockSize { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "128")]
int SymmetricEncrypterKeySize { get; set; }
[ConfigItem(Description = "See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition", DefaultValue = "16")]
int IVSize { get; set; }
[ConfigItem(Description = "Plain private key to be used in test scenarios")]
string TestNodeKey { get; set; }
[ConfigItem(Description = "Account to be used by the block author / coinbase")]
string BlockAuthorAccount { get; set; }
[ConfigItem(Description = "Passwords to use to unlock accounts from the UnlockAccounts configuration item. Only used when no PasswordFiles provided.", DefaultValue = "System.String[]")]
string[] Passwords { get; set; }
[ConfigItem(Description = "Password files storing passwords to unlock the accounts from the UnlockAccounts configuration item", DefaultValue = "System.String[]")]
string[] PasswordFiles { get; set; }
[ConfigItem(Description = "Accounts to unlock on startup using provided PasswordFiles and Passwords", DefaultValue = "System.String[]")]
string[] UnlockAccounts { get; set; }
}
public static class KeyStoreConfigExtensions
{
public static int FindUnlockAccountIndex(this IKeyStoreConfig keyStoreConfig, Address address)
{
return Array.IndexOf(
(keyStoreConfig.UnlockAccounts ?? Array.Empty<string>())
.Select(a => a.ToUpperInvariant())
.ToArray(),
address.ToString().ToUpperInvariant());
}
}
} | 1 | 24,762 | can we explain to users in the docs what happens if they leave the field blank? | NethermindEth-nethermind | .cs |
@@ -442,10 +442,10 @@ class EC2Connection(AWSQueryConnection):
[('item', Reservation)], verb='POST')
def run_instances(self, image_id, min_count=1, max_count=1,
- key_name=None, security_groups=None,
- user_data=None, addressing_type=None,
- instance_type='m1.small', placement=None,
- kernel_id=None, ramdisk_id=None,
+ key_name=None, security_group_ids=None,
+ security_groups=None, user_data=None,
+ addressing_type=None, instance_type='m1.small',
+ placement=None, kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
block_device_map=None,
disable_api_termination=False, | 1 | # Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a connection to the EC2 service.
"""
import base64
import warnings
from datetime import datetime
from datetime import timedelta
import boto
from boto.connection import AWSQueryConnection
from boto.resultset import ResultSet
from boto.ec2.image import Image, ImageAttribute
from boto.ec2.instance import Reservation, Instance, ConsoleOutput, InstanceAttribute
from boto.ec2.keypair import KeyPair
from boto.ec2.address import Address
from boto.ec2.volume import Volume
from boto.ec2.snapshot import Snapshot
from boto.ec2.snapshot import SnapshotAttribute
from boto.ec2.zone import Zone
from boto.ec2.securitygroup import SecurityGroup
from boto.ec2.regioninfo import RegionInfo
from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.reservedinstance import ReservedInstancesOffering, ReservedInstance
from boto.ec2.spotinstancerequest import SpotInstanceRequest
from boto.ec2.spotpricehistory import SpotPriceHistory
from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription
from boto.ec2.bundleinstance import BundleInstanceTask
from boto.ec2.placementgroup import PlacementGroup
from boto.ec2.tag import Tag
from boto.exception import EC2ResponseError
#boto.set_stream_logger('ec2')
class EC2Connection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'ec2_version', '2011-01-01')
DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint',
'ec2.amazonaws.com')
ResponseError = EC2ResponseError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, host=None, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
api_version=None):
"""
Init method to create a new connection to EC2.
B{Note:} The host argument is overridden by the host specified in the
boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
AWSQueryConnection.__init__(self, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path)
if api_version:
self.APIVersion = api_version
def _required_auth_capability(self):
return ['ec2']
def get_params(self):
"""
Returns a dictionary containing the value of all of the keyword
arguments passed when constructing this connection.
"""
param_names = ['aws_access_key_id', 'aws_secret_access_key', 'is_secure',
'port', 'proxy', 'proxy_port', 'proxy_user', 'proxy_pass',
'debug', 'https_connection_factory']
params = {}
for name in param_names:
params[name] = getattr(self, name)
return params
def build_filter_params(self, params, filters):
i = 1
for name in filters:
aws_name = name.replace('_', '-')
params['Filter.%d.Name' % i] = aws_name
value = filters[name]
if not isinstance(value, list):
value = [value]
j = 1
for v in value:
params['Filter.%d.Value.%d' % (i,j)] = v
j += 1
i += 1
# Image methods
def get_all_images(self, image_ids=None, owners=None,
executable_by=None, filters=None):
"""
Retrieve all the EC2 images available on your account.
:type image_ids: list
:param image_ids: A list of strings with the image IDs wanted
:type owners: list
:param owners: A list of owner IDs
:type executable_by: list
:param executable_by: Returns AMIs for which the specified
user ID has explicit launch permissions
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.image.Image`
"""
params = {}
if image_ids:
self.build_list_params(params, image_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
if executable_by:
self.build_list_params(params, executable_by, 'ExecutableBy')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
def get_all_kernels(self, kernel_ids=None, owners=None):
"""
Retrieve all the EC2 kernels available on your account.
Constructs a filter to allow the processing to happen server side.
:type kernel_ids: list
:param kernel_ids: A list of strings with the image IDs wanted
:type owners: list
:param owners: A list of owner IDs
:rtype: list
:return: A list of :class:`boto.ec2.image.Image`
"""
params = {}
if kernel_ids:
self.build_list_params(params, kernel_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
filter = {'image-type' : 'kernel'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
def get_all_ramdisks(self, ramdisk_ids=None, owners=None):
"""
Retrieve all the EC2 ramdisks available on your account.
Constructs a filter to allow the processing to happen server side.
:type ramdisk_ids: list
:param ramdisk_ids: A list of strings with the image IDs wanted
:type owners: list
:param owners: A list of owner IDs
:rtype: list
:return: A list of :class:`boto.ec2.image.Image`
"""
params = {}
if ramdisk_ids:
self.build_list_params(params, ramdisk_ids, 'ImageId')
if owners:
self.build_list_params(params, owners, 'Owner')
filter = {'image-type' : 'ramdisk'}
self.build_filter_params(params, filter)
return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
def get_image(self, image_id):
"""
Shortcut method to retrieve a specific image (AMI).
:type image_id: string
:param image_id: the ID of the Image to retrieve
:rtype: :class:`boto.ec2.image.Image`
:return: The EC2 Image specified or None if the image is not found
"""
try:
return self.get_all_images(image_ids=[image_id])[0]
except IndexError: # None of those images available
return None
def register_image(self, name=None, description=None, image_location=None,
architecture=None, kernel_id=None, ramdisk_id=None,
root_device_name=None, block_device_map=None):
"""
Register an image.
:type name: string
:param name: The name of the AMI. Valid only for EBS-based images.
:type description: string
:param description: The description of the AMI.
:type image_location: string
:param image_location: Full path to your AMI manifest in Amazon S3 storage.
Only used for S3-based AMI's.
:type architecture: string
:param architecture: The architecture of the AMI. Valid choices are:
i386 | x86_64
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch the instances
:type root_device_name: string
:param root_device_name: The root device name (e.g. /dev/sdh)
:type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
describing the EBS volumes associated
with the Image.
:rtype: string
:return: The new image id
"""
params = {}
if name:
params['Name'] = name
if description:
params['Description'] = description
if architecture:
params['Architecture'] = architecture
if kernel_id:
params['KernelId'] = kernel_id
if ramdisk_id:
params['RamdiskId'] = ramdisk_id
if image_location:
params['ImageLocation'] = image_location
if root_device_name:
params['RootDeviceName'] = root_device_name
if block_device_map:
block_device_map.build_list_params(params)
rs = self.get_object('RegisterImage', params, ResultSet, verb='POST')
image_id = getattr(rs, 'imageId', None)
return image_id
def deregister_image(self, image_id, delete_snapshot=False):
"""
Unregister an AMI.
:type image_id: string
:param image_id: the ID of the Image to unregister
:type delete_snapshot: bool
:param delete_snapshot: Set to True if we should delete the snapshot associated with an EBS volume mounted at /dev/sda1
:rtype: bool
:return: True if successful
"""
snapshot_id = None
if delete_snapshot:
image = self.get_image(image_id)
for key in image.block_device_mapping:
if key == "/dev/sda1":
snapshot_id = image.block_device_mapping[key].snapshot_id
break
result = self.get_status('DeregisterImage', {'ImageId':image_id}, verb='POST')
if result and snapshot_id:
return result and self.delete_snapshot(snapshot_id)
return result
def create_image(self, instance_id, name, description=None, no_reboot=False):
"""
Will create an AMI from the instance in the running or stopped
state.
:type instance_id: string
:param instance_id: the ID of the instance to image.
:type name: string
:param name: The name of the new image
:type description: string
:param description: An optional human-readable string describing
the contents and purpose of the AMI.
:type no_reboot: bool
:param no_reboot: An optional flag indicating that the bundling process
should not attempt to shutdown the instance before
bundling. If this flag is True, the responsibility
of maintaining file system integrity is left to the
owner of the instance.
:rtype: string
:return: The new image id
"""
params = {'InstanceId' : instance_id,
'Name' : name}
if description:
params['Description'] = description
if no_reboot:
params['NoReboot'] = 'true'
img = self.get_object('CreateImage', params, Image, verb='POST')
return img.id
# ImageAttribute methods
def get_image_attribute(self, image_id, attribute='launchPermission'):
"""
Gets an attribute from an image.
:type image_id: string
:param image_id: The Amazon image id for which you want info about
:type attribute: string
:param attribute: The attribute you need information about.
Valid choices are:
* launchPermission
* productCodes
* blockDeviceMapping
:rtype: :class:`boto.ec2.image.ImageAttribute`
:return: An ImageAttribute object representing the value of the
attribute requested
"""
params = {'ImageId' : image_id,
'Attribute' : attribute}
return self.get_object('DescribeImageAttribute', params, ImageAttribute, verb='POST')
def modify_image_attribute(self, image_id, attribute='launchPermission',
operation='add', user_ids=None, groups=None,
product_codes=None):
"""
Changes an attribute of an image.
:type image_id: string
:param image_id: The image id you wish to change
:type attribute: string
:param attribute: The attribute you wish to change
:type operation: string
:param operation: Either add or remove (this is required for changing
launchPermissions)
:type user_ids: list
:param user_ids: The Amazon IDs of users to add/remove attributes
:type groups: list
:param groups: The groups to add/remove attributes
:type product_codes: list
:param product_codes: Amazon DevPay product code. Currently only one
product code can be associated with an AMI. Once
set, the product code cannot be changed or reset.
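
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection; the AMI id and account id are placeholders)::

            # share the AMI with one other account
            conn.modify_image_attribute('ami-00000000', attribute='launchPermission',
                                        operation='add', user_ids=['123456789012'])
            # make the AMI public
            conn.modify_image_attribute('ami-00000000', attribute='launchPermission',
                                        operation='add', groups=['all'])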
"""
params = {'ImageId' : image_id,
'Attribute' : attribute,
'OperationType' : operation}
if user_ids:
self.build_list_params(params, user_ids, 'UserId')
if groups:
self.build_list_params(params, groups, 'UserGroup')
if product_codes:
self.build_list_params(params, product_codes, 'ProductCode')
return self.get_status('ModifyImageAttribute', params, verb='POST')
def reset_image_attribute(self, image_id, attribute='launchPermission'):
"""
Resets an attribute of an AMI to its default value.
:type image_id: string
:param image_id: ID of the AMI for which an attribute will be reset
:type attribute: string
:param attribute: The attribute to reset
:rtype: bool
:return: Whether the operation succeeded or not
"""
params = {'ImageId' : image_id,
'Attribute' : attribute}
return self.get_status('ResetImageAttribute', params, verb='POST')
# Instance methods
def get_all_instances(self, instance_ids=None, filters=None):
"""
Retrieve all the instances associated with your account.
:type instance_ids: list
:param instance_ids: A list of strings of instance IDs
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.instance.Reservation`
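
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection)::

            reservations = conn.get_all_instances(
                filters={'instance-state-name': 'running'})
            instances = [i for r in reservations for i in r.instances]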
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeInstances', params,
[('item', Reservation)], verb='POST')
def run_instances(self, image_id, min_count=1, max_count=1,
key_name=None, security_groups=None,
user_data=None, addressing_type=None,
instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
block_device_map=None,
disable_api_termination=False,
instance_initiated_shutdown_behavior=None,
private_ip_address=None,
placement_group=None, client_token=None):
"""
Runs an image on EC2.
:type image_id: string
:param image_id: The ID of the image to run
:type min_count: int
:param min_count: The minimum number of instances to launch
:type max_count: int
:param max_count: The maximum number of instances to launch
:type key_name: string
:param key_name: The name of the key pair with which to launch instances
:type security_groups: list of strings
:param security_groups: The names of the security groups with which to
associate instances
:type user_data: string
:param user_data: The user data passed to the launched instances
:type instance_type: string
:param instance_type: The type of instance to run:
* m1.small
* m1.large
* m1.xlarge
* c1.medium
* c1.xlarge
* m2.xlarge
* m2.2xlarge
* m2.4xlarge
* cc1.4xlarge
* t1.micro
:type placement: string
:param placement: The availability zone in which to launch the instances
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch the
instances
:type ramdisk_id: string
:param ramdisk_id: The ID of the RAM disk with which to launch the
instances
:type monitoring_enabled: bool
:param monitoring_enabled: Enable CloudWatch monitoring on the instance.
:type subnet_id: string
:param subnet_id: The subnet ID within which to launch the instances
for VPC.
:type private_ip_address: string
:param private_ip_address: If you're using VPC, you can optionally use
this parameter to assign the instance a
specific available IP address from the
subnet (e.g., 10.0.0.25).
:type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
describing the EBS volumes associated
with the Image.
:type disable_api_termination: bool
:param disable_api_termination: If True, the instances will be locked
and will not be able to be terminated
via the API.
:type instance_initiated_shutdown_behavior: string
:param instance_initiated_shutdown_behavior: Specifies whether the
instance stops or terminates on
instance-initiated shutdown.
Valid values are:
* stop
* terminate
:type placement_group: string
:param placement_group: If specified, this is the name of the placement
group in which the instance(s) will be launched.
:type client_token: string
:param client_token: Unique, case-sensitive identifier you provide
to ensure idempotency of the request.
Maximum 64 ASCII characters
:rtype: Reservation
:return: The :class:`boto.ec2.instance.Reservation` associated with
the request for machines
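
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection and the AMI id, key pair and security group names are
        placeholders)::

            reservation = conn.run_instances('ami-00000000',
                                             instance_type='m1.small',
                                             key_name='my-keypair',
                                             security_groups=['webservers'])
            instance = reservation.instances[0]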
"""
params = {'ImageId':image_id,
'MinCount':min_count,
'MaxCount': max_count}
if key_name:
params['KeyName'] = key_name
if security_groups:
l = []
for group in security_groups:
if isinstance(group, SecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l, 'SecurityGroup')
if user_data:
params['UserData'] = base64.b64encode(user_data)
if addressing_type:
params['AddressingType'] = addressing_type
if instance_type:
params['InstanceType'] = instance_type
if placement:
params['Placement.AvailabilityZone'] = placement
if placement_group:
params['Placement.GroupName'] = placement_group
if kernel_id:
params['KernelId'] = kernel_id
if ramdisk_id:
params['RamdiskId'] = ramdisk_id
if monitoring_enabled:
params['Monitoring.Enabled'] = 'true'
if subnet_id:
params['SubnetId'] = subnet_id
if private_ip_address:
params['PrivateIpAddress'] = private_ip_address
if block_device_map:
block_device_map.build_list_params(params)
if disable_api_termination:
params['DisableApiTermination'] = 'true'
if instance_initiated_shutdown_behavior:
val = instance_initiated_shutdown_behavior
params['InstanceInitiatedShutdownBehavior'] = val
if client_token:
params['ClientToken'] = client_token
return self.get_object('RunInstances', params, Reservation, verb='POST')
def terminate_instances(self, instance_ids=None):
"""
Terminate the instances specified
:type instance_ids: list
:param instance_ids: A list of strings of the Instance IDs to terminate
:rtype: list
:return: A list of the instances terminated
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_list('TerminateInstances', params, [('item', Instance)], verb='POST')
def stop_instances(self, instance_ids=None, force=False):
"""
Stop the instances specified
:type instance_ids: list
:param instance_ids: A list of strings of the Instance IDs to stop
:type force: bool
:param force: Forces the instance to stop
:rtype: list
:return: A list of the instances stopped
"""
params = {}
if force:
params['Force'] = 'true'
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_list('StopInstances', params, [('item', Instance)], verb='POST')
def start_instances(self, instance_ids=None):
"""
Start the instances specified
:type instance_ids: list
:param instance_ids: A list of strings of the Instance IDs to start
:rtype: list
:return: A list of the instances started
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_list('StartInstances', params, [('item', Instance)], verb='POST')
def get_console_output(self, instance_id):
"""
Retrieves the console output for the specified instance.
:type instance_id: string
:param instance_id: The instance ID of a running instance on the cloud.
:rtype: :class:`boto.ec2.instance.ConsoleOutput`
:return: The console output as a ConsoleOutput object
"""
params = {}
self.build_list_params(params, [instance_id], 'InstanceId')
return self.get_object('GetConsoleOutput', params, ConsoleOutput, verb='POST')
def reboot_instances(self, instance_ids=None):
"""
Reboot the specified instances.
:type instance_ids: list
:param instance_ids: The instances to reboot
"""
params = {}
if instance_ids:
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_status('RebootInstances', params)
def confirm_product_instance(self, product_code, instance_id):
params = {'ProductCode' : product_code,
'InstanceId' : instance_id}
rs = self.get_object('ConfirmProductInstance', params, ResultSet, verb='POST')
return (rs.status, rs.ownerId)
# InstanceAttribute methods
def get_instance_attribute(self, instance_id, attribute):
"""
Gets an attribute from an instance.
:type instance_id: string
:param instance_id: The Amazon id of the instance
:type attribute: string
:param attribute: The attribute you need information about
Valid choices are:
* instanceType
* kernel
* ramdisk
* userData
* disableApiTermination
* instanceInitiatedShutdownBehavior
* rootDeviceName
* blockDeviceMapping
:rtype: :class:`boto.ec2.image.InstanceAttribute`
:return: An InstanceAttribute object representing the value of the
attribute requested
"""
params = {'InstanceId' : instance_id}
if attribute:
params['Attribute'] = attribute
return self.get_object('DescribeInstanceAttribute', params,
InstanceAttribute, verb='POST')
def modify_instance_attribute(self, instance_id, attribute, value):
"""
Changes an attribute of an instance
:type instance_id: string
:param instance_id: The instance id you wish to change
:type attribute: string
:param attribute: The attribute you wish to change.
* AttributeName - Expected value (default)
* instanceType - A valid instance type (m1.small)
* kernel - Kernel ID (None)
* ramdisk - Ramdisk ID (None)
* userData - Base64 encoded String (None)
* disableApiTermination - Boolean (true)
* instanceInitiatedShutdownBehavior - stop|terminate
* rootDeviceName - device name (None)
:type value: string
:param value: The new value for the attribute
:rtype: bool
:return: Whether the operation succeeded or not
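
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection and the instance id is a placeholder)::

            # the instance must be stopped before its type can be changed
            conn.modify_instance_attribute('i-00000000', 'instanceType', 'm1.large')
            conn.modify_instance_attribute('i-00000000', 'disableApiTermination', True)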
"""
# Allow a bool to be passed in for value of disableApiTermination
if attribute == 'disableApiTermination':
if isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
params = {'InstanceId' : instance_id,
'Attribute' : attribute,
'Value' : value}
return self.get_status('ModifyInstanceAttribute', params, verb='POST')
def reset_instance_attribute(self, instance_id, attribute):
"""
Resets an attribute of an instance to its default value.
:type instance_id: string
:param instance_id: ID of the instance
:type attribute: string
:param attribute: The attribute to reset. Valid values are:
kernel|ramdisk
:rtype: bool
:return: Whether the operation succeeded or not
"""
params = {'InstanceId' : instance_id,
'Attribute' : attribute}
return self.get_status('ResetInstanceAttribute', params, verb='POST')
# Spot Instances
def get_all_spot_instance_requests(self, request_ids=None,
filters=None):
"""
Retrieve all the Spot Instance Requests associated with your account.
:type request_ids: list
:param request_ids: A list of strings of spot instance request IDs
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of
:class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
"""
params = {}
if request_ids:
self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeSpotInstanceRequests', params,
[('item', SpotInstanceRequest)], verb='POST')
def get_spot_price_history(self, start_time=None, end_time=None,
instance_type=None, product_description=None):
"""
Retrieve the recent history of spot instances pricing.
:type start_time: str
:param start_time: An indication of how far back to provide price
changes for. An ISO8601 DateTime string.
:type end_time: str
:param end_time: An indication of how far forward to provide price
changes for. An ISO8601 DateTime string.
:type instance_type: str
:param instance_type: Filter responses to a particular instance type.
:type product_description: str
:param product_description: Filter responses to a particular platform.
Valid values are currently: Linux
:rtype: list
:return: A list of :class:`boto.ec2.spotpricehistory.SpotPriceHistory`
         objects, each containing a price and timestamp.
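
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection, and the returned objects are assumed to expose ``price``
        and ``timestamp`` attributes)::

            history = conn.get_spot_price_history(instance_type='m1.small')
            for h in history:
                print h.timestamp, h.price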
"""
params = {}
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if instance_type:
params['InstanceType'] = instance_type
if product_description:
params['ProductDescription'] = product_description
return self.get_list('DescribeSpotPriceHistory', params,
[('item', SpotPriceHistory)], verb='POST')
def request_spot_instances(self, price, image_id, count=1, type='one-time',
valid_from=None, valid_until=None,
launch_group=None, availability_zone_group=None,
key_name=None, security_groups=None,
user_data=None, addressing_type=None,
instance_type='m1.small', placement=None,
kernel_id=None, ramdisk_id=None,
monitoring_enabled=False, subnet_id=None,
block_device_map=None):
"""
Request instances on the spot market at a particular price.
:type price: str
:param price: The maximum price of your bid
:type image_id: string
:param image_id: The ID of the image to run
:type count: int
:param count: The number of instances to request
:type type: str
:param type: Type of request. Can be 'one-time' or 'persistent'.
Default is one-time.
:type valid_from: str
:param valid_from: Start date of the request. An ISO8601 time string.
:type valid_until: str
:param valid_until: End date of the request. An ISO8601 time string.
:type launch_group: str
:param launch_group: If supplied, all requests will be fulfilled
as a group.
:type availability_zone_group: str
:param availability_zone_group: If supplied, all requests will be
fulfilled within a single
availability zone.
:type key_name: string
:param key_name: The name of the key pair with which to launch instances
:type security_groups: list of strings
:param security_groups: The names of the security groups with which to
associate instances
:type user_data: string
:param user_data: The user data passed to the launched instances
:type instance_type: string
:param instance_type: The type of instance to run:
* m1.small
* m1.large
* m1.xlarge
* c1.medium
* c1.xlarge
* m2.xlarge
* m2.2xlarge
* m2.4xlarge
* cc1.4xlarge
* t1.micro
:type placement: string
:param placement: The availability zone in which to launch the instances
:type kernel_id: string
:param kernel_id: The ID of the kernel with which to launch the
instances
:type ramdisk_id: string
:param ramdisk_id: The ID of the RAM disk with which to launch the
instances
:type monitoring_enabled: bool
:param monitoring_enabled: Enable CloudWatch monitoring on the instance.
:type subnet_id: string
:param subnet_id: The subnet ID within which to launch the instances
for VPC.
:type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
:param block_device_map: A BlockDeviceMapping data structure
describing the EBS volumes associated
with the Image.
:rtype: list
:return: A list of the
         :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
         objects associated with the request
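
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection, the AMI id and key name are placeholders, and the
        request ids are assumed to be available as the ``id`` attribute)::

            requests = conn.request_spot_instances('0.05', 'ami-00000000',
                                                   count=2,
                                                   instance_type='m1.small',
                                                   key_name='my-keypair')
            request_ids = [req.id for req in requests]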
"""
params = {'LaunchSpecification.ImageId':image_id,
'Type' : type,
'SpotPrice' : price}
if count:
params['InstanceCount'] = count
if valid_from:
params['ValidFrom'] = valid_from
if valid_until:
params['ValidUntil'] = valid_until
if launch_group:
params['LaunchGroup'] = launch_group
if availability_zone_group:
params['AvailabilityZoneGroup'] = availability_zone_group
if key_name:
params['LaunchSpecification.KeyName'] = key_name
if security_groups:
l = []
for group in security_groups:
if isinstance(group, SecurityGroup):
l.append(group.name)
else:
l.append(group)
self.build_list_params(params, l,
'LaunchSpecification.SecurityGroup')
if user_data:
params['LaunchSpecification.UserData'] = base64.b64encode(user_data)
if addressing_type:
params['LaunchSpecification.AddressingType'] = addressing_type
if instance_type:
params['LaunchSpecification.InstanceType'] = instance_type
if placement:
params['LaunchSpecification.Placement.AvailabilityZone'] = placement
if kernel_id:
params['LaunchSpecification.KernelId'] = kernel_id
if ramdisk_id:
params['LaunchSpecification.RamdiskId'] = ramdisk_id
if monitoring_enabled:
params['LaunchSpecification.Monitoring.Enabled'] = 'true'
if subnet_id:
params['LaunchSpecification.SubnetId'] = subnet_id
if block_device_map:
block_device_map.build_list_params(params, 'LaunchSpecification.')
return self.get_list('RequestSpotInstances', params,
[('item', SpotInstanceRequest)],
verb='POST')
def cancel_spot_instance_requests(self, request_ids):
"""
Cancel the specified Spot Instance Requests.
:type request_ids: list
:param request_ids: A list of strings of the Request IDs to cancel
:rtype: list
:return: A list of the cancelled Spot Instance Requests
"""
params = {}
if request_ids:
self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
return self.get_list('CancelSpotInstanceRequests', params,
[('item', Instance)], verb='POST')
def get_spot_datafeed_subscription(self):
"""
Return the current spot instance data feed subscription
associated with this account, if any.
:rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
:return: The datafeed subscription object or None
"""
return self.get_object('DescribeSpotDatafeedSubscription',
None, SpotDatafeedSubscription, verb='POST')
def create_spot_datafeed_subscription(self, bucket, prefix):
"""
Create a spot instance datafeed subscription for this account.
:type bucket: str or unicode
:param bucket: The name of the bucket where spot instance data
will be written. The account issuing this request
must have FULL_CONTROL access to the bucket
specified in the request.
:type prefix: str or unicode
:param prefix: An optional prefix that will be pre-pended to all
data files written to the bucket.
:rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
:return: The datafeed subscription object or None
"""
params = {'Bucket' : bucket}
if prefix:
params['Prefix'] = prefix
return self.get_object('CreateSpotDatafeedSubscription',
params, SpotDatafeedSubscription, verb='POST')
def delete_spot_datafeed_subscription(self):
"""
Delete the current spot instance data feed subscription
associated with this account
:rtype: bool
:return: True if successful
"""
return self.get_status('DeleteSpotDatafeedSubscription', None, verb='POST')
# Zone methods
def get_all_zones(self, zones=None, filters=None):
"""
Get all Availability Zones associated with the current region.
:type zones: list
:param zones: Optional list of zones. If this list is present,
only the Zones associated with these zone names
will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list of :class:`boto.ec2.zone.Zone`
:return: The requested Zone objects
"""
params = {}
if zones:
self.build_list_params(params, zones, 'ZoneName')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeAvailabilityZones', params, [('item', Zone)], verb='POST')
# Address methods
def get_all_addresses(self, addresses=None, filters=None):
"""
Get all Elastic IP addresses (EIPs) associated with the current credentials.
:type addresses: list
:param addresses: Optional list of addresses. If this list is present,
only the Addresses associated with these addresses
will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list of :class:`boto.ec2.address.Address`
:return: The requested Address objects
"""
params = {}
if addresses:
self.build_list_params(params, addresses, 'PublicIp')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeAddresses', params, [('item', Address)], verb='POST')
def allocate_address(self):
"""
Allocate a new Elastic IP address and associate it with your account.
:rtype: :class:`boto.ec2.address.Address`
:return: The newly allocated Address
"""
return self.get_object('AllocateAddress', {}, Address, verb='POST')
def associate_address(self, instance_id, public_ip):
"""
Associate an Elastic IP address with a currently running instance.
:type instance_id: string
:param instance_id: The ID of the instance
:type public_ip: string
:param public_ip: The public IP address
:rtype: bool
:return: True if successful
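
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection and the instance id is a placeholder)::

            address = conn.allocate_address()
            conn.associate_address('i-00000000', address.public_ip)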
"""
params = {'InstanceId' : instance_id, 'PublicIp' : public_ip}
return self.get_status('AssociateAddress', params, verb='POST')
def disassociate_address(self, public_ip):
"""
Disassociate an Elastic IP address from a currently running instance.
:type public_ip: string
:param public_ip: The public IP address
:rtype: bool
:return: True if successful
"""
params = {'PublicIp' : public_ip}
return self.get_status('DisassociateAddress', params, verb='POST')
def release_address(self, public_ip):
"""
Free up an Elastic IP address
:type public_ip: string
:param public_ip: The public IP address
:rtype: bool
:return: True if successful
"""
params = {'PublicIp' : public_ip}
return self.get_status('ReleaseAddress', params, verb='POST')
# Volume methods
def get_all_volumes(self, volume_ids=None, filters=None):
"""
Get all Volumes associated with the current credentials.
:type volume_ids: list
:param volume_ids: Optional list of volume ids. If this list is present,
only the volumes associated with these volume ids
will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list of :class:`boto.ec2.volume.Volume`
:return: The requested Volume objects
"""
params = {}
if volume_ids:
self.build_list_params(params, volume_ids, 'VolumeId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeVolumes', params, [('item', Volume)], verb='POST')
def create_volume(self, size, zone, snapshot=None):
"""
Create a new EBS Volume.
:type size: int
:param size: The size of the new volume, in GiB
:type zone: string or :class:`boto.ec2.zone.Zone`
:param zone: The availability zone in which the Volume will be created.
:type snapshot: string or :class:`boto.ec2.snapshot.Snapshot`
:param snapshot: The snapshot from which the new Volume will be created.
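
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection and the instance id is a placeholder)::

            volume = conn.create_volume(10, 'us-east-1a')
            conn.attach_volume(volume.id, 'i-00000000', '/dev/sdh')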
"""
if isinstance(zone, Zone):
zone = zone.name
params = {'AvailabilityZone' : zone}
if size:
params['Size'] = size
if snapshot:
if isinstance(snapshot, Snapshot):
snapshot = snapshot.id
params['SnapshotId'] = snapshot
return self.get_object('CreateVolume', params, Volume, verb='POST')
def delete_volume(self, volume_id):
"""
Delete an EBS volume.
:type volume_id: str
:param volume_id: The ID of the volume to be deleted.
:rtype: bool
:return: True if successful
"""
params = {'VolumeId': volume_id}
return self.get_status('DeleteVolume', params, verb='POST')
def attach_volume(self, volume_id, instance_id, device):
"""
Attach an EBS volume to an EC2 instance.
:type volume_id: str
:param volume_id: The ID of the EBS volume to be attached.
:type instance_id: str
:param instance_id: The ID of the EC2 instance to which it will
be attached.
:type device: str
:param device: The device on the instance through which the
volume will be exposed (e.g. /dev/sdh)
:rtype: bool
:return: True if successful
"""
params = {'InstanceId' : instance_id,
'VolumeId' : volume_id,
'Device' : device}
return self.get_status('AttachVolume', params, verb='POST')
def detach_volume(self, volume_id, instance_id=None,
device=None, force=False):
"""
Detach an EBS volume from an EC2 instance.
:type volume_id: str
:param volume_id: The ID of the EBS volume to be detached.
:type instance_id: str
:param instance_id: The ID of the EC2 instance from which it will
be detached.
:type device: str
:param device: The device on the instance through which the
volume is exposed (e.g. /dev/sdh)
:type force: bool
:param force: Forces detachment if the previous detachment attempt did
not occur cleanly. This option can lead to data loss or
a corrupted file system. Use this option only as a last
resort to detach a volume from a failed instance. The
instance will not have an opportunity to flush file system
caches nor file system meta data. If you use this option,
you must perform file system check and repair procedures.
:rtype: bool
:return: True if successful
"""
params = {'VolumeId' : volume_id}
if instance_id:
params['InstanceId'] = instance_id
if device:
params['Device'] = device
if force:
params['Force'] = 'true'
return self.get_status('DetachVolume', params, verb='POST')
# Snapshot methods
def get_all_snapshots(self, snapshot_ids=None,
owner=None, restorable_by=None,
filters=None):
"""
Get all EBS Snapshots associated with the current credentials.
:type snapshot_ids: list
:param snapshot_ids: Optional list of snapshot ids. If this list is
present, only the Snapshots associated with
these snapshot ids will be returned.
:type owner: str
:param owner: If present, only the snapshots owned by the specified user
will be returned. Valid values are:
* self
* amazon
* AWS Account ID
:type restorable_by: str
:param restorable_by: If present, only the snapshots that are restorable
by the specified account id will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list of :class:`boto.ec2.snapshot.Snapshot`
:return: The requested Snapshot objects
"""
params = {}
if snapshot_ids:
self.build_list_params(params, snapshot_ids, 'SnapshotId')
if owner:
params['Owner'] = owner
if restorable_by:
params['RestorableBy'] = restorable_by
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeSnapshots', params, [('item', Snapshot)], verb='POST')
def create_snapshot(self, volume_id, description=None):
"""
Create a snapshot of an existing EBS Volume.
:type volume_id: str
:param volume_id: The ID of the volume to snapshot
:type description: str
:param description: A description of the snapshot.
Limited to 255 characters.
:rtype: :class:`boto.ec2.snapshot.Snapshot`
:return: The newly created Snapshot object
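
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection and the volume id is a placeholder)::

            snapshot = conn.create_snapshot('vol-00000000',
                                            description='Nightly backup')
            snapshot_id = snapshot.id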
"""
params = {'VolumeId' : volume_id}
if description:
params['Description'] = description[0:255]
snapshot = self.get_object('CreateSnapshot', params, Snapshot, verb='POST')
volume = self.get_all_volumes([volume_id])[0]
volume_name = volume.tags.get('Name')
if volume_name:
snapshot.add_tag('Name', volume_name)
return snapshot
def delete_snapshot(self, snapshot_id):
params = {'SnapshotId': snapshot_id}
return self.get_status('DeleteSnapshot', params, verb='POST')
def trim_snapshots(self, hourly_backups = 8, daily_backups = 7, weekly_backups = 4):
"""
Trim excess snapshots, based on when they were taken. More current snapshots are
retained, with the number retained decreasing as you move back in time.
If EBS volumes have a 'Name' tag with a value, their snapshots will be assigned the same
tag when they are created. The values of the 'Name' tags for snapshots are used by this
function to group snapshots taken from the same volume (or from a series of like-named
volumes over time) for trimming.
For every group of like-named snapshots, this function retains the newest and oldest
snapshots, as well as, by default, the first snapshots taken in each of the last eight
hours, the first snapshots taken in each of the last seven days, the first snapshots
taken in each of the last four weeks (counting midnight Sunday morning as the start of the week),
and the first snapshot from the first Sunday of each month forever.
:type hourly_backups: int
:param hourly_backups: How many recent hourly backups should be saved.
:type daily_backups: int
:param daily_backups: How many recent daily backups should be saved.
:type weekly_backups: int
:param weekly_backups: How many recent weekly backups should be saved.
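
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection; this keeps 24 hourly, 7 daily and 4 weekly backups for
        each group of like-named snapshots)::

            conn.trim_snapshots(hourly_backups=24, daily_backups=7,
                                weekly_backups=4)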
"""
# This function first builds up an ordered list of target times that snapshots should be saved for
# (last 8 hours, last 7 days, etc.). Then a map of snapshots is constructed, with the keys being
# the snapshot / volume names and the values being arrays of chronologically sorted snapshots.
# Finally, for each array in the map, we go through the snapshot array and the target time array
# in an interleaved fashion, deleting snapshots whose start_times don't immediately follow a
# target time (we delete a snapshot if there's another snapshot that was made closer to the
# preceding target time).
now = datetime.utcnow() # work with UTC time, which is what the snapshot start time is reported in
last_hour = datetime(now.year, now.month, now.day, now.hour)
last_midnight = datetime(now.year, now.month, now.day)
last_sunday = datetime(now.year, now.month, now.day) - timedelta(days = (now.weekday() + 1) % 7)
start_of_month = datetime(now.year, now.month, 1)
target_backup_times = []
oldest_snapshot_date = datetime(2007, 1, 1) # there are no snapshots older than 1/1/2007
for hour in range(0, hourly_backups):
target_backup_times.append(last_hour - timedelta(hours = hour))
for day in range(0, daily_backups):
target_backup_times.append(last_midnight - timedelta(days = day))
for week in range(0, weekly_backups):
target_backup_times.append(last_sunday - timedelta(weeks = week))
one_day = timedelta(days = 1)
while start_of_month > oldest_snapshot_date:
# append the start of the month to the list of snapshot dates to save:
target_backup_times.append(start_of_month)
# there's no timedelta setting for one month, so instead:
# decrement the day by one, so we go to the final day of the previous month...
start_of_month -= one_day
# ... and then go to the first day of that previous month:
start_of_month = datetime(start_of_month.year, start_of_month.month, 1)
temp = []
for t in target_backup_times:
if t not in temp:
temp.append(t)
target_backup_times = temp
target_backup_times.sort() # make the oldest dates first, and make sure the month start and last four weeks'
# start are in the proper order
# get all the snapshots, sort them by date and time, and organize them into one array for each volume:
all_snapshots = self.get_all_snapshots(owner = 'self')
all_snapshots.sort(cmp = lambda x, y: cmp(x.start_time, y.start_time)) # oldest first
snaps_for_each_volume = {}
for snap in all_snapshots:
# the snapshot name and the volume name are the same. The snapshot name is set from the volume
# name at the time the snapshot is taken
volume_name = snap.tags.get('Name')
if volume_name:
# only examine snapshots that have a volume name
snaps_for_volume = snaps_for_each_volume.get(volume_name)
if not snaps_for_volume:
snaps_for_volume = []
snaps_for_each_volume[volume_name] = snaps_for_volume
snaps_for_volume.append(snap)
# Do a running comparison of snapshot dates to desired time periods, keeping the oldest snapshot in each
# time period and deleting the rest:
for volume_name in snaps_for_each_volume:
snaps = snaps_for_each_volume[volume_name]
snaps = snaps[:-1] # never delete the newest snapshot, so remove it from consideration
time_period_number = 0
snap_found_for_this_time_period = False
for snap in snaps:
check_this_snap = True
while check_this_snap and time_period_number < len(target_backup_times):
snap_date = datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
if snap_date < target_backup_times[time_period_number]:
# the snap date is before the cutoff date. Figure out if it's the first snap in this
# date range and act accordingly (since both the date ranges and the snapshots
# are sorted chronologically, we know this snapshot isn't in an earlier date range):
if snap_found_for_this_time_period:
if not snap.tags.get('preserve_snapshot'):
# as long as the snapshot wasn't marked with the 'preserve_snapshot' tag, delete it:
try:
self.delete_snapshot(snap.id)
boto.log.info('Trimmed snapshot %s (%s)' % (snap.tags['Name'], snap.start_time))
except EC2ResponseError:
boto.log.error('Attempt to trim snapshot %s (%s) failed. Possible result of a race condition with trimming on another server?' % (snap.tags['Name'], snap.start_time))
# go on and look at the next snapshot, leaving the time period alone
else:
# this was the first snapshot found for this time period. Leave it alone and look at the
# next snapshot:
snap_found_for_this_time_period = True
check_this_snap = False
else:
# the snap is after the cutoff date. Check it against the next cutoff date
time_period_number += 1
snap_found_for_this_time_period = False
def get_snapshot_attribute(self, snapshot_id,
attribute='createVolumePermission'):
"""
Get information about an attribute of a snapshot. Only one attribute
can be specified per call.
:type snapshot_id: str
:param snapshot_id: The ID of the snapshot.
:type attribute: str
:param attribute: The requested attribute. Valid values are:
* createVolumePermission
:rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute`
:return: The requested Snapshot attribute
"""
params = {'Attribute' : attribute}
if snapshot_id:
params['SnapshotId'] = snapshot_id
return self.get_object('DescribeSnapshotAttribute', params,
SnapshotAttribute, verb='POST')
def modify_snapshot_attribute(self, snapshot_id,
attribute='createVolumePermission',
operation='add', user_ids=None, groups=None):
"""
Changes an attribute of a snapshot.
:type snapshot_id: string
:param snapshot_id: The snapshot id you wish to change
:type attribute: string
:param attribute: The attribute you wish to change. Valid values are:
createVolumePermission
:type operation: string
:param operation: Either add or remove (this is required for changing
snapshot permissions)
:type user_ids: list
:param user_ids: The Amazon IDs of users to add/remove attributes
:type groups: list
:param groups: The groups to add/remove attributes. The only valid
value at this time is 'all'.
"""
params = {'SnapshotId' : snapshot_id,
'Attribute' : attribute,
'OperationType' : operation}
if user_ids:
self.build_list_params(params, user_ids, 'UserId')
if groups:
self.build_list_params(params, groups, 'UserGroup')
return self.get_status('ModifySnapshotAttribute', params, verb='POST')
def reset_snapshot_attribute(self, snapshot_id,
attribute='createVolumePermission'):
"""
Resets an attribute of a snapshot to its default value.
:type snapshot_id: string
:param snapshot_id: ID of the snapshot
:type attribute: string
:param attribute: The attribute to reset
:rtype: bool
:return: Whether the operation succeeded or not
"""
params = {'SnapshotId' : snapshot_id,
'Attribute' : attribute}
return self.get_status('ResetSnapshotAttribute', params, verb='POST')
# Keypair methods
def get_all_key_pairs(self, keynames=None, filters=None):
"""
Get all key pairs associated with your account.
:type keynames: list
:param keynames: A list of the names of keypairs to retrieve.
If not provided, all key pairs will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.keypair.KeyPair`
"""
params = {}
if keynames:
self.build_list_params(params, keynames, 'KeyName')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeKeyPairs', params, [('item', KeyPair)], verb='POST')
def get_key_pair(self, keyname):
"""
Convenience method to retrieve a specific keypair (KeyPair).
:type keyname: string
:param keyname: The name of the keypair to retrieve
:rtype: :class:`boto.ec2.keypair.KeyPair`
:return: The KeyPair specified or None if it is not found
"""
try:
return self.get_all_key_pairs(keynames=[keyname])[0]
except IndexError: # None of those key pairs available
return None
def create_key_pair(self, key_name):
"""
Create a new key pair for your account.
This will create the key pair within the region you
are currently connected to.
:type key_name: string
:param key_name: The name of the new keypair
:rtype: :class:`boto.ec2.keypair.KeyPair`
:return: The newly created :class:`boto.ec2.keypair.KeyPair`.
The material attribute of the new KeyPair object
will contain the unencrypted PEM encoded RSA private key.
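
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection)::

            key = conn.create_key_pair('my-keypair')
            # the private key material is only returned at creation time
            pem = key.material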
"""
params = {'KeyName':key_name}
return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')
def delete_key_pair(self, key_name):
"""
Delete a key pair from your account.
:type key_name: string
:param key_name: The name of the keypair to delete
"""
params = {'KeyName':key_name}
return self.get_status('DeleteKeyPair', params, verb='POST')
def import_key_pair(self, key_name, public_key_material):
"""
Imports the public key from an RSA key pair that you created
with a third-party tool.
Supported formats:
* OpenSSH public key format (e.g., the format
in ~/.ssh/authorized_keys)
* Base64 encoded DER format
* SSH public key file format as specified in RFC4716
DSA keys are not supported. Make sure your key generator is
set up to create RSA keys.
Supported lengths: 1024, 2048, and 4096.
:type key_name: string
:param key_name: The name of the new keypair
:type public_key_material: string
:param public_key_material: The public key. You must base64 encode
the public key material before sending
it to AWS.
:rtype: :class:`boto.ec2.keypair.KeyPair`
:return: The newly created :class:`boto.ec2.keypair.KeyPair`.
The material attribute of the new KeyPair object
will contain the unencrypted PEM encoded RSA private key.
"""
public_key_material = base64.b64encode(public_key_material)
params = {'KeyName' : key_name,
'PublicKeyMaterial' : public_key_material}
return self.get_object('ImportKeyPair', params, KeyPair, verb='POST')
# SecurityGroup methods
def get_all_security_groups(self, groupnames=None, filters=None):
"""
Get all security groups associated with your account in a region.
:type groupnames: list
:param groupnames: A list of the names of security groups to retrieve.
If not provided, all security groups will be
returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.securitygroup.SecurityGroup`
"""
params = {}
if groupnames:
self.build_list_params(params, groupnames, 'GroupName')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeSecurityGroups', params,
[('item', SecurityGroup)], verb='POST')
def create_security_group(self, name, description):
"""
Create a new security group for your account.
This will create the security group within the region you
are currently connected to.
:type name: string
:param name: The name of the new security group
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
:return: The newly created :class:`boto.ec2.securitygroup.SecurityGroup`.
"""
params = {'GroupName':name, 'GroupDescription':description}
group = self.get_object('CreateSecurityGroup', params, SecurityGroup, verb='POST')
group.name = name
group.description = description
return group
def delete_security_group(self, name):
"""
Delete a security group from your account.
:type name: string
:param name: The name of the security group to delete
"""
params = {'GroupName':name}
return self.get_status('DeleteSecurityGroup', params, verb='POST')
def authorize_security_group_deprecated(self, group_name,
src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None,
from_port=None, to_port=None,
cidr_ip=None):
"""
NOTE: This method uses the old-style request parameters
that did not allow a port to be specified when
authorizing a group.
:type group_name: string
:param group_name: The name of the security group you are adding
the rule to.
:type src_security_group_name: string
:param src_security_group_name: The name of the security group you are
granting access to.
:type src_security_group_owner_id: string
:param src_security_group_owner_id: The ID of the owner of the security
group you are granting access to.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are enabling
:type to_port: int
:param to_port: The ending port number you are enabling
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://goo.gl/Yj5QC
:rtype: bool
:return: True if successful.
"""
params = {'GroupName':group_name}
if src_security_group_name:
params['SourceSecurityGroupName'] = src_security_group_name
if src_security_group_owner_id:
params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
if ip_protocol:
params['IpProtocol'] = ip_protocol
if from_port:
params['FromPort'] = from_port
if to_port:
params['ToPort'] = to_port
if cidr_ip:
params['CidrIp'] = cidr_ip
return self.get_status('AuthorizeSecurityGroupIngress', params)
def authorize_security_group(self, group_name, src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None):
"""
Add a new rule to an existing security group.
You need to pass in either src_security_group_name and
src_security_group_owner_id OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are authorizing another
group or you are authorizing some ip-based rule.
:type group_name: string
:param group_name: The name of the security group you are adding
the rule to.
:type src_security_group_name: string
:param src_security_group_name: The name of the security group you are
granting access to.
:type src_security_group_owner_id: string
:param src_security_group_owner_id: The ID of the owner of the security
group you are granting access to.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are enabling
:type to_port: int
:param to_port: The ending port number you are enabling
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://goo.gl/Yj5QC
:rtype: bool
:return: True if successful.
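
        Example (an illustrative sketch; ``conn`` is assumed to be an existing
        EC2Connection and the group names, account id and CIDR block are
        placeholders)::

            # open SSH from a single CIDR block
            conn.authorize_security_group('webservers', ip_protocol='tcp',
                                          from_port=22, to_port=22,
                                          cidr_ip='203.0.113.0/24')
            # allow members of another security group to connect
            conn.authorize_security_group('webservers',
                                          src_security_group_name='loadbalancers',
                                          src_security_group_owner_id='123456789012')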
"""
if src_security_group_name:
if from_port is None and to_port is None and ip_protocol is None:
return self.authorize_security_group_deprecated(
group_name, src_security_group_name,
src_security_group_owner_id)
params = {'GroupName':group_name}
if src_security_group_name:
param_name = 'IpPermissions.1.Groups.1.GroupName'
params[param_name] = src_security_group_name
if src_security_group_owner_id:
param_name = 'IpPermissions.1.Groups.1.UserId'
params[param_name] = src_security_group_owner_id
if ip_protocol:
params['IpPermissions.1.IpProtocol'] = ip_protocol
if from_port:
params['IpPermissions.1.FromPort'] = from_port
if to_port:
params['IpPermissions.1.ToPort'] = to_port
if cidr_ip:
params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
return self.get_status('AuthorizeSecurityGroupIngress',
params, verb='POST')
def revoke_security_group_deprecated(self, group_name,
src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None,
from_port=None, to_port=None,
cidr_ip=None):
"""
NOTE: This method uses the old-style request parameters
that did not allow a port to be specified when
authorizing a group.
Remove an existing rule from an existing security group.
You need to pass in either src_security_group_name and
src_security_group_owner_id OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are revoking another
group or you are revoking some ip-based rule.
:type group_name: string
:param group_name: The name of the security group you are removing
the rule from.
:type src_security_group_name: string
:param src_security_group_name: The name of the security group you are
revoking access to.
:type src_security_group_owner_id: string
:param src_security_group_owner_id: The ID of the owner of the security
group you are revoking access to.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are disabling
:type to_port: int
:param to_port: The ending port number you are disabling
:type cidr_ip: string
:param cidr_ip: The CIDR block you are revoking access to.
http://goo.gl/Yj5QC
:rtype: bool
:return: True if successful.
"""
params = {'GroupName':group_name}
if src_security_group_name:
params['SourceSecurityGroupName'] = src_security_group_name
if src_security_group_owner_id:
params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
if ip_protocol:
params['IpProtocol'] = ip_protocol
if from_port:
params['FromPort'] = from_port
if to_port:
params['ToPort'] = to_port
if cidr_ip:
params['CidrIp'] = cidr_ip
return self.get_status('RevokeSecurityGroupIngress', params)
def revoke_security_group(self, group_name, src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None):
"""
Remove an existing rule from an existing security group.
You need to pass in either src_security_group_name and
src_security_group_owner_id OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are revoking another
group or you are revoking some ip-based rule.
:type group_name: string
:param group_name: The name of the security group you are removing
the rule from.
:type src_security_group_name: string
:param src_security_group_name: The name of the security group you are
revoking access to.
:type src_security_group_owner_id: string
:param src_security_group_owner_id: The ID of the owner of the security
group you are revoking access to.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are disabling
:type to_port: int
:param to_port: The ending port number you are disabling
:type cidr_ip: string
:param cidr_ip: The CIDR block you are revoking access to.
See http://goo.gl/Yj5QC
:rtype: bool
:return: True if successful.
"""
if src_security_group_name:
if from_port is None and to_port is None and ip_protocol is None:
return self.revoke_security_group_deprecated(
group_name, src_security_group_name,
src_security_group_owner_id)
params = {'GroupName':group_name}
if src_security_group_name:
param_name = 'IpPermissions.1.Groups.1.GroupName'
params[param_name] = src_security_group_name
if src_security_group_owner_id:
param_name = 'IpPermissions.1.Groups.1.UserId'
params[param_name] = src_security_group_owner_id
if ip_protocol:
params['IpPermissions.1.IpProtocol'] = ip_protocol
if from_port:
params['IpPermissions.1.FromPort'] = from_port
if to_port:
params['IpPermissions.1.ToPort'] = to_port
if cidr_ip:
params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
return self.get_status('RevokeSecurityGroupIngress',
params, verb='POST')
#
# Regions
#
def get_all_regions(self, region_names=None, filters=None):
"""
Get all available regions for the EC2 service.
:type region_names: list of str
:param region_names: Names of regions to limit output
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
"""
params = {}
if region_names:
self.build_list_params(params, region_names, 'RegionName')
if filters:
self.build_filter_params(params, filters)
regions = self.get_list('DescribeRegions', params, [('item', RegionInfo)], verb='POST')
for region in regions:
region.connection_cls = EC2Connection
return regions
#
# Reservation methods
#
def get_all_reserved_instances_offerings(self, reserved_instances_id=None,
instance_type=None,
availability_zone=None,
product_description=None,
filters=None):
"""
Describes Reserved Instance offerings that are available for purchase.
:type reserved_instances_id: str
:param reserved_instances_id: Displays Reserved Instances with the
specified offering IDs.
:type instance_type: str
:param instance_type: Displays Reserved Instances of the specified
instance type.
:type availability_zone: str
:param availability_zone: Displays Reserved Instances within the
specified Availability Zone.
:type product_description: str
:param product_description: Displays Reserved Instances with the
specified product description.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`
"""
params = {}
if reserved_instances_id:
params['ReservedInstancesId'] = reserved_instances_id
if instance_type:
params['InstanceType'] = instance_type
if availability_zone:
params['AvailabilityZone'] = availability_zone
if product_description:
params['ProductDescription'] = product_description
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeReservedInstancesOfferings',
params, [('item', ReservedInstancesOffering)], verb='POST')
def get_all_reserved_instances(self, reserved_instances_id=None,
filters=None):
"""
Describes one or more of the Reserved Instances that you have purchased.
:type reserved_instances_id: list
:param reserved_instances_id: A list of the reserved instance ids that
will be returned. If not provided, all
reserved instances will be returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance`
"""
params = {}
if reserved_instances_id:
self.build_list_params(params, reserved_instances_id,
'ReservedInstancesId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeReservedInstances',
params, [('item', ReservedInstance)], verb='POST')
def purchase_reserved_instance_offering(self, reserved_instances_offering_id,
instance_count=1):
"""
Purchase a Reserved Instance for use with your account.
** CAUTION **
This request can result in large amounts of money being charged to your
AWS account. Use with caution!
:type reserved_instances_offering_id: string
:param reserved_instances_offering_id: The offering ID of the Reserved
Instance to purchase
:type instance_count: int
:param instance_count: The number of Reserved Instances to purchase.
Default value is 1.
:rtype: :class:`boto.ec2.reservedinstance.ReservedInstance`
:return: The newly created Reserved Instance
"""
params = {'ReservedInstancesOfferingId' : reserved_instances_offering_id,
'InstanceCount' : instance_count}
return self.get_object('PurchaseReservedInstancesOffering', params,
ReservedInstance, verb='POST')
#
# Monitoring
#
def monitor_instances(self, instance_ids):
"""
Enable CloudWatch monitoring for the supplied instances.
:type instance_ids: list of strings
:param instance_ids: The instance ids
:rtype: list
:return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
"""
params = {}
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_list('MonitorInstances', params,
[('item', InstanceInfo)], verb='POST')
def monitor_instance(self, instance_id):
"""
Deprecated Version, maintained for backward compatibility.
Enable CloudWatch monitoring for the supplied instance.
:type instance_id: string
:param instance_id: The instance id
:rtype: list
:return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
"""
return self.monitor_instances([instance_id])
def unmonitor_instances(self, instance_ids):
"""
Disable CloudWatch monitoring for the supplied instances.
:type instance_ids: list of strings
:param instance_ids: The instance ids
:rtype: list
:return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
"""
params = {}
self.build_list_params(params, instance_ids, 'InstanceId')
return self.get_list('UnmonitorInstances', params,
[('item', InstanceInfo)], verb='POST')
def unmonitor_instance(self, instance_id):
"""
Deprecated Version, maintained for backward compatibility.
Disable CloudWatch monitoring for the supplied instance.
:type instance_id: string
:param instance_id: The instance id
:rtype: list
:return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
"""
return self.unmonitor_instances([instance_id])
#
# Bundle Windows Instances
#
def bundle_instance(self, instance_id,
s3_bucket,
s3_prefix,
s3_upload_policy):
"""
Bundle Windows instance.
:type instance_id: string
:param instance_id: The instance id
:type s3_bucket: string
:param s3_bucket: The bucket in which the AMI should be stored.
:type s3_prefix: string
:param s3_prefix: The beginning of the file name for the AMI.
:type s3_upload_policy: string
:param s3_upload_policy: Base64 encoded policy that specifies condition
and permissions for Amazon EC2 to upload the
user's image into Amazon S3.
"""
params = {'InstanceId' : instance_id,
'Storage.S3.Bucket' : s3_bucket,
'Storage.S3.Prefix' : s3_prefix,
'Storage.S3.UploadPolicy' : s3_upload_policy}
s3auth = boto.auth.get_auth_handler(None, boto.config,
self.provider, ['s3'])
params['Storage.S3.AWSAccessKeyId'] = self.aws_access_key_id
signature = s3auth.sign_string(s3_upload_policy)
params['Storage.S3.UploadPolicySignature'] = signature
return self.get_object('BundleInstance', params,
BundleInstanceTask, verb='POST')
def get_all_bundle_tasks(self, bundle_ids=None, filters=None):
"""
Retrieve current bundling tasks. If no bundle id is specified, all
tasks are retrieved.
:type bundle_ids: list
:param bundle_ids: A list of strings containing identifiers for
previously created bundling tasks.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
"""
params = {}
if bundle_ids:
self.build_list_params(params, bundle_ids, 'BundleId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeBundleTasks', params,
[('item', BundleInstanceTask)], verb='POST')
def cancel_bundle_task(self, bundle_id):
"""
Cancel a previously submitted bundle task
:type bundle_id: string
:param bundle_id: The identifier of the bundle task to cancel.
"""
params = {'BundleId' : bundle_id}
return self.get_object('CancelBundleTask', params,
BundleInstanceTask, verb='POST')
def get_password_data(self, instance_id):
"""
Get encrypted administrator password for a Windows instance.
:type instance_id: string
:param instance_id: The identifier of the instance to retrieve the
password for.
"""
params = {'InstanceId' : instance_id}
rs = self.get_object('GetPasswordData', params, ResultSet, verb='POST')
return rs.passwordData
#
# Cluster Placement Groups
#
def get_all_placement_groups(self, groupnames=None, filters=None):
"""
Get all placement groups associated with your account in a region.
:type groupnames: list
:param groupnames: A list of the names of placement groups to retrieve.
If not provided, all placement groups will be
returned.
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: list
:return: A list of :class:`boto.ec2.placementgroup.PlacementGroup`
"""
params = {}
if groupnames:
self.build_list_params(params, groupnames, 'GroupName')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribePlacementGroups', params,
[('item', PlacementGroup)], verb='POST')
def create_placement_group(self, name, strategy='cluster'):
"""
Create a new placement group for your account.
This will create the placement group within the region you
are currently connected to.
:type name: string
:param name: The name of the new placement group
:type strategy: string
:param strategy: The placement strategy of the new placement group.
Currently, the only acceptable value is "cluster".
:rtype: :class:`boto.ec2.placementgroup.PlacementGroup`
        :return: The newly created :class:`boto.ec2.placementgroup.PlacementGroup`.
"""
params = {'GroupName':name, 'Strategy':strategy}
group = self.get_status('CreatePlacementGroup', params, verb='POST')
return group
def delete_placement_group(self, name):
"""
Delete a placement group from your account.
        :type name: string
        :param name: The name of the placement group to delete
"""
params = {'GroupName':name}
return self.get_status('DeletePlacementGroup', params, verb='POST')
# Tag methods
def build_tag_param_list(self, params, tags):
keys = tags.keys()
keys.sort()
i = 1
for key in keys:
value = tags[key]
params['Tag.%d.Key'%i] = key
if value is None:
value = ''
params['Tag.%d.Value'%i] = value
i += 1
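        # For example, a (hypothetical) call such as
        #     self.build_tag_param_list(params, {'Name': 'web-1', 'env': None})
        # leaves params containing:
        #     {'Tag.1.Key': 'Name', 'Tag.1.Value': 'web-1',
        #      'Tag.2.Key': 'env', 'Tag.2.Value': ''}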
def get_all_tags(self, tags=None, filters=None):
"""
Retrieve all the metadata tags associated with your account.
:type tags: list
        :param tags: A list of tag names
:type filters: dict
:param filters: Optional filters that can be used to limit
the results returned. Filters are provided
in the form of a dictionary consisting of
filter names as the key and filter values
as the value. The set of allowable filter
names/values is dependent on the request
being performed. Check the EC2 API guide
for details.
:rtype: dict
:return: A dictionary containing metadata tags
"""
params = {}
if tags:
            self.build_list_params(params, tags, 'InstanceId')
if filters:
self.build_filter_params(params, filters)
return self.get_list('DescribeTags', params, [('item', Tag)], verb='POST')
def create_tags(self, resource_ids, tags):
"""
Create new metadata tags for the specified resource ids.
:type resource_ids: list
:param resource_ids: List of strings
:type tags: dict
:param tags: A dictionary containing the name/value pairs
"""
params = {}
self.build_list_params(params, resource_ids, 'ResourceId')
self.build_tag_param_list(params, tags)
return self.get_status('CreateTags', params, verb='POST')
def delete_tags(self, resource_ids, tags):
"""
Delete metadata tags for the specified resource ids.
:type resource_ids: list
:param resource_ids: List of strings
:type tags: dict or list
:param tags: Either a dictionary containing name/value pairs
or a list containing just tag names.
If you pass in a dictionary, the values must
match the actual tag values or the tag will
not be deleted.
"""
if isinstance(tags, list):
tags = {}.fromkeys(tags, None)
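            # e.g. ['env', 'Name'] becomes {'env': None, 'Name': None}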
params = {}
self.build_list_params(params, resource_ids, 'ResourceId')
self.build_tag_param_list(params, tags)
return self.get_status('DeleteTags', params, verb='POST')
| 1 | 7,880 | We try not to change the function footprints unless absolutely necessary. Why is it necessary to have both secuirty_group_ids and security_groups? | boto-boto | py |
@@ -105,7 +105,7 @@ public class JavaSourceFolderProvider implements SourceFolderProvider {
if (Strings.isNullOrEmpty(relativePath)) {
return parentPackagePrefix;
}
- relativePath = relativePath.replaceAll(File.separator, ".");
+ relativePath = relativePath.replace(File.separator, ".");
return Strings.isNullOrEmpty(parentPackagePrefix)
? relativePath
: parentPackagePrefix + "." + relativePath; | 1 | /*
* Copyright 2016 The Bazel Authors. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.idea.blaze.java.sync.projectstructure;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableMap;
import com.google.idea.blaze.base.sync.SourceFolderProvider;
import com.google.idea.blaze.base.util.UrlUtil;
import com.google.idea.blaze.java.sync.model.BlazeContentEntry;
import com.google.idea.blaze.java.sync.model.BlazeJavaSyncData;
import com.google.idea.blaze.java.sync.model.BlazeSourceDirectory;
import com.intellij.openapi.roots.ContentEntry;
import com.intellij.openapi.roots.SourceFolder;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.vfs.VirtualFileManager;
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import javax.annotation.Nullable;
import org.jetbrains.jps.model.JpsElement;
import org.jetbrains.jps.model.java.JavaResourceRootType;
import org.jetbrains.jps.model.java.JavaSourceRootProperties;
import org.jetbrains.jps.model.module.JpsModuleSourceRoot;
/** Edits source folders in IntelliJ content entries */
public class JavaSourceFolderProvider implements SourceFolderProvider {
private final ImmutableMap<File, BlazeContentEntry> blazeContentEntries;
public JavaSourceFolderProvider(@Nullable BlazeJavaSyncData syncData) {
this.blazeContentEntries = blazeContentEntries(syncData);
}
private static ImmutableMap<File, BlazeContentEntry> blazeContentEntries(
@Nullable BlazeJavaSyncData syncData) {
if (syncData == null) {
return ImmutableMap.of();
}
ImmutableMap.Builder<File, BlazeContentEntry> builder = ImmutableMap.builder();
for (BlazeContentEntry blazeContentEntry : syncData.getImportResult().contentEntries) {
builder.put(blazeContentEntry.contentRoot, blazeContentEntry);
}
return builder.build();
}
@Override
public ImmutableMap<File, SourceFolder> initializeSourceFolders(ContentEntry contentEntry) {
Map<File, SourceFolder> map = new HashMap<>();
BlazeContentEntry javaContentEntry =
blazeContentEntries.get(UrlUtil.urlToFile(contentEntry.getUrl()));
if (javaContentEntry != null) {
for (BlazeSourceDirectory sourceDirectory : javaContentEntry.sources) {
File file = sourceDirectory.getDirectory();
if (map.containsKey(file)) {
continue;
}
SourceFolder sourceFolder = addSourceFolderToContentEntry(contentEntry, sourceDirectory);
map.put(file, sourceFolder);
}
}
return ImmutableMap.copyOf(map);
}
@Override
public SourceFolder setSourceFolderForLocation(
ContentEntry contentEntry, SourceFolder parentFolder, File file, boolean isTestSource) {
SourceFolder sourceFolder;
if (isResource(parentFolder)) {
JavaResourceRootType resourceRootType =
isTestSource ? JavaResourceRootType.TEST_RESOURCE : JavaResourceRootType.RESOURCE;
sourceFolder =
contentEntry.addSourceFolder(UrlUtil.pathToUrl(file.getPath()), resourceRootType);
} else {
sourceFolder = contentEntry.addSourceFolder(UrlUtil.pathToUrl(file.getPath()), isTestSource);
}
sourceFolder.setPackagePrefix(derivePackagePrefix(file, parentFolder));
JpsModuleSourceRoot sourceRoot = sourceFolder.getJpsElement();
JpsElement properties = sourceRoot.getProperties();
if (properties instanceof JavaSourceRootProperties) {
((JavaSourceRootProperties) properties).setForGeneratedSources(isGenerated(parentFolder));
}
return sourceFolder;
}
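  /**
   * Derives the package prefix for {@code file} from its location under the parent source
   * folder. For example, a parent folder at {@code .../src/main/java} with an empty package
   * prefix and a child directory {@code .../src/main/java/com/example} give the relative path
   * {@code com/example} and therefore the prefix {@code com.example}.
   */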
private static String derivePackagePrefix(File file, SourceFolder parentFolder) {
String parentPackagePrefix = parentFolder.getPackagePrefix();
String parentPath = VirtualFileManager.extractPath(parentFolder.getUrl());
String relativePath =
FileUtil.toCanonicalPath(
FileUtil.getRelativePath(parentPath, file.getPath(), File.separatorChar));
if (Strings.isNullOrEmpty(relativePath)) {
return parentPackagePrefix;
}
relativePath = relativePath.replaceAll(File.separator, ".");
return Strings.isNullOrEmpty(parentPackagePrefix)
? relativePath
: parentPackagePrefix + "." + relativePath;
}
@VisibleForTesting
static boolean isResource(SourceFolder folder) {
return folder.getRootType() instanceof JavaResourceRootType;
}
@VisibleForTesting
static boolean isGenerated(SourceFolder folder) {
JpsElement properties = folder.getJpsElement().getProperties();
return properties instanceof JavaSourceRootProperties
&& ((JavaSourceRootProperties) properties).isForGeneratedSources();
}
private static SourceFolder addSourceFolderToContentEntry(
ContentEntry contentEntry, BlazeSourceDirectory sourceDirectory) {
File sourceDir = sourceDirectory.getDirectory();
// Create the source folder
SourceFolder sourceFolder;
if (sourceDirectory.isResource()) {
sourceFolder =
contentEntry.addSourceFolder(
UrlUtil.pathToUrl(sourceDir.getPath()), JavaResourceRootType.RESOURCE);
} else {
sourceFolder = contentEntry.addSourceFolder(UrlUtil.pathToUrl(sourceDir.getPath()), false);
}
JpsModuleSourceRoot sourceRoot = sourceFolder.getJpsElement();
JpsElement properties = sourceRoot.getProperties();
if (properties instanceof JavaSourceRootProperties) {
JavaSourceRootProperties rootProperties = (JavaSourceRootProperties) properties;
if (sourceDirectory.isGenerated()) {
rootProperties.setForGeneratedSources(true);
}
}
String packagePrefix = sourceDirectory.getPackagePrefix();
if (!Strings.isNullOrEmpty(packagePrefix)) {
sourceFolder.setPackagePrefix(packagePrefix);
}
return sourceFolder;
}
}
| 1 | 5,364 | Thanks! We're already replacing File.separatorChar with '/' in the call to FileUtil#toCanonicalPath above, so the correct fix here should be: `relativePath.replace('/', '.')` I'll make the change upstream. | bazelbuild-intellij | java |
@@ -2,8 +2,8 @@
namespace Shopsys\FrameworkBundle\Model\Product\Exception;
-use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
+use Symfony\Component\HttpKernel\Exception\GoneHttpException;
-class ProductNotFoundException extends NotFoundHttpException implements ProductException
+class ProductNotFoundException extends GoneHttpException implements ProductException
{
} | 1 | <?php
namespace Shopsys\FrameworkBundle\Model\Product\Exception;
use Symfony\Component\HttpKernel\Exception\NotFoundHttpException;
class ProductNotFoundException extends NotFoundHttpException implements ProductException
{
}
| 1 | 22,325 | It could be nice to be explicit about this change in upgrade notes, what do you think? | shopsys-shopsys | php |
@@ -27,7 +27,7 @@ function logout (req, res, next) {
}
delete companion.providerTokens[providerName]
- tokenService.removeFromCookies(res, companion.options, companion.provider.authProviderName)
+ tokenService.removeFromCookies(res, companion.options, companion.provider.authProvider)
cleanSession()
res.json(Object.assign({ ok: true }, data))
}) | 1 | const tokenService = require('../helpers/jwt')
const { errorToResponse } = require('../provider/error')
/**
*
* @param {object} req
* @param {object} res
*/
function logout (req, res, next) {
const cleanSession = () => {
if (req.session.grant) {
req.session.grant.state = null
req.session.grant.dynamic = null
}
}
const providerName = req.params.providerName
const companion = req.companion
const token = companion.providerTokens ? companion.providerTokens[providerName] : null
if (token) {
companion.provider.logout({ token, companion }, (err, data) => {
if (err) {
const errResp = errorToResponse(err)
if (errResp) {
return res.status(errResp.code).json({ message: errResp.message })
}
return next(err)
}
delete companion.providerTokens[providerName]
tokenService.removeFromCookies(res, companion.options, companion.provider.authProviderName)
cleanSession()
res.json(Object.assign({ ok: true }, data))
})
} else {
cleanSession()
res.json({ ok: true, revoked: false })
}
}
module.exports = logout
| 1 | 13,670 | spotted a bug. | transloadit-uppy | js |
@@ -148,7 +148,7 @@ public class SmartStoreFullTextSearchSpeedTest extends SmartStoreTestCase {
private double queryData(Type textFieldType, int rowsPerAnimal, int matchingRowsPerAnimal) throws JSONException {
long totalQueryTime = 0;
for (String animal : ANIMALS) {
- String prefix = String.format("%07d", (int) Math.random()*matchingRowsPerAnimal);
+ String prefix = String.format("%07d", (int) (Math.random()*(rowsPerAnimal/matchingRowsPerAnimal)));
String stringToMatch = prefix + animal;
QuerySpec querySpec = textFieldType == Type.full_text | 1 | /*
* Copyright (c) 2015, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.store;
import android.content.Context;
import android.util.Log;
import com.salesforce.androidsdk.smartstore.store.DBHelper;
import com.salesforce.androidsdk.smartstore.store.DBOpenHelper;
import com.salesforce.androidsdk.smartstore.store.IndexSpec;
import com.salesforce.androidsdk.smartstore.store.QuerySpec;
import com.salesforce.androidsdk.smartstore.store.SmartStore;
import com.salesforce.androidsdk.smartstore.store.SmartStore.Type;
import net.sqlcipher.database.SQLiteDatabase;
import net.sqlcipher.database.SQLiteOpenHelper;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
/**
* Tests to compare speed of smartstore full-text-search indices with regular indices
*/
public class SmartStoreFullTextSearchSpeedTest extends SmartStoreTestCase {
public static final String TAG = "SmartStoreFTSSpeedTest";
// Animals A..Y
public static final String[] ANIMALS = new String[]{"alligator", "ant", "bear", "bee", "bird", "camel", "cat",
"cheetah", "chicken", "chimpanzee", "cow", "crocodile", "deer", "dog", "dolphin",
"duck", "eagle", "elephant", "fish", "fly", "fox", "frog", "giraffe", "goat",
"goldfish", "hamster", "hippopotamus", "horse", "iguana", "impala", "jaguar", "jellyfish", "kangaroo", "kitten", "lion",
"lobster", "monkey", "nightingale", "octopus", "owl", "panda", "pig", "puppy", "quail", "rabbit", "rat",
"scorpion", "seal", "shark", "sheep", "snail", "snake", "spider", "squirrel",
"tiger", "turtle", "umbrellabird", "vulture", "wolf", "xantus", "xerus", "yak"};
public static final String ANIMALS_SOUP = "animals";
public static final String TEXT_COL = "text";
protected String getPasscode() {
return "";
}
public void testSearch1000RowsOneMatch() throws JSONException {
trySearch(40, 1);
}
public void testSearch1000RowsManyMatches() throws JSONException {
trySearch(40, 40);
}
public void testSearch10000RowsOneMatch() throws JSONException {
trySearch(400, 1);
}
public void testSearch10000RowsManyMatches() throws JSONException {
trySearch(400, 400);
}
/*
// Slow - uncomment when collecting performance data
public void testSearch100000RowsOneMatch() throws JSONException {
trySearch(4000, 1);
}
*/
private void trySearch(int rowsPerAnimal, int matchingRowsPerAnimal) throws JSONException {
double totalInsertTimeString = setupData(Type.string, rowsPerAnimal, matchingRowsPerAnimal);
double avgQueryTimeString = queryData(Type.string, rowsPerAnimal, matchingRowsPerAnimal);
store.dropAllSoups();
double totalInsertTimeFullText = setupData(Type.full_text, rowsPerAnimal, matchingRowsPerAnimal);
double avgQueryTimeFullText = queryData(Type.full_text, rowsPerAnimal, matchingRowsPerAnimal);
store.dropAllSoups();
Log.i(TAG, String.format("Search rows=%d matchingRows=%d avgQueryTimeString=%.4fs avgQueryTimeFullText=%.4fs (%.2f%%) totalInsertTimeString=%.3fs totalInsertTimeFullText=%.3fs (%.2f%%)",
rowsPerAnimal * 25,
matchingRowsPerAnimal,
avgQueryTimeString,
avgQueryTimeFullText,
100*avgQueryTimeFullText / avgQueryTimeString,
totalInsertTimeString,
totalInsertTimeFullText,
100*totalInsertTimeFullText / totalInsertTimeString));
}
/**
* @return total insert time in seconds
*/
private double setupData(Type textFieldType, int rowsPerAnimal, int matchingRowsPerAnimal) throws JSONException {
long totalInsertTime = 0;
store.registerSoup(ANIMALS_SOUP, new IndexSpec[]{new IndexSpec(TEXT_COL, textFieldType)});
try {
store.beginTransaction();
for (int i=0; i < 25; i++) {
int charToMatch = i + 'a';
for (int j=0; j < rowsPerAnimal; j++) {
String prefix = String.format("%07d", j % (rowsPerAnimal / matchingRowsPerAnimal));
StringBuilder text = new StringBuilder();
for (String animal : ANIMALS) {
if (animal.charAt(0) == charToMatch) {
text.append(prefix).append(animal).append(" ");
}
}
JSONObject elt = new JSONObject();
elt.put(TEXT_COL, text.toString());
long start = System.nanoTime();
store.create(ANIMALS_SOUP, elt, false);
totalInsertTime += System.nanoTime() - start;
}
}
store.setTransactionSuccessful();
} finally {
store.endTransaction();
}
return nanosToSeconds(totalInsertTime);
}
/**
* @return avg query time in seconds
*/
private double queryData(Type textFieldType, int rowsPerAnimal, int matchingRowsPerAnimal) throws JSONException {
long totalQueryTime = 0;
for (String animal : ANIMALS) {
String prefix = String.format("%07d", (int) Math.random()*matchingRowsPerAnimal);
String stringToMatch = prefix + animal;
QuerySpec querySpec = textFieldType == Type.full_text
? QuerySpec.buildMatchQuerySpec(ANIMALS_SOUP, TEXT_COL, stringToMatch, null, null, rowsPerAnimal)
: QuerySpec.buildLikeQuerySpec(ANIMALS_SOUP, TEXT_COL, "%" + stringToMatch + "%", null, null, rowsPerAnimal);
long start = System.nanoTime();
JSONArray results = store.query(querySpec, 0);
totalQueryTime += System.nanoTime() - start;
validateResults(matchingRowsPerAnimal, stringToMatch, results);
}
return nanosToSeconds(totalQueryTime)/ANIMALS.length;
}
private void validateResults(int expectedRows, String stringToMatch, JSONArray results) throws JSONException {
assertEquals("Wrong number of results", expectedRows, results.length());
for (int i=0; i<results.length(); i++) {
String text = results.getJSONObject(i).getString(TEXT_COL);
assertTrue("Invalid result [" + text + "] for search on [" + stringToMatch + "]", text.contains(stringToMatch));
}
}
private double nanosToSeconds(long nanos) {
return nanos / 1000000000.0;
}
}
| 1 | 14,767 | We were always using 0000000 prefix | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -306,7 +306,9 @@ void signalHandler(int sig) {
}
{
auto gJobMgr = nebula::meta::JobManager::getInstance();
- gJobMgr->shutDown();
+ if (gJobMgr) {
+ gJobMgr->shutDown();
+ }
}
if (gKVStore) {
gKVStore->stop(); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "common/base/SignalHandler.h"
#include <thrift/lib/cpp2/server/ThriftServer.h>
#include "meta/MetaServiceHandler.h"
#include "meta/MetaHttpIngestHandler.h"
#include "meta/MetaHttpDownloadHandler.h"
#include "meta/MetaHttpReplaceHostHandler.h"
#include "webservice/Router.h"
#include "webservice/WebService.h"
#include "network/NetworkUtils.h"
#include "process/ProcessUtils.h"
#include "hdfs/HdfsHelper.h"
#include "hdfs/HdfsCommandHelper.h"
#include "thread/GenericThreadPool.h"
#include "kvstore/PartManager.h"
#include "meta/ClusterIdMan.h"
#include "kvstore/NebulaStore.h"
#include "meta/ActiveHostsMan.h"
#include "meta/processors/jobMan/JobManager.h"
#include "meta/RootUserMan.h"
using nebula::operator<<;
using nebula::ProcessUtils;
using nebula::Status;
using nebula::web::PathParams;
DEFINE_int32(port, 45500, "Meta daemon listening port");
DEFINE_bool(reuse_port, true, "Whether to turn on the SO_REUSEPORT option");
DEFINE_string(data_path, "", "Root data path");
DEFINE_string(meta_server_addrs,
"",
"It is a list of IPs split by comma, used in cluster deployment"
"the ips number is equal to the replica number."
"If empty, it means it's a single node");
DEFINE_string(local_ip, "", "Local ip specified for NetworkUtils::getLocalIP");
DEFINE_int32(num_io_threads, 16, "Number of IO threads");
DEFINE_int32(meta_http_thread_num, 3, "Number of meta daemon's http thread");
DEFINE_int32(num_worker_threads, 32, "Number of workers");
DEFINE_string(pid_file, "pids/nebula-metad.pid", "File to hold the process id");
DEFINE_bool(daemonize, true, "Whether run as a daemon process");
DECLARE_bool(check_leader);
static std::unique_ptr<apache::thrift::ThriftServer> gServer;
static std::unique_ptr<nebula::kvstore::KVStore> gKVStore;
static void signalHandler(int sig);
static Status setupSignalHandler();
namespace nebula {
namespace meta {
const std::string kClusterIdKey = "__meta_cluster_id_key__"; // NOLINT
} // namespace meta
} // namespace nebula
nebula::ClusterID gClusterId = 0;
std::unique_ptr<nebula::kvstore::KVStore> initKV(std::vector<nebula::HostAddr> peers,
nebula::HostAddr localhost) {
auto partMan
= std::make_unique<nebula::kvstore::MemPartManager>();
// The meta server has only one space (0), one part (0)
partMan->addPart(nebula::meta::kDefaultSpaceId,
nebula::meta::kDefaultPartId,
std::move(peers));
// folly IOThreadPoolExecutor
auto ioPool = std::make_shared<folly::IOThreadPoolExecutor>(FLAGS_num_io_threads);
std::shared_ptr<apache::thrift::concurrency::ThreadManager> threadManager(
apache::thrift::concurrency::PriorityThreadManager::newPriorityThreadManager(
FLAGS_num_worker_threads, true /*stats*/));
threadManager->setNamePrefix("executor");
threadManager->start();
// On metad, we are allowed to read on follower
FLAGS_check_leader = false;
nebula::kvstore::KVOptions options;
options.dataPaths_ = {FLAGS_data_path};
options.partMan_ = std::move(partMan);
auto kvstore = std::make_unique<nebula::kvstore::NebulaStore>(
std::move(options),
ioPool,
localhost,
threadManager);
if (!(kvstore->init())) {
LOG(ERROR) << "Nebula store init failed";
return nullptr;
}
LOG(INFO) << "Waiting for the leader elected...";
nebula::HostAddr leader;
while (true) {
auto ret = kvstore->partLeader(nebula::meta::kDefaultSpaceId,
nebula::meta::kDefaultPartId);
if (!nebula::ok(ret)) {
LOG(ERROR) << "Nebula store init failed";
return nullptr;
}
leader = nebula::value(ret);
if (leader != nebula::HostAddr(0, 0)) {
break;
}
LOG(INFO) << "Leader has not been elected, sleep 1s";
sleep(1);
}
gClusterId = nebula::meta::ClusterIdMan::getClusterIdFromKV(kvstore.get(),
nebula::meta::kClusterIdKey);
if (gClusterId == 0) {
if (leader == localhost) {
LOG(INFO) << "I am leader, create cluster Id";
gClusterId = nebula::meta::ClusterIdMan::create(FLAGS_meta_server_addrs);
if (!nebula::meta::ClusterIdMan::persistInKV(kvstore.get(),
nebula::meta::kClusterIdKey,
gClusterId)) {
LOG(ERROR) << "Persist cluster failed!";
return nullptr;
}
} else {
LOG(INFO) << "I am follower, wait for the leader's clusterId";
while (gClusterId == 0) {
LOG(INFO) << "Waiting for the leader's clusterId";
sleep(1);
gClusterId = nebula::meta::ClusterIdMan::getClusterIdFromKV(
kvstore.get(),
nebula::meta::kClusterIdKey);
}
}
}
LOG(INFO) << "Nebula store init succeeded, clusterId " << gClusterId;
return kvstore;
}
Status initWebService(nebula::WebService* svc,
nebula::kvstore::KVStore* kvstore,
nebula::hdfs::HdfsCommandHelper* helper,
nebula::thread::GenericThreadPool* pool) {
LOG(INFO) << "Starting Meta HTTP Service";
auto& router = svc->router();
router.get("/download-dispatch").handler([kvstore, helper, pool](PathParams&&) {
auto handler = new nebula::meta::MetaHttpDownloadHandler();
handler->init(kvstore, helper, pool);
return handler;
});
router.get("/ingest-dispatch").handler([kvstore, pool](PathParams&&) {
auto handler = new nebula::meta::MetaHttpIngestHandler();
handler->init(kvstore, pool);
return handler;
});
router.get("/replace").handler([kvstore](PathParams &&) {
auto handler = new nebula::meta::MetaHttpReplaceHostHandler();
handler->init(kvstore);
return handler;
});
return svc->start();
}
int main(int argc, char *argv[]) {
// Detect if the server has already been started
    // Check the pid before glog init, in case the user starts the daemon twice;
    // the second instance would leave the first unable to write logs anymore.
gflags::ParseCommandLineFlags(&argc, &argv, false);
auto pidPath = FLAGS_pid_file;
auto status = ProcessUtils::isPidAvailable(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
google::SetVersionString(nebula::versionString());
folly::init(&argc, &argv, true);
if (FLAGS_data_path.empty()) {
LOG(ERROR) << "Meta Data Path should not empty";
return EXIT_FAILURE;
}
if (FLAGS_daemonize) {
google::SetStderrLogging(google::FATAL);
} else {
google::SetStderrLogging(google::INFO);
}
if (FLAGS_daemonize) {
status = ProcessUtils::daemonize(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
} else {
status = ProcessUtils::makePidFile(pidPath);
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
}
auto result = nebula::network::NetworkUtils::getLocalIP(FLAGS_local_ip);
if (!result.ok()) {
LOG(ERROR) << "Get local ip failed! status:" << result.status();
return EXIT_FAILURE;
}
auto hostAddrRet = nebula::network::NetworkUtils::toHostAddr(result.value(), FLAGS_port);
if (!hostAddrRet.ok()) {
LOG(ERROR) << "Bad local host addr, status:" << hostAddrRet.status();
return EXIT_FAILURE;
}
auto& localhost = hostAddrRet.value();
auto peersRet = nebula::network::NetworkUtils::toHosts(FLAGS_meta_server_addrs);
if (!peersRet.ok()) {
LOG(ERROR) << "Can't get peers address, status:" << peersRet.status();
return EXIT_FAILURE;
}
gKVStore = initKV(peersRet.value(), localhost);
if (gKVStore == nullptr) {
LOG(ERROR) << "Init kv failed!";
return EXIT_FAILURE;
}
LOG(INFO) << "Start http service";
auto helper = std::make_unique<nebula::hdfs::HdfsCommandHelper>();
auto pool = std::make_unique<nebula::thread::GenericThreadPool>();
pool->start(FLAGS_meta_http_thread_num, "http thread pool");
auto webSvc = std::make_unique<nebula::WebService>();
status = initWebService(webSvc.get(), gKVStore.get(), helper.get(), pool.get());
if (!status.ok()) {
LOG(ERROR) << "Init web service failed: " << status;
return EXIT_FAILURE;
}
{
nebula::meta::JobManager* jobMgr = nebula::meta::JobManager::getInstance();
if (!jobMgr->init(gKVStore.get())) {
LOG(ERROR) << "Init job manager failed";
return EXIT_FAILURE;
}
}
{
/**
* Only leader part needed.
*/
auto ret = gKVStore->partLeader(nebula::meta::kDefaultSpaceId,
nebula::meta::kDefaultPartId);
if (!nebula::ok(ret)) {
LOG(ERROR) << "Part leader get failed";
return EXIT_FAILURE;
}
if (nebula::value(ret) == localhost) {
LOG(INFO) << "Check and init root user";
if (!nebula::meta::RootUserMan::isUserExists(gKVStore.get())) {
if (!nebula::meta::RootUserMan::initRootUser(gKVStore.get())) {
LOG(ERROR) << "Init root user failed";
return EXIT_FAILURE;
}
}
}
}
// Setup the signal handlers
status = setupSignalHandler();
if (!status.ok()) {
LOG(ERROR) << status;
return EXIT_FAILURE;
}
auto handler = std::make_shared<nebula::meta::MetaServiceHandler>(gKVStore.get(), gClusterId);
LOG(INFO) << "The meta deamon start on " << localhost;
try {
gServer = std::make_unique<apache::thrift::ThriftServer>();
gServer->setPort(FLAGS_port);
gServer->setReusePort(FLAGS_reuse_port);
gServer->setIdleTimeout(std::chrono::seconds(0)); // No idle timeout on client connection
gServer->setInterface(std::move(handler));
gServer->serve(); // Will wait until the server shuts down
} catch (const std::exception &e) {
LOG(ERROR) << "Exception thrown: " << e.what();
return EXIT_FAILURE;
}
LOG(INFO) << "The meta Daemon stopped";
return EXIT_SUCCESS;
}
Status setupSignalHandler() {
return nebula::SignalHandler::install(
{SIGINT, SIGTERM},
[](nebula::SignalHandler::GeneralSignalInfo *info) {
signalHandler(info->sig());
});
}
void signalHandler(int sig) {
switch (sig) {
case SIGINT:
case SIGTERM:
FLOG_INFO("Signal %d(%s) received, stopping this server", sig, ::strsignal(sig));
if (gServer) {
gServer->stop();
}
{
auto gJobMgr = nebula::meta::JobManager::getInstance();
gJobMgr->shutDown();
}
if (gKVStore) {
gKVStore->stop();
gKVStore.reset();
}
break;
default:
FLOG_ERROR("Signal %d(%s) received but ignored", sig, ::strsignal(sig));
}
}
| 1 | 29,987 | Here we also need to determine if gJobMgr has called the init function. | vesoft-inc-nebula | cpp |
@@ -0,0 +1,18 @@
+module Mongoid
+ module Matcher
+
+ # @api private
+ module All
+ module_function def matches?(exists, value, condition)
+ condition.any? && condition.all? do |c|
+ case c
+ when ::Regexp, BSON::Regexp::Raw
+ Regex.matches_array_or_scalar?(value, c)
+ else
+ EqImpl.matches?(true, value, c, '$all')
+ end
+ end
+ end
+ end
+ end
+end | 1 | 1 | 12,499 | Out of curiosity, why is `condition.any?` also necessary here? | mongodb-mongoid | rb |
|
@@ -12,14 +12,16 @@ namespace meta {
std::unordered_map<GraphSpaceID, std::shared_ptr<HostManager>> HostManager::hostManagers_;
-
// static
std::shared_ptr<const HostManager> HostManager::get(GraphSpaceID space) {
auto it = hostManagers_.find(space);
if (it != hostManagers_.end()) {
return it->second;
} else {
- return std::shared_ptr<const HostManager>();
+ auto* hmPtr = new HostManager(space);
+ std::shared_ptr<HostManager> hm(hmPtr);
+ hostManagers_.emplace(space, hm);
+ return hm;
}
}
| 1 | /* Copyright (c) 2018 - present, VE Software Inc. All rights reserved
*
* This source code is licensed under Apache 2.0 License
* (found in the LICENSE.Apache file in the root directory)
*/
#include "base/Base.h"
#include "meta/HostManager.h"
namespace nebula {
namespace meta {
std::unordered_map<GraphSpaceID, std::shared_ptr<HostManager>> HostManager::hostManagers_;
// static
std::shared_ptr<const HostManager> HostManager::get(GraphSpaceID space) {
auto it = hostManagers_.find(space);
if (it != hostManagers_.end()) {
return it->second;
} else {
return std::shared_ptr<const HostManager>();
}
}
size_t HostManager::numHosts() const {
auto sm = PartManager::get(space_);
if (!sm) {
LOG(ERROR) << "Cannot find PartManager for the graph space " << space_;
return 0;
} else {
return sm->numHosts();
}
}
const std::vector<HostAddr>& HostManager::allHosts() const {
static const std::vector<HostAddr> emptyHostList;
auto sm = PartManager::get(space_);
if (!sm) {
LOG(ERROR) << "Cannot find PartManager for the graph space " << space_;
return emptyHostList;
} else {
return sm->allHosts();
}
}
HostAddr HostManager::hostForId(int64_t id, PartitionID& part) const {
// TODO Now always return the first host. We need to return the leader
// when we know it
auto sm = PartManager::get(space_);
CHECK_NE(!sm, true);
part = sm->partId(id);
auto hosts = sm->hostsForPart(part);
CHECK_GT(hosts.size(), 0U);
// TODO We need to use the leader here
return hosts.front();
}
} // namespace meta
} // namespace nebula
| 1 | 14,917 | Always use `std::make_shared` whenever possible, it will save you one memory allocation. | vesoft-inc-nebula | cpp |
@@ -30,7 +30,7 @@ var DBATag = "v0.2.0"
var RouterImage = "drud/nginx-proxy" // Note that this is overridden by make
// RouterTag defines the tag used for the router.
-var RouterTag = "v0.3.0" // Note that this is overridden by make
+var RouterTag = "router-expose" // Note that this is overridden by make
// COMMIT is the actual committish, supplied by make
var COMMIT = "COMMIT should be overridden" | 1 | package version
// VERSION is supplied with the git committish this is built from
var VERSION = ""
// IMPORTANT: These versions are overridden by version ldflags specifications VERSION_VARIABLES in the Makefile
// DdevVersion is the current version of ddev, by default the git committish (should be current git tag)
var DdevVersion = "v0.3.0-dev" // Note that this is overridden by make
// WebImg defines the default web image used for applications.
var WebImg = "drud/nginx-php-fpm7-local" // Note that this is overridden by make
// WebTag defines the default web image tag for drud dev
var WebTag = "v0.4.0" // Note that this is overridden by make
// DBImg defines the default db image used for applications.
var DBImg = "drud/mysql-docker-local-57" // Note that this is overridden by make
// DBTag defines the default db image tag for drud dev
var DBTag = "v0.3.0" // Note that this is overridden by make
// DBAImg defines the default phpmyadmin image used for applications.
var DBAImg = "drud/phpmyadmin"
// DBATag defines the default phpmyadmin image tag used for applications.
var DBATag = "v0.2.0"
// RouterImage defines the image used for the router.
var RouterImage = "drud/nginx-proxy" // Note that this is overridden by make
// RouterTag defines the tag used for the router.
var RouterTag = "v0.3.0" // Note that this is overridden by make
// COMMIT is the actual committish, supplied by make
var COMMIT = "COMMIT should be overridden"
// BUILDINFO is information with date and context, supplied by make
var BUILDINFO = "BUILDINFO should have new info"
| 1 | 11,156 | Update to real tag before pull. | drud-ddev | go |
@@ -73,7 +73,7 @@ public class ITZipkinHealth {
// ensure we don't track health in prometheus
assertThat(scrape())
- .doesNotContain("health");
+ .doesNotContain("health_check");
}
String scrape() throws InterruptedException { | 1 | /*
* Copyright 2015-2019 The OpenZipkin Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package zipkin2.server.internal.health;
import com.jayway.jsonpath.JsonPath;
import com.linecorp.armeria.server.Server;
import io.micrometer.prometheus.PrometheusMeterRegistry;
import java.io.IOException;
import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringRunner;
import zipkin.server.ZipkinServer;
import zipkin2.storage.InMemoryStorage;
import static org.assertj.core.api.Assertions.assertThat;
import static zipkin2.server.internal.ITZipkinServer.url;
@SpringBootTest(
classes = ZipkinServer.class,
webEnvironment = SpringBootTest.WebEnvironment.NONE, // RANDOM_PORT requires spring-web
properties = {
"server.port=0",
"spring.config.name=zipkin-server"
}
)
@RunWith(SpringRunner.class)
public class ITZipkinHealth {
@Autowired InMemoryStorage storage;
@Autowired PrometheusMeterRegistry registry;
@Autowired Server server;
OkHttpClient client = new OkHttpClient.Builder().followRedirects(true).build();
@Before public void init() {
storage.clear();
}
@Test public void healthIsOK() throws Exception {
Response health = get("/health");
assertThat(health.isSuccessful()).isTrue();
assertThat(health.body().contentType())
.hasToString("application/json; charset=utf-8");
assertThat(health.body().string()).isEqualTo(""
+ "{\n"
+ " \"status\" : \"UP\",\n"
+ " \"zipkin\" : {\n"
+ " \"status\" : \"UP\",\n"
+ " \"details\" : {\n"
+ " \"InMemoryStorage{}\" : {\n"
+ " \"status\" : \"UP\"\n"
+ " }\n"
+ " }\n"
+ " }\n"
+ "}"
);
// ensure we don't track health in prometheus
assertThat(scrape())
.doesNotContain("health");
}
String scrape() throws InterruptedException {
Thread.sleep(100);
return registry.scrape();
}
@Test public void readsHealth() throws Exception {
String json = getAsString("/health");
assertThat(readString(json, "$.status"))
.isIn("UP", "DOWN", "UNKNOWN");
assertThat(readString(json, "$.zipkin.status"))
.isIn("UP", "DOWN", "UNKNOWN");
}
private String getAsString(String path) throws IOException {
Response response = get(path);
assertThat(response.isSuccessful())
.withFailMessage(response.toString())
.isTrue();
return response.body().string();
}
private Response get(String path) throws IOException {
return client.newCall(new Request.Builder().url(url(server, path)).build()).execute();
}
static String readString(String json, String jsonPath) {
return JsonPath.compile(jsonPath).read(json);
}
}
| 1 | 16,959 | Is there any better string that only exposed by prometheus? We have changed the meter tags to contain method and service name. For example `scrape()` contains `method=getHealth` and `service=server.internal.health.ITzipkinHealth` which made this test failed. | openzipkin-zipkin | java |
@@ -33,7 +33,7 @@ namespace Datadog.Trace.Vendors.Serilog.Core
/// be disposed to flush any events buffered within it. Most application
/// code should depend on <see cref="ILogger"/>, not this class.
/// </summary>
- internal sealed class Logger : ILogger, ILogEventSink, IDisposable
+ internal sealed class Logger : ILogger, ILogEventSink, IDisposable, ICoreLogger
{
static readonly object[] NoPropertyValues = new object[0];
| 1 | //------------------------------------------------------------------------------
// <auto-generated />
// This file was automatically generated by the UpdateVendors tool.
//------------------------------------------------------------------------------
// Copyright 2013-2016 Serilog Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
using System;
using System.Collections.Generic;
using Datadog.Trace.Vendors.Serilog.Capturing;
using Datadog.Trace.Vendors.Serilog.Core.Enrichers;
using Datadog.Trace.Vendors.Serilog.Core.Pipeline;
using Datadog.Trace.Vendors.Serilog.Debugging;
using Datadog.Trace.Vendors.Serilog.Events;
#pragma warning disable Serilog004 // Constant MessageTemplate verifier
namespace Datadog.Trace.Vendors.Serilog.Core
{
/// <summary>
/// The core Serilog logging pipeline. A <see cref="Logger"/> must
/// be disposed to flush any events buffered within it. Most application
/// code should depend on <see cref="ILogger"/>, not this class.
/// </summary>
internal sealed class Logger : ILogger, ILogEventSink, IDisposable
{
static readonly object[] NoPropertyValues = new object[0];
readonly MessageTemplateProcessor _messageTemplateProcessor;
readonly ILogEventSink _sink;
readonly Action _dispose;
readonly ILogEventEnricher _enricher;
// It's important that checking minimum level is a very
// quick (CPU-cacheable) read in the simple case, hence
// we keep a separate field from the switch, which may
// not be specified. If it is, we'll set _minimumLevel
// to its lower limit and fall through to the secondary check.
readonly LogEventLevel _minimumLevel;
readonly LoggingLevelSwitch _levelSwitch;
readonly LevelOverrideMap _overrideMap;
internal Logger(
MessageTemplateProcessor messageTemplateProcessor,
LogEventLevel minimumLevel,
ILogEventSink sink,
ILogEventEnricher enricher,
Action dispose = null,
LevelOverrideMap overrideMap = null)
: this(messageTemplateProcessor, minimumLevel, sink, enricher, dispose, null, overrideMap)
{
}
internal Logger(
MessageTemplateProcessor messageTemplateProcessor,
LoggingLevelSwitch levelSwitch,
ILogEventSink sink,
ILogEventEnricher enricher,
Action dispose = null,
LevelOverrideMap overrideMap = null)
: this(messageTemplateProcessor, LevelAlias.Minimum, sink, enricher, dispose, levelSwitch, overrideMap)
{
}
// The messageTemplateProcessor, sink and enricher are required. Argument checks are dropped because
// throwing from here breaks the logger's no-throw contract, and callers are all in this file anyway.
Logger(
MessageTemplateProcessor messageTemplateProcessor,
LogEventLevel minimumLevel,
ILogEventSink sink,
ILogEventEnricher enricher,
Action dispose = null,
LoggingLevelSwitch levelSwitch = null,
LevelOverrideMap overrideMap = null)
{
_messageTemplateProcessor = messageTemplateProcessor;
_minimumLevel = minimumLevel;
_sink = sink;
_dispose = dispose;
_levelSwitch = levelSwitch;
_overrideMap = overrideMap;
_enricher = enricher;
}
internal bool HasOverrideMap => _overrideMap != null;
/// <summary>
/// Create a logger that enriches log events via the provided enrichers.
/// </summary>
/// <param name="enricher">Enricher that applies in the context.</param>
/// <returns>A logger that will enrich log events as specified.</returns>
public ILogger ForContext(ILogEventEnricher enricher)
{
if (enricher == null)
return this; // No context here, so little point writing to SelfLog.
return new Logger(
_messageTemplateProcessor,
_minimumLevel,
this,
enricher,
null,
_levelSwitch,
_overrideMap);
}
/// <summary>
/// Create a logger that enriches log events via the provided enrichers.
/// </summary>
/// <param name="enrichers">Enrichers that apply in the context.</param>
/// <returns>A logger that will enrich log events as specified.</returns>
public ILogger ForContext(IEnumerable<ILogEventEnricher> enrichers)
{
if (enrichers == null)
return this; // No context here, so little point writing to SelfLog.
return ForContext(new SafeAggregateEnricher(enrichers));
}
/// <summary>
/// Create a logger that enriches log events with the specified property.
/// </summary>
/// <param name="propertyName">The name of the property. Must be non-empty.</param>
/// <param name="value">The property value.</param>
/// <param name="destructureObjects">If true, the value will be serialized as a structured
/// object if possible; if false, the object will be recorded as a scalar or simple array.</param>
/// <returns>A logger that will enrich log events as specified.</returns>
public ILogger ForContext(string propertyName, object value, bool destructureObjects = false)
{
if (!LogEventProperty.IsValidName(propertyName))
{
SelfLog.WriteLine("Attempt to call ForContext() with invalid property name `{0}` (value: `{1}`)", propertyName, value);
return this;
}
// It'd be nice to do the destructuring lazily, but unfortunately `value` may be mutated between
// now and the first log event written...
// A future optimization opportunity may be to implement ILogEventEnricher on LogEventProperty to
// remove one more allocation.
var enricher = new FixedPropertyEnricher(_messageTemplateProcessor.CreateProperty(propertyName, value, destructureObjects));
var minimumLevel = _minimumLevel;
var levelSwitch = _levelSwitch;
if (_overrideMap != null && propertyName == Constants.SourceContextPropertyName)
{
var context = value as string;
if (context != null)
_overrideMap.GetEffectiveLevel(context, out minimumLevel, out levelSwitch);
}
return new Logger(
_messageTemplateProcessor,
minimumLevel,
this,
enricher,
null,
levelSwitch,
_overrideMap);
}
/// <summary>
/// Create a logger that marks log events as being from the specified
/// source type.
/// </summary>
/// <param name="source">Type generating log messages in the context.</param>
/// <returns>A logger that will enrich log events as specified.</returns>
public ILogger ForContext(Type source)
{
if (source == null)
return this; // Little point in writing to SelfLog here because we don't have any contextual information
return ForContext(Constants.SourceContextPropertyName, source.FullName);
}
/// <summary>
/// Create a logger that marks log events as being from the specified
/// source type.
/// </summary>
/// <typeparam name="TSource">Type generating log messages in the context.</typeparam>
/// <returns>A logger that will enrich log events as specified.</returns>
public ILogger ForContext<TSource>()
{
return ForContext(typeof(TSource));
}
/// <summary>
/// Write a log event with the specified level.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write(LogEventLevel level, string messageTemplate)
{
// Avoid the array allocation and any boxing allocations when the level isn't enabled
if (IsEnabled(level))
{
Write(level, messageTemplate, NoPropertyValues);
}
}
/// <summary>
/// Write a log event with the specified level.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write<T>(LogEventLevel level, string messageTemplate, T propertyValue)
{
// Avoid the array allocation and any boxing allocations when the level isn't enabled
if (IsEnabled(level))
{
Write(level, messageTemplate, new object[] { propertyValue });
}
}
/// <summary>
/// Write a log event with the specified level.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write<T0, T1>(LogEventLevel level, string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
// Avoid the array allocation and any boxing allocations when the level isn't enabled
if (IsEnabled(level))
{
Write(level, messageTemplate, new object[] { propertyValue0, propertyValue1 });
}
}
/// <summary>
/// Write a log event with the specified level.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write<T0, T1, T2>(LogEventLevel level, string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
// Avoid the array allocation and any boxing allocations when the level isn't enabled
if (IsEnabled(level))
{
Write(level, messageTemplate, new object[] { propertyValue0, propertyValue1, propertyValue2 });
}
}
/// <summary>
/// Write a log event with the specified level.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="messageTemplate"></param>
/// <param name="propertyValues"></param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write(LogEventLevel level, string messageTemplate, params object[] propertyValues)
{
Write(level, (Exception)null, messageTemplate, propertyValues);
}
/// <summary>
/// Determine if events at the specified level will be passed through
/// to the log sinks.
/// </summary>
/// <param name="level">Level to check.</param>
/// <returns>True if the level is enabled; otherwise, false.</returns>
public bool IsEnabled(LogEventLevel level)
{
if ((int)level < (int)_minimumLevel)
return false;
return _levelSwitch == null ||
(int)level >= (int)_levelSwitch.MinimumLevel;
}
/// <summary>
/// Write a log event with the specified level and associated exception.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write(LogEventLevel level, Exception exception, string messageTemplate)
{
// Avoid the array allocation and any boxing allocations when the level isn't enabled
if (IsEnabled(level))
{
Write(level, exception, messageTemplate, NoPropertyValues);
}
}
/// <summary>
/// Write a log event with the specified level and associated exception.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write<T>(LogEventLevel level, Exception exception, string messageTemplate, T propertyValue)
{
// Avoid the array allocation and any boxing allocations when the level isn't enabled
if (IsEnabled(level))
{
Write(level, exception, messageTemplate, new object[] { propertyValue });
}
}
/// <summary>
/// Write a log event with the specified level and associated exception.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write<T0, T1>(LogEventLevel level, Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
// Avoid the array allocation and any boxing allocations when the level isn't enabled
if (IsEnabled(level))
{
Write(level, exception, messageTemplate, new object[] { propertyValue0, propertyValue1 });
}
}
/// <summary>
/// Write a log event with the specified level and associated exception.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write<T0, T1, T2>(LogEventLevel level, Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
// Avoid the array allocation and any boxing allocations when the level isn't enabled
if (IsEnabled(level))
{
Write(level, exception, messageTemplate, new object[] { propertyValue0, propertyValue1, propertyValue2 });
}
}
/// <summary>
/// Write a log event with the specified level and associated exception.
/// </summary>
/// <param name="level">The level of the event.</param>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
[MessageTemplateFormatMethod("messageTemplate")]
public void Write(LogEventLevel level, Exception exception, string messageTemplate, params object[] propertyValues)
{
if (!IsEnabled(level)) return;
if (messageTemplate == null) return;
// Catch a common pitfall when a single non-object array is cast to object[]
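            // e.g. when the argument reaching this overload is a string[] (array covariance
            // lets it satisfy the params object[] parameter), it is re-wrapped below so the
            // array is bound as a single property value instead of being spread across
            // several positional properties.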
if (propertyValues != null &&
propertyValues.GetType() != typeof(object[]))
propertyValues = new object[] { propertyValues };
MessageTemplate parsedTemplate;
IEnumerable<LogEventProperty> boundProperties;
_messageTemplateProcessor.Process(messageTemplate, propertyValues, out parsedTemplate, out boundProperties);
var logEvent = new LogEvent(DateTimeOffset.Now, level, exception, parsedTemplate, boundProperties);
Dispatch(logEvent);
}
/// <summary>
/// Write an event to the log.
/// </summary>
/// <param name="logEvent">The event to write.</param>
public void Write(LogEvent logEvent)
{
if (logEvent == null) return;
if (!IsEnabled(logEvent.Level)) return;
Dispatch(logEvent);
}
void ILogEventSink.Emit(LogEvent logEvent)
{
if (logEvent == null) throw new ArgumentNullException(nameof(logEvent));
// Bypasses the level check so that child loggers
// using this one as a sink can increase verbosity.
Dispatch(logEvent);
}
void Dispatch(LogEvent logEvent)
{
// The enricher may be a "safe" aggregate one, but is most commonly bare and so
// the exception handling from SafeAggregateEnricher is duplicated here.
try
{
_enricher.Enrich(logEvent, _messageTemplateProcessor);
}
catch (Exception ex)
{
SelfLog.WriteLine("Exception {0} caught while enriching {1} with {2}.", ex, logEvent, _enricher);
}
_sink.Emit(logEvent);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Verbose("Staring into space, wondering if we're alone.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose(string messageTemplate)
{
Write(LogEventLevel.Verbose, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Verbose("Staring into space, wondering if we're alone.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose<T>(string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Verbose, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Verbose("Staring into space, wondering if we're alone.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose<T0, T1>(string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Verbose, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Verbose("Staring into space, wondering if we're alone.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose<T0, T1, T2>(string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Verbose, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level and associated exception.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Verbose("Staring into space, wondering if we're alone.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose(string messageTemplate, params object[] propertyValues)
{
Verbose((Exception)null, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Verbose(ex, "Staring into space, wondering where this comet came from.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose(Exception exception, string messageTemplate)
{
Write(LogEventLevel.Verbose, exception, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Verbose(ex, "Staring into space, wondering where this comet came from.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose<T>(Exception exception, string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Verbose, exception, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Verbose(ex, "Staring into space, wondering where this comet came from.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose<T0, T1>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Verbose, exception, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Verbose(ex, "Staring into space, wondering where this comet came from.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose<T0, T1, T2>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Verbose, exception, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Verbose"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Verbose(ex, "Staring into space, wondering where this comet came from.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Verbose(Exception exception, string messageTemplate, params object[] propertyValues)
{
Write(LogEventLevel.Verbose, exception, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Debug("Starting up at {StartedAt}.", DateTime.Now);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug(string messageTemplate)
{
Write(LogEventLevel.Debug, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Debug("Starting up at {StartedAt}.", DateTime.Now);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug<T>(string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Debug, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Debug("Starting up at {StartedAt}.", DateTime.Now);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug<T0, T1>(string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Debug, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Debug("Starting up at {StartedAt}.", DateTime.Now);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug<T0, T1, T2>(string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Debug, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level and associated exception.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Debug("Starting up at {StartedAt}.", DateTime.Now);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug(string messageTemplate, params object[] propertyValues)
{
Debug((Exception)null, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Debug(ex, "Swallowing a mundane exception.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug(Exception exception, string messageTemplate)
{
Write(LogEventLevel.Debug, exception, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Debug(ex, "Swallowing a mundane exception.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug<T>(Exception exception, string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Debug, exception, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Debug(ex, "Swallowing a mundane exception.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug<T0, T1>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Debug, exception, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Debug(ex, "Swallowing a mundane exception.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug<T0, T1, T2>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Debug, exception, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Debug"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Debug(ex, "Swallowing a mundane exception.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Debug(Exception exception, string messageTemplate, params object[] propertyValues)
{
Write(LogEventLevel.Debug, exception, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Information("Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information(string messageTemplate)
{
Write(LogEventLevel.Information, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Information("Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information<T>(string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Information, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Information("Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information<T0, T1>(string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Information, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Information("Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information<T0, T1, T2>(string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Information, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level and associated exception.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Information("Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information(string messageTemplate, params object[] propertyValues)
{
Information((Exception)null, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Information(ex, "Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information(Exception exception, string messageTemplate)
{
Write(LogEventLevel.Information, exception, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Information(ex, "Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information<T>(Exception exception, string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Information, exception, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Information(ex, "Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information<T0, T1>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Information, exception, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Information(ex, "Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information<T0, T1, T2>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Information, exception, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Information"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Information(ex, "Processed {RecordCount} records in {TimeMS}.", records.Length, sw.ElapsedMilliseconds);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Information(Exception exception, string messageTemplate, params object[] propertyValues)
{
Write(LogEventLevel.Information, exception, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Warning("Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning(string messageTemplate)
{
Write(LogEventLevel.Warning, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Warning("Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning<T>(string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Warning, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Warning("Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning<T0, T1>(string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Warning, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Warning("Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning<T0, T1, T2>(string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Warning, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level and associated exception.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Warning("Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning(string messageTemplate, params object[] propertyValues)
{
Warning((Exception)null, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Warning(ex, "Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning(Exception exception, string messageTemplate)
{
Write(LogEventLevel.Warning, exception, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Warning(ex, "Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning<T>(Exception exception, string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Warning, exception, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Warning(ex, "Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning<T0, T1>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Warning, exception, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Warning(ex, "Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning<T0, T1, T2>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Warning, exception, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Warning"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Warning(ex, "Skipped {SkipCount} records.", skippedRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Warning(Exception exception, string messageTemplate, params object[] propertyValues)
{
Write(LogEventLevel.Warning, exception, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Error("Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error(string messageTemplate)
{
Write(LogEventLevel.Error, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Error("Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error<T>(string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Error, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Error("Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error<T0, T1>(string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Error, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Error("Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error<T0, T1, T2>(string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Error, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level and associated exception.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Error("Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error(string messageTemplate, params object[] propertyValues)
{
Error((Exception)null, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Error(ex, "Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error(Exception exception, string messageTemplate)
{
Write(LogEventLevel.Error, exception, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Error(ex, "Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error<T>(Exception exception, string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Error, exception, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Error(ex, "Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error<T0, T1>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Error, exception, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Error(ex, "Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error<T0, T1, T2>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Error, exception, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Error"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Error(ex, "Failed {ErrorCount} records.", brokenRecords.Length);
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Error(Exception exception, string messageTemplate, params object[] propertyValues)
{
Write(LogEventLevel.Error, exception, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Fatal("Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal(string messageTemplate)
{
Write(LogEventLevel.Fatal, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Fatal("Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal<T>(string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Fatal, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Fatal("Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal<T0, T1>(string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Fatal, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Fatal("Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal<T0, T1, T2>(string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Fatal, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level and associated exception.
/// </summary>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Fatal("Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal(string messageTemplate, params object[] propertyValues)
{
Fatal((Exception)null, messageTemplate, propertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <example>
/// Log.Fatal(ex, "Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal(Exception exception, string messageTemplate)
{
Write(LogEventLevel.Fatal, exception, messageTemplate, NoPropertyValues);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Fatal(ex, "Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal<T>(Exception exception, string messageTemplate, T propertyValue)
{
Write(LogEventLevel.Fatal, exception, messageTemplate, propertyValue);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Fatal(ex, "Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal<T0, T1>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1)
{
Write(LogEventLevel.Fatal, exception, messageTemplate, propertyValue0, propertyValue1);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValue0">Object positionally formatted into the message template.</param>
/// <param name="propertyValue1">Object positionally formatted into the message template.</param>
/// <param name="propertyValue2">Object positionally formatted into the message template.</param>
/// <example>
/// Log.Fatal(ex, "Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal<T0, T1, T2>(Exception exception, string messageTemplate, T0 propertyValue0, T1 propertyValue1, T2 propertyValue2)
{
Write(LogEventLevel.Fatal, exception, messageTemplate, propertyValue0, propertyValue1, propertyValue2);
}
/// <summary>
/// Write a log event with the <see cref="LogEventLevel.Fatal"/> level and associated exception.
/// </summary>
/// <param name="exception">Exception related to the event.</param>
/// <param name="messageTemplate">Message template describing the event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <example>
/// Log.Fatal(ex, "Process terminating.");
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public void Fatal(Exception exception, string messageTemplate, params object[] propertyValues)
{
Write(LogEventLevel.Fatal, exception, messageTemplate, propertyValues);
}
/// <summary>
/// Uses configured scalar conversion and destructuring rules to bind a set of properties to a
/// message template. Returns false if the template or values are invalid (<see cref="ILogger"/>
/// methods never throw exceptions).
/// </summary>
/// <param name="messageTemplate">Message template describing an event.</param>
/// <param name="propertyValues">Objects positionally formatted into the message template.</param>
/// <param name="parsedTemplate">The internal representation of the template, which may be used to
/// render the <paramref name="boundProperties"/> as text.</param>
/// <param name="boundProperties">Captured properties from the template and <paramref name="propertyValues"/>.</param>
/// <example>
/// MessageTemplate template;
/// IEnumerable<LogEventProperty> properties;
/// if (Log.BindMessageTemplate("Hello, {Name}!", new[] { "World" }, out template, out properties))
/// {
/// var propsByName = properties.ToDictionary(p => p.Name, p => p.Value);
/// Console.WriteLine(template.Render(propsByName, null));
/// // -> "Hello, World!"
/// }
/// </example>
[MessageTemplateFormatMethod("messageTemplate")]
public bool BindMessageTemplate(string messageTemplate, object[] propertyValues, out MessageTemplate parsedTemplate, out IEnumerable<LogEventProperty> boundProperties)
{
if (messageTemplate == null)
{
parsedTemplate = null;
boundProperties = null;
return false;
}
_messageTemplateProcessor.Process(messageTemplate, propertyValues, out parsedTemplate, out boundProperties);
return true;
}
/// <summary>
/// Uses configured scalar conversion and destructuring rules to bind a property value to its captured
/// representation.
/// </summary>
/// <param name="propertyName">The name of the property. Must be non-empty.</param>
/// <param name="value">The property value.</param>
/// <param name="destructureObjects">If true, the value will be serialized as a structured
/// object if possible; if false, the object will be recorded as a scalar or simple array.</param>
/// <param name="property">The resulting property.</param>
/// <returns>True if the property could be bound, otherwise false (<see cref="ILogger"/>
/// methods never throw exceptions).</returns>
public bool BindProperty(string propertyName, object value, bool destructureObjects, out LogEventProperty property)
{
if (!LogEventProperty.IsValidName(propertyName))
{
property = null;
return false;
}
property = _messageTemplateProcessor.CreateProperty(propertyName, value, destructureObjects);
return true;
}
/// <summary>
/// Close and flush the logging pipeline.
/// </summary>
public void Dispose()
{
_dispose?.Invoke();
}
/// <summary>
/// An <see cref="ILogger"/> instance that efficiently ignores all method calls.
/// </summary>
public static ILogger None { get; } = SilentLogger.Instance;
}
}
| 1 | 16,493 | Adding ICoreLogger here lets us pull this into Core as a strategy | DataDog-dd-trace-dotnet | .cs |
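The review note above proposes an ICoreLogger abstraction so this vendored logger can be pulled into Core as a swappable strategy. A minimal sketch of what that extraction could look like follows; the interface name ICoreLogger, its member list, and the stand-in LogEventLevel enum are assumptions taken from the comment, not the repository's actual API.

using System;

// Stand-in for the vendored Serilog enum used throughout the file above (sketch only).
internal enum LogEventLevel { Verbose, Debug, Information, Warning, Error, Fatal }

// Hypothetical strategy interface implied by the review comment; the name and
// members are assumptions, not the project's real types.
internal interface ICoreLogger
{
    bool IsEnabled(LogEventLevel level);
    void Write(LogEventLevel level, Exception exception, string messageTemplate, params object[] propertyValues);
}

// The concrete logger shown above (assumed to be a class named Logger) would then
// add ICoreLogger to its declaration so Core components can depend on the
// abstraction rather than on the concrete pipeline:
//     internal sealed class Logger : ILogger, ILogEventSink, ICoreLogger { ... }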
@@ -38,7 +38,7 @@ type UsageStats struct {
// ContainerMetadata contains meta-data information for a container.
type ContainerMetadata struct {
- DockerID string `json:"-"`
+ DockerID string
}
// StatsContainer abstracts methods to gather and aggregate utilization data for a container. | 1 | // Copyright 2014-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package stats
import (
"time"
ecsengine "github.com/aws/amazon-ecs-agent/agent/engine"
"github.com/aws/amazon-ecs-agent/agent/stats/resolver"
"golang.org/x/net/context"
)
// ContainerStats encapsulates the raw CPU and memory utilization from cgroup fs.
type ContainerStats struct {
cpuUsage uint64
memoryUsage uint64
timestamp time.Time
}
// UsageStats abstracts the format in which the queue stores data.
type UsageStats struct {
CPUUsagePerc float32 `json:"cpuUsagePerc"`
MemoryUsageInMegs uint32 `json:"memoryUsageInMegs"`
Timestamp time.Time `json:"timestamp"`
cpuUsage uint64
}
// ContainerMetadata contains meta-data information for a container.
type ContainerMetadata struct {
DockerID string `json:"-"`
}
// StatsContainer abstracts methods to gather and aggregate utilization data for a container.
type StatsContainer struct {
containerMetadata *ContainerMetadata
ctx context.Context
cancel context.CancelFunc
client ecsengine.DockerClient
statsQueue *Queue
resolver resolver.ContainerMetadataResolver
}
// taskDefinition encapsulates family and version strings for a task definition
type taskDefinition struct {
family string
version string
}
| 1 | 18,417 | why did you change this? | aws-amazon-ecs-agent | go |
@@ -138,9 +138,15 @@ module RSpec::Core
# @param colorizer [#wrap] An object to colorize the message_lines by
# @return [Array(String)] The example failure message colorized
def colorized_message_lines(colorizer = ::RSpec::Core::Formatters::ConsoleCodes)
- message_lines.map do |line|
- colorizer.wrap line, RSpec.configuration.failure_color
- end
+ message_lines
+ .map do |line|
+ colorizer.wrap line, :failure
+ end
+ .tap do |lines|
+ unless exception_class_name =~ /RSpec/
+ lines[1] = colorizer.wrap(exception_class_name, :failure) + ":"
+ end
+ end
end
# Returns the failures formatted backtrace. | 1 | RSpec::Support.require_rspec_core "formatters/helpers"
module RSpec::Core
# Notifications are value objects passed to formatters to provide them
# with information about a particular event of interest.
module Notifications
# The `StartNotification` represents a notification sent by the reporter
# when the suite is started. It contains the expected number of examples
# to be executed, and the load time of RSpec.
#
# @attr count [Fixnum] the expected number of examples to run
# @attr load_time [Float] the number of seconds taken to boot RSpec
# and load the spec files
StartNotification = Struct.new(:count, :load_time)
# The `ExampleNotification` represents notifications sent by the reporter
# which contain information about the current (or soon to be) example.
# It is used by formatters to access information about that example.
#
# @example
# def example_started(notification)
# puts "Hey I started #{notification.example.description}"
# end
#
# @attr example [RSpec::Core::Example] the current example
ExampleNotification = Struct.new(:example) do
# @private
def self.for(example)
if example.execution_result.pending_fixed?
PendingExampleFixedNotification.new(example)
elsif example.execution_result.status == :failed
FailedExampleNotification.new(example)
else
new(example)
end
end
private_class_method :new
end
# The `ExamplesNotification` represents notifications sent by the reporter
# which contain information about the suites examples.
#
# @example
# def stop(notification)
# puts "Hey I ran #{notification.examples.size}"
# end
#
class ExamplesNotification
def initialize(reporter)
@reporter = reporter
end
# @return [Array(RSpec::Core::Example)] list of examples
def examples
@reporter.examples
end
# @return [Array(RSpec::Core::Example)] list of failed examples
def failed_examples
@reporter.failed_examples
end
# @return [Array(RSpec::Core::Example)] list of pending examples
def pending_examples
@reporter.pending_examples
end
# @return [Array(RSpec::Core::Notifications::ExampleNotification)]
# returns examples as notifications
def notifications
@notifications ||= format(examples)
end
# @return [Array(RSpec::Core::Notifications::FailedExampleNotification)]
# returns failed examples as notifications
def failure_notifications
@failed_notifications ||= format(failed_examples)
end
private
def format(examples)
examples.map do |example|
ExampleNotification.for(example)
end
end
end
# The `FailedExampleNotification` extends `ExampleNotification` with
# things useful for failed specs.
#
# @example
# def example_failed(notification)
# puts "Hey I failed :("
# puts "Here's my stack trace"
# puts notification.exception.backtrace.join("\n")
# end
#
# @attr [RSpec::Core::Example] example the current example
# @see ExampleNotification
class FailedExampleNotification < ExampleNotification
public_class_method :new
# @return [Exception] The example failure
def exception
example.execution_result.exception
end
# @return [String] The example description
def description
example.full_description
end
# Returns the message generated for this failure line by line.
#
# @return [Array(String)] The example failure message
def message_lines
@lines ||=
begin
lines = ["Failure/Error: #{read_failed_line.strip}"]
lines << "#{exception_class_name}:" unless exception_class_name =~ /RSpec/
exception.message.to_s.split("\n").each do |line|
lines << " #{line}" if exception.message
end
if shared_group
lines << "Shared Example Group: \"#{shared_group.metadata[:shared_group_name]}\"" +
" called from #{backtrace_formatter.backtrace_line(shared_group.location)}"
end
lines
end
end
# Returns the message generated for this failure colorized line by line.
#
# @param colorizer [#wrap] An object to colorize the message_lines by
# @return [Array(String)] The example failure message colorized
def colorized_message_lines(colorizer = ::RSpec::Core::Formatters::ConsoleCodes)
message_lines.map do |line|
colorizer.wrap line, RSpec.configuration.failure_color
end
end
# Returns the failures formatted backtrace.
#
# @return [Array(String)] the examples backtrace lines
def formatted_backtrace
backtrace_formatter.format_backtrace(exception.backtrace, example.metadata)
end
# Returns the failures colorized formatted backtrace.
#
# @param colorizer [#wrap] An object to colorize the message_lines by
# @return [Array(String)] the examples colorized backtrace lines
def colorized_formatted_backtrace(colorizer = ::RSpec::Core::Formatters::ConsoleCodes)
formatted_backtrace.map do |backtrace_info|
colorizer.wrap "# #{backtrace_info}", RSpec.configuration.detail_color
end
end
private
def backtrace_formatter
RSpec.configuration.backtrace_formatter
end
def exception_class_name
name = exception.class.name.to_s
name ="(anonymous error class)" if name == ''
name
end
def shared_group
@shared_group ||= group_and_parent_groups.find { |group| group.metadata[:shared_group_name] }
end
def group_and_parent_groups
example.example_group.parent_groups + [example.example_group]
end
def read_failed_line
unless matching_line = find_failed_line
return "Unable to find matching line from backtrace"
end
file_path, line_number = matching_line.match(/(.+?):(\d+)(|:\d+)/)[1..2]
if File.exist?(file_path)
File.readlines(file_path)[line_number.to_i - 1] ||
"Unable to find matching line in #{file_path}"
else
"Unable to find #{file_path} to read failed line"
end
rescue SecurityError
"Unable to read failed line"
end
def find_failed_line
path = File.expand_path(example.file_path)
exception.backtrace.detect do |line|
match = line.match(/(.+?):(\d+)(|:\d+)/)
match && match[1].downcase == path.downcase
end
end
end
# The `PendingExampleFixedNotification` extends `ExampleNotification` with
# things useful for specs that pass when they are expected to fail.
#
# @attr [RSpec::Core::Example] example the current example
# @see ExampleNotification
class PendingExampleFixedNotification < FailedExampleNotification
public_class_method :new
# Returns the examples description
#
# @return [String] The example description
def description
"#{example.full_description} FIXED"
end
# Returns the message generated for this failure line by line.
#
# @return [Array(String)] The example failure message
def message_lines
["Expected pending '#{example.execution_result.pending_message}' to fail. No Error was raised."]
end
# Returns the message generated for this failure colorized line by line.
#
# @param colorizer [#wrap] An object to colorize the message_lines by
# @return [Array(String)] The example failure message colorized
def colorized_message_lines(colorizer = ::RSpec::Core::Formatters::ConsoleCodes)
message_lines.map { |line| colorizer.wrap(line, RSpec.configuration.fixed_color) }
end
end
# The `GroupNotification` represents notifications sent by the reporter which
# contain information about the currently running (or soon to be) example group
# It is used by formatters to access information about that group.
#
# @example
# def example_group_started(notification)
# puts "Hey I started #{notification.group.description}"
# end
# @attr group [RSpec::Core::ExampleGroup] the current group
GroupNotification = Struct.new(:group)
# The `MessageNotification` encapsulates generic messages that the reporter
# sends to formatters.
#
# @attr message [String] the message
MessageNotification = Struct.new(:message)
# The `SeedNotification` holds the seed used to randomize examples and
# whether that seed has been used or not.
#
# @attr seed [Fixnum] the seed used to randomize ordering
# @attr used [Boolean] whether the seed has been used or not
SeedNotification = Struct.new(:seed, :used) do
# @api
# @return [Boolean] has the seed been used?
def seed_used?
!!used
end
private :used
end
# The `SummaryNotification` holds information about the results of running
# a test suite. It is used by formatters to provide information at the end
# of the test run.
#
# @attr duration [Float] the time taken (in seconds) to run the suite
# @attr examples [Array(RSpec::Core::Example)] the examples run
# @attr failed_examples [Array(RSpec::Core::Example)] the failed examples
# @attr pending_examples [Array(RSpec::Core::Example)] the pending examples
# @attr load_time [Float] the number of seconds taken to boot RSpec
# and load the spec files
SummaryNotification = Struct.new(:duration, :examples, :failed_examples, :pending_examples, :load_time) do
# @api
# @return [Fixnum] the number of examples run
def example_count
@example_count ||= examples.size
end
# @api
# @return [Fixnum] the number of failed examples
def failure_count
@failure_count ||= failed_examples.size
end
# @api
# @return [Fixnum] the number of pending examples
def pending_count
@pending_count ||= pending_examples.size
end
# @api
# @return [String] A line summarising the results of the spec run.
def summary_line
summary = Formatters::Helpers.pluralize(example_count, "example")
summary << ", " << Formatters::Helpers.pluralize(failure_count, "failure")
summary << ", #{pending_count} pending" if pending_count > 0
summary
end
# @api public
#
# Wraps the summary line with colors based on the configured
# colors for failure, pending, and success. Defaults to red,
# yellow, green accordingly.
#
# @param colorizer [#wrap] An object which supports wrapping text with
# specific colors.
# @return [String] A colorized summary line.
def colorized(colorizer = ::RSpec::Core::Formatters::ConsoleCodes)
if failure_count > 0
colorizer.wrap(summary_line, RSpec.configuration.failure_color)
elsif pending_count > 0
colorizer.wrap(summary_line, RSpec.configuration.pending_color)
else
colorizer.wrap(summary_line, RSpec.configuration.success_color)
end
end
# @api public
#
# Formats failures into a rerunnable command format.
#
# @param colorizer [#wrap] An object which supports wrapping text with
# specific colors.
# @return [String] Colorized rerun commands for the failed examples.
def colorized_rerun_commands(colorizer = ::RSpec::Core::Formatters::ConsoleCodes)
"\nFailed examples:\n\n" +
failed_examples.map do |example|
colorizer.wrap("rspec #{example.location}", RSpec.configuration.failure_color) + " " +
colorizer.wrap("# #{example.full_description}", RSpec.configuration.detail_color)
end.join("\n")
end
# @return [String] a formatted version of the time it took to run the suite
def formatted_duration
Formatters::Helpers.format_duration(duration)
end
# @return [String] a formatted version of the time it took to boot RSpec and
# load the spec files
def formatted_load_time
Formatters::Helpers.format_duration(load_time)
end
end
# The `ProfileNotification` holds information about the results of running
# a test suite when profiling is enabled. It is used by formatters to provide
# information at the end of the test run for profiling information.
#
# @attr duration [Float] the time taken (in seconds) to run the suite
# @attr examples [Array(RSpec::Core::Example)] the examples run
# @attr number_of_examples [Fixnum] the number of examples to profile
ProfileNotification = Struct.new(:duration, :examples, :number_of_examples) do
# @return [Array(RSpec::Core::Example)] the slowest examples
def slowest_examples
@slowest_examples ||=
examples.sort_by do |example|
-example.execution_result.run_time
end.first(number_of_examples)
end
# @return [Float] the time taken (in seconds) to run the slowest examples
def slow_duration
@slow_duration ||=
slowest_examples.inject(0.0) do |i, e|
i + e.execution_result.run_time
end
end
# @return [String] the percentage of total time taken
def percentage
@percentage ||=
begin
time_taken = slow_duration / duration
'%.1f' % ((time_taken.nan? ? 0.0 : time_taken) * 100)
end
end
# @return [Array(RSpec::Core::Example)] the slowest example groups
def slowest_groups
@slowest_groups ||= calculate_slowest_groups
end
private
def calculate_slowest_groups
example_groups = {}
examples.each do |example|
location = example.example_group.parent_groups.last.metadata[:location]
location_hash = example_groups[location] ||= Hash.new(0)
location_hash[:total_time] += example.execution_result.run_time
location_hash[:count] += 1
unless location_hash.has_key?(:description)
location_hash[:description] = example.example_group.top_level_description
end
end
# stop if there is only one example group
return {} if example_groups.keys.length <= 1
example_groups.each_value do |hash|
hash[:average] = hash[:total_time].to_f / hash[:count]
end
example_groups.sort_by { |_, hash| -hash[:average] }.first(number_of_examples)
end
end
# The `DeprecationNotification` is issued by the reporter when a deprecated
# part of RSpec is encountered. It represents information about the deprecated
# call site.
#
# @attr message [String] A custom message about the deprecation
# @attr deprecated [String] A custom message about the deprecation (alias of message)
# @attr replacement [String] An optional replacement for the deprecation
# @attr call_site [String] An optional call site from which the deprecation was issued
DeprecationNotification = Struct.new(:deprecated, :message, :replacement, :call_site) do
private_class_method :new
# @api
# Convenience way to initialize the notification
def self.from_hash(data)
new data[:deprecated], data[:message], data[:replacement], data[:call_site]
end
end
# `NullNotification` represents a placeholder value for notifications that
# currently require no information, but we may wish to extend in future.
class NullNotification
end
end
end
| 1 | 13,161 | This is invalid syntax on 1.8. | rspec-rspec-core | rb |
@@ -408,10 +408,10 @@ M END
rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>[C:1][O:2].[N:3]')
r1 = rxn.GetReactantTemplate(0)
sma = Chem.MolToSmarts(r1)
- self.assertEqual(sma, '[C:1]1-,:[O:2]-,:[N:3]-,:1')
+ self.assertEqual(sma, '[C:1]1[O:2][N:3]1')
p1 = rxn.GetProductTemplate(0)
sma = Chem.MolToSmarts(p1)
- self.assertEqual(sma, '[C:1]-,:[O:2]')
+ self.assertEqual(sma, '[C:1][O:2]')
p2 = rxn.GetProductTemplate(1)
sma = Chem.MolToSmarts(p2) | 1 | # $Id$
#
# Copyright (c) 2007-2014, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import print_function
import unittest, doctest
import os, sys
from rdkit.six import exec_
from rdkit.six.moves import cPickle
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import rdChemReactions
from rdkit import Geometry
from rdkit import RDConfig
from rdkit.Chem.SimpleEnum import Enumerator
def feq(v1, v2, tol2=1e-4):
return abs(v1 - v2) <= tol2
def ptEq(pt1, pt2, tol=1e-4):
return feq(pt1.x, pt2.x, tol) and feq(pt1.y, pt2.y, tol) and feq(pt1.z, pt2.z, tol)
# Boost functions are NOT found by doctest, this "fixes" them
# by adding the doctests to a fake module
import imp
TestPreprocess = imp.new_module("TestPreprocess")
code = """
from rdkit.Chem import rdChemReactions
def PreprocessReaction(*a, **kw):
'''%s
'''
return rdChemReactions.PreprocessReaction(*a, **kw)
""" % "\n".join([x.lstrip() for x in rdChemReactions.PreprocessReaction.__doc__.split("\n")])
exec_(code, TestPreprocess.__dict__)
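# load_tests is the standard unittest protocol hook; it pulls the doctests from
# the Enumerator module and the TestPreprocess wrapper defined above into this suite.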
def load_tests(loader, tests, ignore):
tests.addTests(doctest.DocTestSuite(Enumerator))
tests.addTests(doctest.DocTestSuite(TestPreprocess))
return tests
class TestCase(unittest.TestCase):
def setUp(self):
self.dataDir = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'ChemReactions', 'testData')
def test1Basics(self):
rxna = rdChemReactions.ChemicalReaction()
# also tests empty copy constructor
for rxn in [rxna, rdChemReactions.ChemicalReaction(rxna)]:
self.assertTrue(rxn.GetNumReactantTemplates() == 0)
self.assertTrue(rxn.GetNumProductTemplates() == 0)
r1 = Chem.MolFromSmarts('[C:1](=[O:2])O')
rxn.AddReactantTemplate(r1)
self.assertTrue(rxn.GetNumReactantTemplates() == 1)
r1 = Chem.MolFromSmarts('[N:3]')
rxn.AddReactantTemplate(r1)
self.assertTrue(rxn.GetNumReactantTemplates() == 2)
r1 = Chem.MolFromSmarts('[C:1](=[O:2])[N:3]')
rxn.AddProductTemplate(r1)
self.assertTrue(rxn.GetNumProductTemplates() == 1)
reacts = (Chem.MolFromSmiles('C(=O)O'), Chem.MolFromSmiles('N'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
self.assertTrue(ps[0][0].GetNumAtoms() == 3)
ps = rxn.RunReactants(list(reacts))
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
self.assertTrue(ps[0][0].GetNumAtoms() == 3)
def test2DaylightParser(self):
rxna = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]')
for rxn in [rxna, rdChemReactions.ChemicalReaction(rxna)]:
self.assertTrue(rxn)
self.assertTrue(rxn.GetNumReactantTemplates() == 2)
self.assertTrue(rxn.GetNumProductTemplates() == 1)
self.assertTrue(rxn._getImplicitPropertiesFlag())
reacts = (Chem.MolFromSmiles('C(=O)O'), Chem.MolFromSmiles('N'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
self.assertTrue(ps[0][0].GetNumAtoms() == 3)
reacts = (Chem.MolFromSmiles('CC(=O)OC'), Chem.MolFromSmiles('CN'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
self.assertTrue(ps[0][0].GetNumAtoms() == 5)
def test3MDLParsers(self):
fileN = os.path.join(self.dataDir, 'AmideBond.rxn')
rxna = rdChemReactions.ReactionFromRxnFile(fileN)
print("*" * 44)
print(fileN)
print(rxna)
for rxn in [rxna, rdChemReactions.ChemicalReaction(rxna)]:
self.assertTrue(rxn)
self.assertFalse(rxn._getImplicitPropertiesFlag())
self.assertTrue(rxn.GetNumReactantTemplates() == 2)
self.assertTrue(rxn.GetNumProductTemplates() == 1)
reacts = (Chem.MolFromSmiles('C(=O)O'), Chem.MolFromSmiles('N'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
self.assertTrue(ps[0][0].GetNumAtoms() == 3)
with open(fileN, 'r') as rxnF:
rxnBlock = rxnF.read()
rxn = rdChemReactions.ReactionFromRxnBlock(rxnBlock)
self.assertTrue(rxn)
self.assertTrue(rxn.GetNumReactantTemplates() == 2)
self.assertTrue(rxn.GetNumProductTemplates() == 1)
reacts = (Chem.MolFromSmiles('C(=O)O'), Chem.MolFromSmiles('N'))
ps = rxn.RunReactants(reacts)
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
self.assertTrue(ps[0][0].GetNumAtoms() == 3)
def test4ErrorHandling(self):
self.assertRaises(
ValueError,
lambda x='[C:1](=[O:2])Q.[N:3]>>[C:1](=[O:2])[N:3]': rdChemReactions.ReactionFromSmarts(x))
self.assertRaises(
ValueError,
lambda x='[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]Q': rdChemReactions.ReactionFromSmarts(x))
self.assertRaises(
ValueError,
lambda x='[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]>>CC': rdChemReactions.ReactionFromSmarts(x))
block = """$RXN
ISIS 082120061354
3 1
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
self.assertRaises(ValueError, lambda x=block: rdChemReactions.ReactionFromRxnBlock(x))
block = """$RXN
ISIS 082120061354
2 1
$MOL
-ISIS- 08210613542D
4 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
#self.assertRaises(ValueError,lambda x=block:rdChemReactions.ReactionFromRxnBlock(x))
block = """$RXN
ISIS 082120061354
2 1
$MOL
-ISIS- 08210613542D
3 2 0 0 0 0 0 0 0 0999 V2000
-1.4340 -0.6042 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
-0.8639 -0.9333 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0
-1.4340 0.0542 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
1 3 2 0 0 0 0
M END
$MOL
-ISIS- 08210613542D
1 0 0 0 0 0 0 0 0 0999 V2000
2.2125 -0.7833 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
M END
$MOL
-ISIS- 08210613542D
3 1 0 0 0 0 0 0 0 0999 V2000
9.5282 -0.8083 0.0000 N 0 0 0 0 0 0 0 0 0 3 0 0
8.9579 -0.4792 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0
8.9579 0.1792 0.0000 O 0 0 0 0 0 0 0 0 0 1 0 0
1 2 1 0 0 0 0
2 3 2 0 0 0 0
M END
"""
#self.assertRaises(ValueError,lambda x=block:rdChemReactions.ReactionFromRxnBlock(x))
def test5Validation(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3]')
self.assertTrue(rxn)
self.assertTrue(rxn.Validate() == (0, 0))
rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:1])O.[N:3]>>[C:1](=[O:2])[N:3]')
self.assertTrue(rxn)
self.assertTrue(rxn.Validate() == (1, 1))
rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])[O:4].[N:3]>>[C:1](=[O:2])[N:3]')
self.assertTrue(rxn)
self.assertTrue(rxn.Validate() == (1, 0))
rxn = rdChemReactions.ReactionFromSmarts('[C:1](=[O:2])O.[N:3]>>[C:1](=[O:2])[N:3][C:5]')
self.assertTrue(rxn)
self.assertTrue(rxn.Validate() == (1, 0))
def test6Exceptions(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1]Cl>>[C:1]')
self.assertTrue(rxn)
self.assertRaises(ValueError, lambda x=rxn: x.RunReactants(()))
self.assertRaises(
ValueError, lambda x=rxn: x.RunReactants((Chem.MolFromSmiles('CC'), Chem.MolFromSmiles('C'))))
ps = rxn.RunReactants((Chem.MolFromSmiles('CCCl'), ))
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
def _test7Leak(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1]Cl>>[C:1]')
self.assertTrue(rxn)
print('running: ')
for i in range(100000):
ps = rxn.RunReactants((Chem.MolFromSmiles('CCCl'), ))
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
if not i % 1000:
print(i)
def test8Properties(self):
rxn = rdChemReactions.ReactionFromSmarts('[O:1]>>[O:1][3#0]')
self.assertTrue(rxn)
ps = rxn.RunReactants((Chem.MolFromSmiles('CO'), ))
self.assertTrue(len(ps) == 1)
self.assertTrue(len(ps[0]) == 1)
Chem.SanitizeMol(ps[0][0])
self.assertEqual(ps[0][0].GetAtomWithIdx(1).GetIsotope(), 3)
def test9AromaticityTransfer(self):
# this was issue 2664121
mol = Chem.MolFromSmiles('c1ccc(C2C3(Cc4c(cccc4)C2)CCCC3)cc1')
rxn = rdChemReactions.ReactionFromSmarts(
'[A:1]1~[*:2]~[*:3]~[*:4]~[*:5]~[A:6]-;@1>>[*:1]~[*:2]~[*:3]~[*:4]~[*:5]~[*:6]')
products = rxn.RunReactants([mol])
self.assertEqual(len(products), 6)
for p in products:
self.assertEqual(len(p), 1)
Chem.SanitizeMol(p[0])
def test10DotSeparation(self):
# 08/05/14
# This test is changed due to a new behavior of the smarts
# reaction parser which now allows using parenthesis in products
# as well. original smiles: '[C:1]1[O:2][N:3]1>>[C:1]1[O:2].[N:3]1'
rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>([C:1]1[O:2].[N:3]1)')
mol = Chem.MolFromSmiles('C1ON1')
products = rxn.RunReactants([mol])
self.assertEqual(len(products), 1)
for p in products:
self.assertEqual(len(p), 1)
self.assertEqual(p[0].GetNumAtoms(), 3)
self.assertEqual(p[0].GetNumBonds(), 2)
def test11ImplicitProperties(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1]O>>[C:1]')
mol = Chem.MolFromSmiles('CCO')
products = rxn.RunReactants([mol])
self.assertEqual(len(products), 1)
for p in products:
self.assertEqual(len(p), 1)
self.assertEqual(Chem.MolToSmiles(p[0]), 'CC')
mol2 = Chem.MolFromSmiles('C[CH-]O')
products = rxn.RunReactants([mol2])
self.assertEqual(len(products), 1)
for p in products:
self.assertEqual(len(p), 1)
self.assertEqual(Chem.MolToSmiles(p[0]), '[CH2-]C')
rxn._setImplicitPropertiesFlag(False)
products = rxn.RunReactants([mol])
self.assertEqual(len(products), 1)
for p in products:
self.assertEqual(len(p), 1)
self.assertEqual(Chem.MolToSmiles(p[0]), 'CC')
products = rxn.RunReactants([mol2])
self.assertEqual(len(products), 1)
for p in products:
self.assertEqual(len(p), 1)
self.assertEqual(Chem.MolToSmiles(p[0]), 'CC')
def test12Pickles(self):
# 08/05/14
# This test is changed due to a new behavior of the smarts
# reaction parser which now allows using parenthesis in products
# as well. original smiles: '[C:1]1[O:2][N:3]1>>[C:1]1[O:2].[N:3]1'
rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>([C:1]1[O:2].[N:3]1)')
pkl = cPickle.dumps(rxn)
rxn = cPickle.loads(pkl)
mol = Chem.MolFromSmiles('C1ON1')
products = rxn.RunReactants([mol])
self.assertEqual(len(products), 1)
for p in products:
self.assertEqual(len(p), 1)
self.assertEqual(p[0].GetNumAtoms(), 3)
self.assertEqual(p[0].GetNumBonds(), 2)
rxn = rdChemReactions.ChemicalReaction(rxn.ToBinary())
products = rxn.RunReactants([mol])
self.assertEqual(len(products), 1)
for p in products:
self.assertEqual(len(p), 1)
self.assertEqual(p[0].GetNumAtoms(), 3)
self.assertEqual(p[0].GetNumBonds(), 2)
def test13GetTemplates(self):
rxn = rdChemReactions.ReactionFromSmarts('[C:1]1[O:2][N:3]1>>[C:1][O:2].[N:3]')
r1 = rxn.GetReactantTemplate(0)
sma = Chem.MolToSmarts(r1)
self.assertEqual(sma, '[C:1]1-,:[O:2]-,:[N:3]-,:1')
p1 = rxn.GetProductTemplate(0)
sma = Chem.MolToSmarts(p1)
self.assertEqual(sma, '[C:1]-,:[O:2]')
p2 = rxn.GetProductTemplate(1)
sma = Chem.MolToSmarts(p2)
self.assertEqual(sma, '[N:3]')
self.assertRaises(ValueError, lambda: rxn.GetProductTemplate(2))
self.assertRaises(ValueError, lambda: rxn.GetReactantTemplate(1))
def test14Matchers(self):
rxn = rdChemReactions.ReactionFromSmarts(
'[C;!$(C(-O)-O):1](=[O:2])[O;H,-1].[N;!H0:3]>>[C:1](=[O:2])[N:3]')
self.assertTrue(rxn)
rxn.Initialize()
self.assertTrue(rxn.IsMoleculeReactant(Chem.MolFromSmiles('OC(=O)C')))
self.assertFalse(rxn.IsMoleculeReactant(Chem.MolFromSmiles('OC(=O)O')))
self.assertTrue(rxn.IsMoleculeReactant(Chem.MolFromSmiles('CNC')))
self.assertFalse(rxn.IsMoleculeReactant(Chem.MolFromSmiles('CN(C)C')))
self.assertTrue(rxn.IsMoleculeProduct(Chem.MolFromSmiles('NC(=O)C')))
self.assertTrue(rxn.IsMoleculeProduct(Chem.MolFromSmiles('CNC(=O)C')))
self.assertFalse(rxn.IsMoleculeProduct(Chem.MolFromSmiles('COC(=O)C')))
def test15Replacements(self):
rxn = rdChemReactions.ReactionFromSmarts(
'[{amine}:1]>>[*:1]-C',
replacements={'{amine}': '$([N;!H0;$(N-[#6]);!$(N-[!#6;!#1]);!$(N-C=[O,N,S])])'})
self.assertTrue(rxn)
rxn.Initialize()
reactants = (Chem.MolFromSmiles('CCN'), )
ps = rxn.RunReactants(reactants)
self.assertEqual(len(ps), 1)
self.assertEqual(len(ps[0]), 1)
self.assertEqual(ps[0][0].GetNumAtoms(), 4)
def test16GetReactingAtoms(self):
rxn = rdChemReactions.ReactionFromSmarts("[O:1][C:2].[N:3]>>[N:1][C:2].[N:3]")
self.assertTrue(rxn)
rxn.Initialize()
rAs = rxn.GetReactingAtoms()
self.assertEqual(len(rAs), 2)
self.assertEqual(len(rAs[0]), 1)
self.assertEqual(len(rAs[1]), 0)
rxn = rdChemReactions.ReactionFromSmarts("[O:1]C>>[O:1]C")
self.assertTrue(rxn)
rxn.Initialize()
rAs = rxn.GetReactingAtoms()
self.assertEqual(len(rAs), 1)
self.assertEqual(len(rAs[0]), 2)
rAs = rxn.GetReactingAtoms(True)
self.assertEqual(len(rAs), 1)
self.assertEqual(len(rAs[0]), 1)
def test17AddRecursiveQueriesToReaction(self):
rxn = rdChemReactions.ReactionFromSmarts("[C:1][O:2].[N:3]>>[C:1][N:2]")
self.assertTrue(rxn)
rxn.Initialize()
qs = {'aliphatic': Chem.MolFromSmiles('CC')}
rxn.GetReactantTemplate(0).GetAtomWithIdx(0).SetProp('query', 'aliphatic')
rxn.AddRecursiveQueriesToReaction(qs, 'query')
q = rxn.GetReactantTemplate(0)
m = Chem.MolFromSmiles('CCOC')
self.assertTrue(m.HasSubstructMatch(q))
m = Chem.MolFromSmiles('CO')
self.assertFalse(m.HasSubstructMatch(q))
rxn = rdChemReactions.ReactionFromSmarts("[C:1][O:2].[N:3]>>[C:1][N:2]")
rxn.Initialize()
rxn.GetReactantTemplate(0).GetAtomWithIdx(0).SetProp('query', 'aliphatic')
labels = rxn.AddRecursiveQueriesToReaction(qs, 'query', getLabels=True)
self.assertTrue(len(labels), 1)
def test17bAddRecursiveQueriesToReaction(self):
from rdkit.Chem import FilterCatalog
rxn = rdChemReactions.ReactionFromSmarts("[C:1][O:2].[N:3]>>[C:1][N:2]")
self.assertTrue(rxn)
rxn.Initialize()
rxn.GetReactantTemplate(0).GetAtomWithIdx(0).SetProp('query', 'carboxylicacid')
querydefs = {k.lower(): v
for k, v in FilterCatalog.GetFlattenedFunctionalGroupHierarchy().items()}
self.assertTrue('CarboxylicAcid' in FilterCatalog.GetFlattenedFunctionalGroupHierarchy())
rxn.AddRecursiveQueriesToReaction(querydefs, 'query')
q = rxn.GetReactantTemplate(0)
m = Chem.MolFromSmiles('C(=O)[O-].N')
self.assertTrue(m.HasSubstructMatch(q))
m = Chem.MolFromSmiles('C.N')
self.assertFalse(m.HasSubstructMatch(q))
def test18GithubIssue16(self):
rxn = rdChemReactions.ReactionFromSmarts("[F:1]>>[Cl:1]")
self.assertTrue(rxn)
rxn.Initialize()
self.assertRaises(ValueError, lambda: rxn.RunReactants((None, )))
def test19RemoveUnmappedMoleculesToAgents(self):
rxn = rdChemReactions.ReactionFromSmarts(
"[C:1]=[O:2].[N:3].C(=O)O>[OH2].[Na].[Cl]>[N:3]~[C:1]=[O:2]")
self.failUnless(rxn)
rxn.Initialize()
self.failUnless(rxn.GetNumReactantTemplates() == 3)
self.failUnless(rxn.GetNumProductTemplates() == 1)
self.failUnless(rxn.GetNumAgentTemplates() == 3)
rxn.RemoveUnmappedReactantTemplates()
rxn.RemoveUnmappedProductTemplates()
self.failUnless(rxn.GetNumReactantTemplates() == 2)
self.failUnless(rxn.GetNumProductTemplates() == 1)
self.failUnless(rxn.GetNumAgentTemplates() == 4)
rxn = rdChemReactions.ReactionFromSmarts("[C:1]=[O:2].[N:3].C(=O)O>>[N:3]~[C:1]=[O:2].[OH2]")
self.failUnless(rxn)
rxn.Initialize()
self.failUnless(rxn.GetNumReactantTemplates() == 3)
self.failUnless(rxn.GetNumProductTemplates() == 2)
self.failUnless(rxn.GetNumAgentTemplates() == 0)
agentList = []
rxn.RemoveUnmappedReactantTemplates(moveToAgentTemplates=False, targetList=agentList)
rxn.RemoveUnmappedProductTemplates(targetList=agentList)
self.failUnless(rxn.GetNumReactantTemplates() == 2)
self.failUnless(rxn.GetNumProductTemplates() == 1)
self.failUnless(rxn.GetNumAgentTemplates() == 1)
self.failUnless(len(agentList) == 2)
def test20CheckCopyConstructedReactionAtomProps(self):
RLABEL = "_MolFileRLabel"
amine_rxn = '$RXN\n\n ISIS 090220091541\n\n 2 1\n$MOL\n\n -ISIS- 09020915412D\n\n 3 2 0 0 0 0 0 0 0 0999 V2000\n -2.9083 -0.4708 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0\n -2.3995 -0.1771 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0\n -2.4042 0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 1 0 0 0 0\n 2 3 2 0 0 0 0\nV 2 aldehyde\nM RGP 1 1 1\nM END\n$MOL\n\n -ISIS- 09020915412D\n\n 2 1 0 0 0 0 0 0 0 0999 V2000\n 2.8375 -0.2500 0.0000 R# 0 0 0 0 0 0 0 0 0 3 0 0\n 3.3463 0.0438 0.0000 N 0 0 0 0 0 0 0 0 0 4 0 0\n 1 2 1 0 0 0 0\nV 2 amine\nM RGP 1 1 2\nM END\n$MOL\n\n -ISIS- 09020915412D\n\n 4 3 0 0 0 0 0 0 0 0999 V2000\n 13.3088 0.9436 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0\n 13.8206 1.2321 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0\n 13.3028 0.3561 0.0000 N 0 0 0 0 0 0 0 0 0 4 0 0\n 12.7911 0.0676 0.0000 R# 0 0 0 0 0 0 0 0 0 3 0 0\n 1 3 1 0 0 0 0\n 1 2 1 0 0 0 0\n 3 4 1 0 0 0 0\nM RGP 2 2 1 4 2\nM END\n'
rxn = rdChemReactions.ReactionFromRxnBlock(amine_rxn)
res = []
for atom in rxn.GetReactantTemplate(0).GetAtoms():
if atom.HasProp(RLABEL):
res.append((atom.GetIdx(), atom.GetProp(RLABEL)))
rxn2 = rdChemReactions.ChemicalReaction(rxn)
res2 = []
for atom in rxn2.GetReactantTemplate(0).GetAtoms():
if atom.HasProp(RLABEL):
res2.append((atom.GetIdx(), atom.GetProp(RLABEL)))
self.assertEquals(res, res2)
# currently ToBinary does not save atom props
# rxn2 = rdChemReactions.ChemicalReaction(rxn.ToBinary())
def test21CheckRawIters(self):
RLABEL = "_MolFileRLabel"
amine_rxn = '$RXN\n\n ISIS 090220091541\n\n 2 1\n$MOL\n\n -ISIS- 09020915412D\n\n 3 2 0 0 0 0 0 0 0 0999 V2000\n -2.9083 -0.4708 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0\n -2.3995 -0.1771 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0\n -2.4042 0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 1 0 0 0 0\n 2 3 2 0 0 0 0\nV 2 aldehyde\nM RGP 1 1 1\nM END\n$MOL\n\n -ISIS- 09020915412D\n\n 2 1 0 0 0 0 0 0 0 0999 V2000\n 2.8375 -0.2500 0.0000 R# 0 0 0 0 0 0 0 0 0 3 0 0\n 3.3463 0.0438 0.0000 N 0 0 0 0 0 0 0 0 0 4 0 0\n 1 2 1 0 0 0 0\nV 2 amine\nM RGP 1 1 2\nM END\n$MOL\n\n -ISIS- 09020915412D\n\n 4 3 0 0 0 0 0 0 0 0999 V2000\n 13.3088 0.9436 0.0000 C 0 0 0 0 0 0 0 0 0 2 0 0\n 13.8206 1.2321 0.0000 R# 0 0 0 0 0 0 0 0 0 1 0 0\n 13.3028 0.3561 0.0000 N 0 0 0 0 0 0 0 0 0 4 0 0\n 12.7911 0.0676 0.0000 R# 0 0 0 0 0 0 0 0 0 3 0 0\n 1 3 1 0 0 0 0\n 1 2 1 0 0 0 0\n 3 4 1 0 0 0 0\nM RGP 2 2 1 4 2\nM END\n'
rxn = rdChemReactions.ReactionFromRxnBlock(amine_rxn)
reactants = rxn.GetReactants()
self.assertEquals(len(reactants), rxn.GetNumReactantTemplates())
products = rxn.GetProducts()
self.assertEquals(len(products), rxn.GetNumProductTemplates())
agents = rxn.GetAgents()
self.assertEquals(len(agents), rxn.GetNumAgentTemplates())
for i in range(rxn.GetNumReactantTemplates()):
p = rxn.GetReactantTemplate(i)
mb1 = Chem.MolToMolBlock(p)
mb2 = Chem.MolToMolBlock(reactants[i])
self.assertEquals(mb1, mb2)
def test22RunSingleReactant(self):
# from
# A Collection of Robust Organic Synthesis Reactions for In Silico Molecule Design
# Markus Hartenfeller,*, Martin Eberle, Peter Meier, Cristina Nieto-Oberhuber,
# Karl-Heinz Altmann, Gisbert Schneider, Edgar Jacoby, and Steffen Renner
# Novartis Institutes for BioMedical Research, Novartis Pharma AG, Forum 1,
# Novartis Campus, CH-4056 Basel, Switzerland Swiss Federal Institute of Technology (ETH)
# Zurich, Switzerland
smirks_thiourea = "[N;$(N-[#6]):3]=[C;$(C=S):1].[N;$(N[#6]);!$(N=*);!$([N-]);!$(N#*);!$([ND3]);!$([ND4]);!$(N[O,N]);!$(N[C,S]=[S,O,N]):2]>>[N:3]-[C:1]-[N+0:2]"
rxn = rdChemReactions.ReactionFromSmarts(smirks_thiourea)
reagents = [Chem.MolFromSmiles(x) for x in ['C=CCN=C=S', 'NCc1ncc(Cl)cc1Br']]
res = rxn.RunReactants(reagents)
self.assertTrue(res)
expected_result = [Chem.MolToSmiles(Chem.MolFromSmiles("C=CCNC(N)=S"))]
expected_result.sort()
sidechains_expected_result = [Chem.MolToSmiles(
Chem.MolFromSmiles("[*:1]=S.[*:3]CC=C"), isomericSmiles=True)]
sidechains_nodummy_expected_result = [[0, [3, ], [1, ]], [3, [1, ], [2, ]]]
sidechains_nodummy = []
sidechains_expected_result.sort()
for addDummy in [True, False]:
res = rxn.RunReactant(reagents[0], 0)
assert res
result = []
sidechains = []
for match in res:
for mol in match:
result.append(Chem.MolToSmiles(mol, isomericSmiles=True))
sidechain = rdChemReactions.ReduceProductToSideChains(mol, addDummy)
sidechains.append(Chem.MolToSmiles(sidechain, isomericSmiles=True))
if not addDummy:
for atom in sidechain.GetAtoms():
if atom.HasProp("_rgroupAtomMaps"):
sidechains_nodummy.append([atom.GetIdx(),
eval(atom.GetProp("_rgroupAtomMaps")),
eval(atom.GetProp("_rgroupBonds")), ])
result.sort()
sidechains.sort()
if addDummy:
self.assertEquals(result, expected_result)
self.assertEquals(sidechains, sidechains_expected_result)
else:
self.assertEquals(sidechains_nodummy, sidechains_nodummy_expected_result)
expected_result = [Chem.MolToSmiles(Chem.MolFromSmiles("NCNCc1ncc(Cl)cc1Br"))]
expected_result.sort()
sidechains_expected_result = [Chem.MolToSmiles(
Chem.MolFromSmiles("[*:2]Cc1ncc(Cl)cc1Br"), isomericSmiles=True)]
sidechains_expected_result.sort()
res = rxn.RunReactant(reagents[1], 1)
result = []
sidechains = []
for match in res:
for mol in match:
result.append(Chem.MolToSmiles(mol, isomericSmiles=True))
sidechains.append(
Chem.MolToSmiles(rdChemReactions.ReduceProductToSideChains(mol), isomericSmiles=True))
result.sort()
self.assertEquals(result, expected_result)
self.assertEquals(sidechains, sidechains_expected_result)
self.assertFalse(rxn.RunReactant(reagents[0], 1))
self.assertFalse(rxn.RunReactant(reagents[1], 0))
# try a broken ring based side-chain
sidechains_expected_result = ['c1ccc2c(c1)nc1n2CC[*:2]1']
reactant = Chem.MolFromSmiles('c1ccc2c(c1)nc1n2CCN1')
res = rxn.RunReactant(reactant, 1)
result = []
sidechains = []
for match in res:
for mol in match:
result.append(Chem.MolToSmiles(mol, isomericSmiles=True))
sidechains.append(
Chem.MolToSmiles(rdChemReactions.ReduceProductToSideChains(mol), isomericSmiles=True))
sidechain = rdChemReactions.ReduceProductToSideChains(mol, addDummyAtoms=False)
self.assertEquals(sidechains, sidechains_expected_result)
def test23CheckNonProduct(self):
smirks_thiourea = "[N;$(N-[#6]):3]=[C;$(C=S):1].[N;$(N[#6]);!$(N=*);!$([N-]);!$(N#*);!$([ND3]);!$([ND4]);!$(N[O,N]);!$(N[C,S]=[S,O,N]):2]>>[N:3]-[C:1]-[N+0:2]"
rxn = rdChemReactions.ReactionFromSmarts(smirks_thiourea)
mol = Chem.MolFromSmiles("CCCCCCCC")
m = rdChemReactions.ReduceProductToSideChains(mol)
self.assertTrue(m.GetNumAtoms() == 0)
mol = Chem.AddHs(mol)
m = rdChemReactions.ReduceProductToSideChains(mol)
self.assertTrue(m.GetNumAtoms() == 0)
def testPreprocess(self):
testFile = os.path.join(RDConfig.RDCodeDir, 'Chem', 'SimpleEnum', 'test_data', 'boronic1.rxn')
rxn = rdChemReactions.ReactionFromRxnFile(testFile)
rxn.Initialize()
res = rdChemReactions.PreprocessReaction(rxn)
self.assertEquals(res, (0, 0, 2, 1, (((0, 'halogen.bromine.aromatic'), ), (
(1, 'boronicacid'), ))))
def testProperties(self):
smirks_thiourea = "[N;$(N-[#6]):3]=[C;$(C=S):1].[N;$(N[#6]);!$(N=*);!$([N-]);!$(N#*);!$([ND3]);!$([ND4]);!$(N[O,N]);!$(N[C,S]=[S,O,N]):2]>>[N:3]-[C:1]-[N+0:2]"
rxn = rdChemReactions.ReactionFromSmarts(smirks_thiourea)
self.assertFalse(rxn.HasProp("fooprop"))
rxn.SetProp("fooprop","bar",computed=True)
rxn.SetIntProp("intprop",3)
self.assertTrue(rxn.HasProp("fooprop"))
self.assertTrue(rxn.HasProp("intprop"))
self.assertEquals(rxn.GetIntProp("intprop"),3)
nrxn = rdChemReactions.ChemicalReaction(rxn.ToBinary())
self.assertFalse(nrxn.HasProp("fooprop"))
nrxn = rdChemReactions.ChemicalReaction(rxn.ToBinary(Chem.PropertyPickleOptions.AllProps))
self.assertTrue(nrxn.HasProp("fooprop"))
nrxn.ClearComputedProps()
self.assertFalse(nrxn.HasProp("fooprop"))
self.assertTrue(nrxn.HasProp("intprop"))
self.assertEquals(nrxn.GetIntProp("intprop"),3)
if __name__ == '__main__':
unittest.main(verbosity=True)
| 1 | 18,325 | This isn't part of the PR, but I can't find in the smarts definition that "[C][C]" == "[C]-,:[C]" There is a line saying essentially not to specify undefined items (but that's a bracket versus non bracket thing). | rdkit-rdkit | cpp |
@@ -77,14 +77,14 @@ var (
// UnsignedMessage is an exchange of information between two actors modeled
// as a function call.
-// Messages are the equivalent of transactions in Ethereum.
type UnsignedMessage struct {
+ _ struct{} `cbor:",toarray"`
To address.Address `json:"to"`
From address.Address `json:"from"`
// When receiving a message from a user account the nonce in
// the message must match the expected nonce in the from actor.
// This prevents replay attacks.
- CallSeqNum Uint64 `json:"callSeqNum"`
+ CallSeqNum uint64 `json:"callSeqNum"`
Value AttoFIL `json:"value"`
| 1 | package types
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"math/big"
"github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-amt-ipld/v2"
"github.com/filecoin-project/specs-actors/actors/abi"
specsbig "github.com/filecoin-project/specs-actors/actors/abi/big"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-datastore"
blockstore "github.com/ipfs/go-ipfs-blockstore"
cbor "github.com/ipfs/go-ipld-cbor"
ipld "github.com/ipfs/go-ipld-format"
errPkg "github.com/pkg/errors"
"github.com/filecoin-project/go-filecoin/internal/pkg/cborutil"
e "github.com/filecoin-project/go-filecoin/internal/pkg/enccid"
"github.com/filecoin-project/go-filecoin/internal/pkg/encoding"
typegen "github.com/whyrusleeping/cbor-gen"
)
// MethodID is an identifier of a method (in an actor).
type MethodID Uint64
const (
// InvalidMethodID is the value of an invalid method id.
// Note: this is not in the spec
InvalidMethodID = MethodID(0xFFFFFFFFFFFFFFFF)
// SendMethodID is the method ID for sending money to an actor.
SendMethodID = MethodID(0)
// ConstructorMethodID is the method ID used to initialize an actor's state.
ConstructorMethodID = MethodID(1)
)
// GasUnits represents number of units of gas consumed
type GasUnits = Uint64
// ZeroGas is the zero value for Gas.
const ZeroGas = GasUnits(0)
// BlockGasLimit is the maximum amount of gas that can be used to execute messages in a single block
var BlockGasLimit = NewGasUnits(10000000)
// EmptyMessagesCID is the cid of an empty collection of messages.
var EmptyMessagesCID cid.Cid
// EmptyReceiptsCID is the cid of an empty collection of receipts.
var EmptyReceiptsCID cid.Cid
// EmptyTxMetaCID is the cid of a TxMeta wrapping empty cids
var EmptyTxMetaCID cid.Cid
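// init computes the canonical CIDs of the empty message/receipt AMT and the
// empty TxMeta once, using a throwaway in-memory blockstore.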
func init() {
tmpCst := cborutil.NewIpldStore(blockstore.NewBlockstore(datastore.NewMapDatastore()))
emptyAMTCid, err := amt.FromArray(context.Background(), tmpCst, []typegen.CBORMarshaler{})
if err != nil {
panic("could not create CID for empty AMT")
}
EmptyMessagesCID = emptyAMTCid
EmptyReceiptsCID = emptyAMTCid
EmptyTxMetaCID, err = tmpCst.Put(context.Background(), TxMeta{SecpRoot: e.NewCid(EmptyMessagesCID), BLSRoot: e.NewCid(EmptyMessagesCID)})
if err != nil {
panic("could not create CID for empty TxMeta")
}
}
var (
// ErrInvalidMessageLength is returned when the message length does not match the expected length.
ErrInvalidMessageLength = errors.New("invalid message length")
)
// UnsignedMessage is an exchange of information between two actors modeled
// as a function call.
// Messages are the equivalent of transactions in Ethereum.
type UnsignedMessage struct {
To address.Address `json:"to"`
From address.Address `json:"from"`
// When receiving a message from a user account the nonce in
// the message must match the expected nonce in the from actor.
// This prevents replay attacks.
CallSeqNum Uint64 `json:"callSeqNum"`
Value AttoFIL `json:"value"`
Method MethodID `json:"method"`
Params []byte `json:"params"`
GasPrice AttoFIL `json:"gasPrice"`
GasLimit GasUnits `json:"gasLimit"`
// Pay attention to Equals() if updating this struct.
}
// NewUnsignedMessage creates a new message.
func NewUnsignedMessage(from, to address.Address, nonce uint64, value AttoFIL, method MethodID, params []byte) *UnsignedMessage {
return &UnsignedMessage{
From: from,
To: to,
CallSeqNum: Uint64(nonce),
Value: value,
Method: method,
Params: params,
}
}
// NewMeteredMessage adds gas price and gas limit to the message
func NewMeteredMessage(from, to address.Address, nonce uint64, value AttoFIL, method MethodID, params []byte, price AttoFIL, limit GasUnits) *UnsignedMessage {
return &UnsignedMessage{
From: from,
To: to,
CallSeqNum: Uint64(nonce),
Value: value,
Method: method,
Params: params,
GasPrice: price,
GasLimit: limit,
}
}
// Unmarshal a message from the given bytes.
func (msg *UnsignedMessage) Unmarshal(b []byte) error {
return encoding.Decode(b, msg)
}
// Marshal the message into bytes.
func (msg *UnsignedMessage) Marshal() ([]byte, error) {
return encoding.Encode(msg)
}
// ToNode converts the Message to an IPLD node.
func (msg *UnsignedMessage) ToNode() (ipld.Node, error) {
// Use 32 byte / 256 bit digest.
obj, err := cbor.WrapObject(msg, DefaultHashFunction, -1)
if err != nil {
return nil, err
}
return obj, nil
}
// Cid returns the canonical CID for the message.
// TODO: can we avoid returning an error?
func (msg *UnsignedMessage) Cid() (cid.Cid, error) {
obj, err := msg.ToNode()
if err != nil {
return cid.Undef, errPkg.Wrap(err, "failed to marshal to cbor")
}
return obj.Cid(), nil
}
// OnChainLen returns the amount of bytes used to represent the message on chain.
func (msg *UnsignedMessage) OnChainLen() uint32 {
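// Not implemented yet; calling this panics.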
panic("byteme")
}
func (msg *UnsignedMessage) String() string {
errStr := "(error encoding Message)"
cid, err := msg.Cid()
if err != nil {
return errStr
}
js, err := json.MarshalIndent(msg, "", " ")
if err != nil {
return errStr
}
return fmt.Sprintf("Message cid=[%v]: %s", cid, string(js))
}
// Equals tests whether two messages are equal
func (msg *UnsignedMessage) Equals(other *UnsignedMessage) bool {
return msg.To == other.To &&
msg.From == other.From &&
msg.CallSeqNum == other.CallSeqNum &&
msg.Value.Equal(other.Value) &&
msg.Method == other.Method &&
msg.GasPrice.Equal(other.GasPrice) &&
msg.GasLimit == other.GasLimit &&
bytes.Equal(msg.Params, other.Params)
}
// NewGasPrice constructs a gas price (in AttoFIL) from the given number.
func NewGasPrice(price int64) AttoFIL {
return NewAttoFIL(big.NewInt(price))
}
// NewGasUnits constructs a new GasUnits from the given number.
func NewGasUnits(cost uint64) GasUnits {
return Uint64(cost)
}
// TxMeta tracks the merkleroots of both secp and bls messages separately
type TxMeta struct {
_ struct{} `cbor:",toarray"`
SecpRoot e.Cid `json:"secpRoot"`
BLSRoot e.Cid `json:"blsRoot"`
}
// String returns a readable printing string of TxMeta
func (m TxMeta) String() string {
return fmt.Sprintf("secp: %s, bls: %s", m.SecpRoot.String(), m.BLSRoot.String())
}
// String returns a readable string.
func (id MethodID) String() string {
return fmt.Sprintf("%v", (uint64)(id))
}
// Cost returns the cost of the gas given the price.
func (x GasUnits) Cost(price abi.TokenAmount) abi.TokenAmount {
// turn the gas into a bigint
bigx := abi.NewTokenAmount((int64)(x))
// cost = gas * price
return specsbig.Mul(bigx, price)
}
| 1 | 22,851 | Is this required for tuple encoding? This is confusing. | filecoin-project-venus | go |
@@ -112,6 +112,7 @@ public class FlowRunnerManager implements EventListener,
private final ExecutorLoader executorLoader;
private final ProjectLoader projectLoader;
private final JobTypeManager jobtypeManager;
+ private final FlowPreparer flowPreparer;
private final Props azkabanProps;
private final File executionDirectory; | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.lang.Thread.State;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import azkaban.event.Event;
import azkaban.event.EventListener;
import azkaban.execapp.event.FlowWatcher;
import azkaban.execapp.event.LocalFlowWatcher;
import azkaban.execapp.event.RemoteFlowWatcher;
import azkaban.execapp.metric.NumFailedFlowMetric;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.jobtype.JobTypeManager;
import azkaban.jobtype.JobTypeManagerException;
import azkaban.metric.MetricReportManager;
import azkaban.project.ProjectLoader;
import azkaban.project.ProjectWhitelist;
import azkaban.project.ProjectWhitelist.WhitelistType;
import azkaban.utils.FileIOUtils;
import azkaban.utils.FileIOUtils.JobMetaData;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.JSONUtils;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import azkaban.utils.ThreadPoolExecutingListener;
import azkaban.utils.TrackingThreadPool;
/**
* Execution manager for the server side execution.
*
 * When a flow is submitted to FlowRunnerManager, it is in the
 * {@link Status.PREPARING} status. When a flow is about to be executed by
 * FlowRunner, its status is updated to {@link Status.RUNNING}.
*
* Two main data structures are used in this class to maintain flows.
*
 * runningFlows: this is used for bookkeeping of submitted flows in
 * FlowRunnerManager. It has nothing to do with the executor service that is
 * used to execute the flows. This bookkeeping is used at the time of canceling
 * or killing a flow. The flows in this data structure are removed in the
 * handleEvent method.
*
 * submittedFlows: this is used to keep track of the execution of the flows, so it
 * has the mapping between a Future<?> and an execution id. This would allow us
 * to find out the execution ids of the flows that are in the Status.PREPARING
 * status. The entries in this map are removed once the flow execution is
 * completed.
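 *
 * In short: submitFlow() registers the flow in both maps, the FlowRunner
 * executes it, and the FLOW_FINISHED event moves it from runningFlows into
 * recentlyFinishedFlows for a short while.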
*
*
*/
public class FlowRunnerManager implements EventListener,
ThreadPoolExecutingListener {
private static final Logger logger = Logger.getLogger(FlowRunnerManager.class);
private static final String EXECUTOR_USE_BOUNDED_THREADPOOL_QUEUE = "executor.use.bounded.threadpool.queue";
private static final String EXECUTOR_THREADPOOL_WORKQUEUE_SIZE = "executor.threadpool.workqueue.size";
private static final String EXECUTOR_FLOW_THREADS = "executor.flow.threads";
private static final String FLOW_NUM_JOB_THREADS = "flow.num.job.threads";
// recently finished secs to clean up. 1 minute
private static final int RECENTLY_FINISHED_TIME_TO_LIVE = 60 * 1000;
private static final int DEFAULT_NUM_EXECUTING_FLOWS = 30;
private static final int DEFAULT_FLOW_NUM_JOB_TREADS = 10;
// this map is used to store the flows that have been submitted to
// the executor service. Once a flow has been submitted, it is either
// in the queue waiting to be executed or in executing state.
private final Map<Future<?>, Integer> submittedFlows = new ConcurrentHashMap<>();
private final Map<Integer, FlowRunner> runningFlows = new ConcurrentHashMap<>();
private final Map<Integer, ExecutableFlow> recentlyFinishedFlows = new ConcurrentHashMap<>();
private final Map<Pair<Integer, Integer>, ProjectVersion> installedProjects;
private final TrackingThreadPool executorService;
private final CleanerThread cleanerThread;
private final ExecutorLoader executorLoader;
private final ProjectLoader projectLoader;
private final JobTypeManager jobtypeManager;
private final Props azkabanProps;
private final File executionDirectory;
private final File projectDirectory;
private final Object executionDirDeletionSync = new Object();
private int numThreads = DEFAULT_NUM_EXECUTING_FLOWS;
private int threadPoolQueueSize = -1;
private int numJobThreadPerFlow = DEFAULT_FLOW_NUM_JOB_TREADS;
private Props globalProps;
private long lastCleanerThreadCheckTime = -1;
private long executionDirRetention = 1 * 24 * 60 * 60 * 1000; // 1 Day
// We want to limit the log sizes to about 20 megs
private String jobLogChunkSize = "5MB";
private int jobLogNumFiles = 4;
// If true, jobs will validate proxy user against a list of valid proxy users.
private boolean validateProxyUser = false;
// date time of the last flow submitted.
private long lastFlowSubmittedDate = 0;
// whether the current executor is active
private volatile boolean isExecutorActive = false;
public FlowRunnerManager(Props props, ExecutorLoader executorLoader,
ProjectLoader projectLoader, ClassLoader parentClassLoader) throws IOException {
azkabanProps = props;
// JobWrappingFactory.init(props, getClass().getClassLoader());
executionDirRetention = props.getLong("execution.dir.retention", executionDirRetention);
logger.info("Execution dir retention set to " + executionDirRetention + " ms");
executionDirectory = new File(props.getString("azkaban.execution.dir", "executions"));
if (!executionDirectory.exists()) {
executionDirectory.mkdirs();
}
projectDirectory = new File(props.getString("azkaban.project.dir", "projects"));
if (!projectDirectory.exists()) {
projectDirectory.mkdirs();
}
installedProjects = loadExistingProjects();
// azkaban.temp.dir
numThreads = props.getInt(EXECUTOR_FLOW_THREADS, DEFAULT_NUM_EXECUTING_FLOWS);
numJobThreadPerFlow = props.getInt(FLOW_NUM_JOB_THREADS, DEFAULT_FLOW_NUM_JOB_TREADS);
executorService = createExecutorService(numThreads);
this.executorLoader = executorLoader;
this.projectLoader = projectLoader;
this.jobLogChunkSize = azkabanProps.getString("job.log.chunk.size", "5MB");
this.jobLogNumFiles = azkabanProps.getInt("job.log.backup.index", 4);
this.validateProxyUser = azkabanProps.getBoolean("proxy.user.lock.down", false);
cleanerThread = new CleanerThread();
cleanerThread.start();
String globalPropsPath = props.getString("executor.global.properties", null);
if (globalPropsPath != null) {
globalProps = new Props(null, globalPropsPath);
}
jobtypeManager =
new JobTypeManager(props.getString(
AzkabanExecutorServer.JOBTYPE_PLUGIN_DIR,
JobTypeManager.DEFAULT_JOBTYPEPLUGINDIR), globalProps,
parentClassLoader);
}
private TrackingThreadPool createExecutorService(int nThreads) {
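// Builds the flow-execution thread pool. With the bounded-queue option the
// work queue has a fixed capacity, so extra submissions are rejected
// (AbortPolicy) instead of piling up without bound.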
boolean useNewThreadPool =
azkabanProps.getBoolean(EXECUTOR_USE_BOUNDED_THREADPOOL_QUEUE, false);
logger.info("useNewThreadPool: " + useNewThreadPool);
if (useNewThreadPool) {
threadPoolQueueSize =
azkabanProps.getInt(EXECUTOR_THREADPOOL_WORKQUEUE_SIZE, nThreads);
logger.info("workQueueSize: " + threadPoolQueueSize);
// using a bounded queue for the work queue. The default rejection policy
// {@ThreadPoolExecutor.AbortPolicy} is used
TrackingThreadPool executor =
new TrackingThreadPool(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<Runnable>(threadPoolQueueSize), this);
return executor;
} else {
// the old way of using unbounded task queue.
// if the running tasks are taking a long time or stuck, this queue
// will be very very long.
return new TrackingThreadPool(nThreads, nThreads, 0L,
TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(), this);
}
}
private Map<Pair<Integer, Integer>, ProjectVersion> loadExistingProjects() {
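// Rebuild the installed-projects map by scanning the project directory for
// folders named "<projectId>.<version>".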
Map<Pair<Integer, Integer>, ProjectVersion> allProjects =
new HashMap<Pair<Integer, Integer>, ProjectVersion>();
for (File project : projectDirectory.listFiles(new FilenameFilter() {
String pattern = "[0-9]+\\.[0-9]+";
@Override
public boolean accept(File dir, String name) {
return name.matches(pattern);
}
})) {
if (project.isDirectory()) {
try {
String fileName = new File(project.getAbsolutePath()).getName();
int projectId = Integer.parseInt(fileName.split("\\.")[0]);
int versionNum = Integer.parseInt(fileName.split("\\.")[1]);
ProjectVersion version =
new ProjectVersion(projectId, versionNum, project);
allProjects.put(new Pair<Integer, Integer>(projectId, versionNum),
version);
} catch (Exception e) {
e.printStackTrace();
}
}
}
return allProjects;
}
public void setExecutorActive(boolean isActive) {
this.isExecutorActive = isActive;
}
public long getLastFlowSubmittedTime(){
// Note: this is not thread safe and may result in providing dirty data.
// we will provide this data as is for now and will revisit if there
// is a strong justification for change.
return lastFlowSubmittedDate;
}
public Props getGlobalProps() {
return globalProps;
}
public void setGlobalProps(Props globalProps) {
this.globalProps = globalProps;
}
private class CleanerThread extends Thread {
// Every hour, clean execution dir.
private static final long EXECUTION_DIR_CLEAN_INTERVAL_MS = 60 * 60 * 1000;
// Every 5 mins clean the old project dir
private static final long OLD_PROJECT_DIR_INTERVAL_MS = 5 * 60 * 1000;
// Every 2 mins clean the recently finished list
private static final long RECENTLY_FINISHED_INTERVAL_MS = 2 * 60 * 1000;
private boolean shutdown = false;
private long lastExecutionDirCleanTime = -1;
private long lastOldProjectCleanTime = -1;
private long lastRecentlyFinishedCleanTime = -1;
public CleanerThread() {
this.setName("FlowRunnerManager-Cleaner-Thread");
setDaemon(true);
}
@SuppressWarnings("unused")
public void shutdown() {
shutdown = true;
this.interrupt();
}
public void run() {
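// Wakes up periodically and, on separate intervals, prunes the recently
// finished list, unused project versions, and stale execution directories.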
while (!shutdown) {
synchronized (this) {
try {
lastCleanerThreadCheckTime = System.currentTimeMillis();
logger.info("# of executing flows: " + getNumRunningFlows());
// Cleanup old stuff.
long currentTime = System.currentTimeMillis();
if (currentTime - RECENTLY_FINISHED_INTERVAL_MS > lastRecentlyFinishedCleanTime) {
logger.info("Cleaning recently finished");
cleanRecentlyFinished();
lastRecentlyFinishedCleanTime = currentTime;
}
if (currentTime - OLD_PROJECT_DIR_INTERVAL_MS > lastOldProjectCleanTime && isExecutorActive) {
logger.info("Cleaning old projects");
cleanOlderProjects();
lastOldProjectCleanTime = currentTime;
}
if (currentTime - EXECUTION_DIR_CLEAN_INTERVAL_MS > lastExecutionDirCleanTime) {
logger.info("Cleaning old execution dirs");
cleanOlderExecutionDirs();
lastExecutionDirCleanTime = currentTime;
}
wait(RECENTLY_FINISHED_TIME_TO_LIVE);
} catch (InterruptedException e) {
logger.info("Interrupted. Probably to shut down.");
} catch (Throwable t) {
logger.warn(
"Uncaught throwable, please look into why it is not caught", t);
}
}
}
}
private void cleanOlderExecutionDirs() {
File dir = executionDirectory;
final long pastTimeThreshold =
System.currentTimeMillis() - executionDirRetention;
File[] executionDirs = dir.listFiles(path -> path.isDirectory() && path.lastModified() < pastTimeThreshold);
for (File exDir : executionDirs) {
try {
int execId = Integer.valueOf(exDir.getName());
if (runningFlows.containsKey(execId)
|| recentlyFinishedFlows.containsKey(execId)) {
continue;
}
} catch (NumberFormatException e) {
logger.error("Can't delete exec dir " + exDir.getName()
+ " it is not a number");
continue;
}
synchronized (executionDirDeletionSync) {
try {
FileUtils.deleteDirectory(exDir);
} catch (IOException e) {
logger.error("Error cleaning execution dir " + exDir.getPath(), e);
}
}
}
}
private void cleanRecentlyFinished() {
long cleanupThreshold =
System.currentTimeMillis() - RECENTLY_FINISHED_TIME_TO_LIVE;
ArrayList<Integer> executionToKill = new ArrayList<Integer>();
for (ExecutableFlow flow : recentlyFinishedFlows.values()) {
if (flow.getEndTime() < cleanupThreshold) {
executionToKill.add(flow.getExecutionId());
}
}
for (Integer id : executionToKill) {
logger.info("Cleaning execution " + id
+ " from recently finished flows list.");
recentlyFinishedFlows.remove(id);
}
}
private void cleanOlderProjects() {
Map<Integer, ArrayList<ProjectVersion>> projectVersions =
new HashMap<Integer, ArrayList<ProjectVersion>>();
for (ProjectVersion version : installedProjects.values()) {
ArrayList<ProjectVersion> versionList =
projectVersions.get(version.getProjectId());
if (versionList == null) {
versionList = new ArrayList<ProjectVersion>();
projectVersions.put(version.getProjectId(), versionList);
}
versionList.add(version);
}
HashSet<Pair<Integer, Integer>> activeProjectVersions =
new HashSet<Pair<Integer, Integer>>();
for (FlowRunner runner : runningFlows.values()) {
ExecutableFlow flow = runner.getExecutableFlow();
activeProjectVersions.add(new Pair<Integer, Integer>(flow
.getProjectId(), flow.getVersion()));
}
for (Map.Entry<Integer, ArrayList<ProjectVersion>> entry : projectVersions
.entrySet()) {
// Integer projectId = entry.getKey();
ArrayList<ProjectVersion> installedVersions = entry.getValue();
// Keep one version of the project around.
if (installedVersions.size() == 1) {
continue;
}
Collections.sort(installedVersions);
for (int i = 0; i < installedVersions.size() - 1; ++i) {
ProjectVersion version = installedVersions.get(i);
Pair<Integer, Integer> versionKey =
new Pair<Integer, Integer>(version.getProjectId(),
version.getVersion());
if (!activeProjectVersions.contains(versionKey)) {
try {
logger.info("Removing old unused installed project "
+ version.getProjectId() + ":" + version.getVersion());
version.deleteDirectory();
installedProjects.remove(new Pair<Integer, Integer>(version
.getProjectId(), version.getVersion()));
} catch (IOException e) {
e.printStackTrace();
}
installedVersions.remove(versionKey);
}
}
}
}
}
public void submitFlow(int execId) throws ExecutorManagerException {
// Load file and submit
if (runningFlows.containsKey(execId)) {
throw new ExecutorManagerException("Execution " + execId
+ " is already running.");
}
ExecutableFlow flow = null;
flow = executorLoader.fetchExecutableFlow(execId);
if (flow == null) {
throw new ExecutorManagerException("Error loading flow with exec "
+ execId);
}
// Sets up the project files and execution directory.
setupFlow(flow);
// Setup flow runner
FlowWatcher watcher = null;
ExecutionOptions options = flow.getExecutionOptions();
if (options.getPipelineExecutionId() != null) {
Integer pipelineExecId = options.getPipelineExecutionId();
FlowRunner runner = runningFlows.get(pipelineExecId);
if (runner != null) {
watcher = new LocalFlowWatcher(runner);
} else {
watcher = new RemoteFlowWatcher(pipelineExecId, executorLoader);
}
}
int numJobThreads = numJobThreadPerFlow;
if (options.getFlowParameters().containsKey(FLOW_NUM_JOB_THREADS)) {
try {
int numJobs =
Integer.valueOf(options.getFlowParameters().get(
FLOW_NUM_JOB_THREADS));
if (numJobs > 0 && (numJobs <= numJobThreads || ProjectWhitelist
.isProjectWhitelisted(flow.getProjectId(),
WhitelistType.NumJobPerFlow))) {
numJobThreads = numJobs;
}
} catch (Exception e) {
throw new ExecutorManagerException(
"Failed to set the number of job threads "
+ options.getFlowParameters().get(FLOW_NUM_JOB_THREADS)
+ " for flow " + execId, e);
}
}
FlowRunner runner =
new FlowRunner(flow, executorLoader, projectLoader, jobtypeManager, azkabanProps);
runner.setFlowWatcher(watcher)
.setJobLogSettings(jobLogChunkSize, jobLogNumFiles)
.setValidateProxyUser(validateProxyUser)
.setNumJobThreads(numJobThreads).addListener(this);
configureFlowLevelMetrics(runner);
// Check again.
if (runningFlows.containsKey(execId)) {
throw new ExecutorManagerException("Execution " + execId
+ " is already running.");
}
// Finally, queue the sucker.
runningFlows.put(execId, runner);
try {
// The executorService already has a queue.
// The submit method below actually returns an instance of FutureTask,
// which implements interface RunnableFuture, which extends both
// Runnable and Future interfaces
Future<?> future = executorService.submit(runner);
// keep track of this future
submittedFlows.put(future, runner.getExecutionId());
// update the last submitted time.
this.lastFlowSubmittedDate = System.currentTimeMillis();
} catch (RejectedExecutionException re) {
throw new ExecutorManagerException(
"Azkaban server can't execute any more flows. "
+ "The number of running flows has reached the system configured limit."
+ "Please notify Azkaban administrators");
}
}
/**
* Configure Azkaban metrics tracking for a new flowRunner instance
*
* @param flowRunner
*/
private void configureFlowLevelMetrics(FlowRunner flowRunner) {
logger.info("Configuring Azkaban metrics tracking for flow runner object");
if (MetricReportManager.isAvailable()) {
MetricReportManager metricManager = MetricReportManager.getInstance();
// Adding NumFailedFlow Metric listener
flowRunner.addListener((NumFailedFlowMetric) metricManager
.getMetricFromName(NumFailedFlowMetric.NUM_FAILED_FLOW_METRIC_NAME));
}
}
private void setupFlow(ExecutableFlow flow) throws ExecutorManagerException {
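// Creates the per-execution directory, makes sure the required project
// version is set up locally, then links the project files into the
// execution directory (copyCreateHardlinkDirectory).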
int execId = flow.getExecutionId();
File execPath = new File(executionDirectory, String.valueOf(execId));
flow.setExecutionPath(execPath.getPath());
logger
.info("Flow " + execId + " submitted with path " + execPath.getPath());
execPath.mkdirs();
// We're setting up the installed projects. First time, it may take a while
// to set up.
Pair<Integer, Integer> projectVersionKey =
new Pair<Integer, Integer>(flow.getProjectId(), flow.getVersion());
// We set up project versions this way
ProjectVersion projectVersion = null;
synchronized (installedProjects) {
projectVersion = installedProjects.get(projectVersionKey);
if (projectVersion == null) {
projectVersion =
new ProjectVersion(flow.getProjectId(), flow.getVersion());
installedProjects.put(projectVersionKey, projectVersion);
}
}
try {
projectVersion.setupProjectFiles(projectLoader, projectDirectory, logger);
projectVersion.copyCreateHardlinkDirectory(execPath);
} catch (Exception e) {
logger.error("Error in setting up project directory "+projectDirectory+", "+e);
if (execPath.exists()) {
try {
FileUtils.deleteDirectory(execPath);
} catch (IOException e1) {
e1.printStackTrace();
}
}
throw new ExecutorManagerException(e);
}
}
public void cancelFlow(int execId, String user)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.kill(user);
}
public void pauseFlow(int execId, String user)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.pause(user);
}
public void resumeFlow(int execId, String user)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.resume(user);
}
public void retryFailures(int execId, String user)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.retryFailures(user);
}
public ExecutableFlow getExecutableFlow(int execId) {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
return recentlyFinishedFlows.get(execId);
}
return runner.getExecutableFlow();
}
@Override
public void handleEvent(Event event) {
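// On FLOW_FINISHED, move the flow out of the runningFlows bookkeeping and
// cache it briefly in recentlyFinishedFlows.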
if (event.getType() == Event.Type.FLOW_FINISHED) {
FlowRunner flowRunner = (FlowRunner) event.getRunner();
ExecutableFlow flow = flowRunner.getExecutableFlow();
recentlyFinishedFlows.put(flow.getExecutionId(), flow);
logger.info("Flow " + flow.getExecutionId()
+ " is finished. Adding it to recently finished flows list.");
runningFlows.remove(flow.getExecutionId());
}
}
public LogData readFlowLogs(int execId, int startByte, int length)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir file doesn't exist. Probably has beend deleted");
}
File logFile = runner.getFlowLogFile();
if (logFile != null && logFile.exists()) {
return FileIOUtils.readUtf8File(logFile, startByte, length);
} else {
throw new ExecutorManagerException("Flow log file doesn't exist.");
}
}
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
public LogData readJobLogs(int execId, String jobId, int attempt,
int startByte, int length) throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir file doesn't exist. Probably has beend deleted");
}
File logFile = runner.getJobLogFile(jobId, attempt);
if (logFile != null && logFile.exists()) {
return FileIOUtils.readUtf8File(logFile, startByte, length);
} else {
throw new ExecutorManagerException("Job log file doesn't exist.");
}
}
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
public List<Object> readJobAttachments(int execId, String jobId, int attempt)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
File dir = runner.getExecutionDir();
if (dir == null || !dir.exists()) {
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
try {
synchronized (executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir file doesn't exist. Probably has beend deleted");
}
File attachmentFile = runner.getJobAttachmentFile(jobId, attempt);
if (attachmentFile == null || !attachmentFile.exists()) {
return null;
}
@SuppressWarnings("unchecked")
List<Object> jobAttachments =
(ArrayList<Object>) JSONUtils.parseJSONFromFile(attachmentFile);
return jobAttachments;
}
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
public JobMetaData readJobMetaData(int execId, String jobId, int attempt,
int startByte, int length) throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir file doesn't exist. Probably has beend deleted");
}
File metaDataFile = runner.getJobMetaDataFile(jobId, attempt);
if (metaDataFile != null && metaDataFile.exists()) {
return FileIOUtils.readUtf8MetaDataFile(metaDataFile, startByte,
length);
} else {
throw new ExecutorManagerException("Job log file doesn't exist.");
}
}
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
public long getLastCleanerThreadCheckTime() {
return lastCleanerThreadCheckTime;
}
public boolean isCleanerThreadActive() {
return this.cleanerThread.isAlive();
}
public State getCleanerThreadState() {
return this.cleanerThread.getState();
}
public boolean isExecutorThreadPoolShutdown() {
return executorService.isShutdown();
}
public int getNumQueuedFlows() {
return executorService.getQueue().size();
}
public int getNumRunningFlows() {
return executorService.getActiveCount();
}
public String getRunningFlowIds() {
// The in progress tasks are actually of type FutureTask
Set<Runnable> inProgressTasks = executorService.getInProgressTasks();
List<Integer> runningFlowIds =
new ArrayList<Integer>(inProgressTasks.size());
for (Runnable task : inProgressTasks) {
// add casting here to ensure it matches the expected type in
// submittedFlows
Integer execId = submittedFlows.get((Future<?>) task);
if (execId != null) {
runningFlowIds.add(execId);
} else {
logger.warn("getRunningFlowIds: got null execId for task: " + task);
}
}
Collections.sort(runningFlowIds);
return runningFlowIds.toString();
}
public String getQueuedFlowIds() {
List<Integer> flowIdList =
new ArrayList<Integer>(executorService.getQueue().size());
for (Runnable task : executorService.getQueue()) {
Integer execId = submittedFlows.get(task);
if (execId != null) {
flowIdList.add(execId);
} else {
logger
.warn("getQueuedFlowIds: got null execId for queuedTask: " + task);
}
}
Collections.sort(flowIdList);
return flowIdList.toString();
}
public int getMaxNumRunningFlows() {
return numThreads;
}
public int getTheadPoolQueueSize() {
return threadPoolQueueSize;
}
public void reloadJobTypePlugins() throws JobTypeManagerException {
jobtypeManager.loadPlugins();
}
public int getTotalNumExecutedFlows() {
return executorService.getTotalTasks();
}
@Override
public void beforeExecute(Runnable r) {
}
@Override
public void afterExecute(Runnable r) {
submittedFlows.remove(r);
}
/**
* This shuts down the flow runner. The call is blocking and awaits execution of all jobs.
*/
public void shutdown() {
logger.warn("Shutting down FlowRunnerManager...");
executorService.shutdown();
boolean result = false;
while (!result) {
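      // Wait in one-minute slices so progress can be logged while running flows drain.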
logger.info("Awaiting Shutdown. # of executing flows: " + getNumRunningFlows());
try {
result = executorService.awaitTermination(1, TimeUnit.MINUTES);
} catch (InterruptedException e) {
logger.error(e);
}
}
logger.warn("Shutdown FlowRunnerManager complete.");
}
/**
   * This attempts to shut down the flow runner immediately (unsafe).
* This doesn't wait for jobs to finish but interrupts all threads.
*/
public void shutdownNow() {
logger.warn("Shutting down FlowRunnerManager now...");
executorService.shutdownNow();
}
}
| 1 | 12,464 | Does it need to be a member variable? It is currently only used in one method. | azkaban-azkaban | java |
@@ -37,7 +37,7 @@ function showNewJoinGroupSelection (button, user, apiClient) {
console.debug('No item is currently playing.');
}
- apiClient.sendSyncPlayCommand(sessionId, 'ListGroups').then(function (response) {
+ apiClient.sendSyncPlayCommand('ListGroups').then(function (response) {
response.json().then(function (groups) {
var menuItems = groups.map(function (group) {
return { | 1 | import events from 'events';
import connectionManager from 'connectionManager';
import playbackManager from 'playbackManager';
import syncPlayManager from 'syncPlayManager';
import loading from 'loading';
import toast from 'toast';
import actionsheet from 'actionsheet';
import globalize from 'globalize';
import playbackPermissionManager from 'playbackPermissionManager';
/**
* Gets active player id.
* @returns {string} The player's id.
*/
function getActivePlayerId () {
var info = playbackManager.getPlayerInfo();
return info ? info.id : null;
}
/**
* Used when user needs to join a group.
* @param {HTMLElement} button - Element where to place the menu.
* @param {Object} user - Current user.
* @param {Object} apiClient - ApiClient.
*/
function showNewJoinGroupSelection (button, user, apiClient) {
const sessionId = getActivePlayerId() || 'none';
const inSession = sessionId !== 'none';
const policy = user.localUser ? user.localUser.Policy : {};
let playingItemId;
try {
const playState = playbackManager.getPlayerState();
playingItemId = playState.NowPlayingItem.Id;
console.debug('Item', playingItemId, 'is currently playing.');
} catch (error) {
playingItemId = '';
console.debug('No item is currently playing.');
}
apiClient.sendSyncPlayCommand(sessionId, 'ListGroups').then(function (response) {
response.json().then(function (groups) {
var menuItems = groups.map(function (group) {
return {
name: group.PlayingItemName,
icon: 'group',
id: group.GroupId,
selected: false,
secondaryText: group.Participants.join(', ')
};
});
if (inSession && policy.SyncPlayAccess === 'CreateAndJoinGroups') {
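                // Only users with CreateAndJoinGroups access (and an active player) see the extra 'new group' entry.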
menuItems.push({
name: globalize.translate('LabelSyncPlayNewGroup'),
icon: 'add',
id: 'new-group',
selected: true,
secondaryText: globalize.translate('LabelSyncPlayNewGroupDescription')
});
}
if (menuItems.length === 0) {
if (inSession && policy.SyncPlayAccess === 'JoinGroups') {
toast({
text: globalize.translate('MessageSyncPlayCreateGroupDenied')
});
} else {
toast({
text: globalize.translate('MessageSyncPlayNoGroupsAvailable')
});
}
loading.hide();
return;
}
var menuOptions = {
title: globalize.translate('HeaderSyncPlaySelectGroup'),
items: menuItems,
positionTo: button,
resolveOnClick: true,
border: true
};
actionsheet.show(menuOptions).then(function (id) {
if (id == 'new-group') {
apiClient.sendSyncPlayCommand(sessionId, 'NewGroup');
} else {
apiClient.sendSyncPlayCommand(sessionId, 'JoinGroup', {
GroupId: id,
PlayingItemId: playingItemId
});
}
}).catch((error) => {
console.error('SyncPlay: unexpected error listing groups:', error);
});
loading.hide();
});
}).catch(function (error) {
console.error(error);
loading.hide();
toast({
text: globalize.translate('MessageSyncPlayErrorAccessingGroups')
});
});
}
/**
* Used when user has joined a group.
* @param {HTMLElement} button - Element where to place the menu.
* @param {Object} user - Current user.
* @param {Object} apiClient - ApiClient.
*/
function showLeaveGroupSelection (button, user, apiClient) {
const sessionId = getActivePlayerId();
if (!sessionId) {
syncPlayManager.signalError();
toast({
text: globalize.translate('MessageSyncPlayErrorNoActivePlayer')
});
showNewJoinGroupSelection(button, user, apiClient);
return;
}
const menuItems = [{
name: globalize.translate('LabelSyncPlayLeaveGroup'),
icon: 'meeting_room',
id: 'leave-group',
selected: true,
secondaryText: globalize.translate('LabelSyncPlayLeaveGroupDescription')
}];
var menuOptions = {
title: globalize.translate('HeaderSyncPlayEnabled'),
items: menuItems,
positionTo: button,
resolveOnClick: true,
border: true
};
actionsheet.show(menuOptions).then(function (id) {
if (id == 'leave-group') {
apiClient.sendSyncPlayCommand(sessionId, 'LeaveGroup');
}
}).catch((error) => {
console.error('SyncPlay: unexpected error showing group menu:', error);
});
loading.hide();
}
// Register to SyncPlay events
let syncPlayEnabled = false;
events.on(syncPlayManager, 'enabled', function (e, enabled) {
syncPlayEnabled = enabled;
});
/**
* Shows a menu to handle SyncPlay groups.
* @param {HTMLElement} button - Element where to place the menu.
*/
export function show (button) {
loading.show();
// TODO: should feature be disabled if playback permission is missing?
playbackPermissionManager.check().then(() => {
console.debug('Playback is allowed.');
}).catch((error) => {
console.error('Playback not allowed!', error);
toast({
text: globalize.translate('MessageSyncPlayPlaybackPermissionRequired')
});
});
const apiClient = connectionManager.currentApiClient();
connectionManager.user(apiClient).then((user) => {
if (syncPlayEnabled) {
showLeaveGroupSelection(button, user, apiClient);
} else {
showNewJoinGroupSelection(button, user, apiClient);
}
}).catch((error) => {
console.error(error);
loading.hide();
toast({
text: globalize.translate('MessageSyncPlayNoGroupsAvailable')
});
});
}
| 1 | 16,307 | @MrTimscampi doesn't this need an update to apiclient?.. | jellyfin-jellyfin-web | js |
@@ -472,7 +472,7 @@ static const byte xop_a_extra[256] = {
*/
int
decode_sizeof(dcontext_t *dcontext, byte *start_pc,
- int *num_prefixes _IF_X64(uint *rip_rel_pos))
+ int *num_prefixes _IF_X86_64(uint *rip_rel_pos))
{
byte *pc = start_pc;
uint opc = (uint)*pc; | 1 | /* **********************************************************
* Copyright (c) 2011-2014 Google, Inc. All rights reserved.
* Copyright (c) 2001-2010 VMware, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of VMware, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/* Copyright (c) 2003-2007 Determina Corp. */
/* Copyright (c) 2001-2003 Massachusetts Institute of Technology */
/* Copyright (c) 2001 Hewlett-Packard Company */
/* decode_fast.c -- a partial but fast x86 decoder */
#include "../globals.h"
#include "decode_fast.h"
#include "../link.h"
#include "arch.h"
#include "instr.h"
#include "instr_create.h"
#include "decode.h"
#include "decode_private.h"
#include "disassemble.h"
#ifdef DEBUG
/* case 10450: give messages to clients */
# undef ASSERT_TRUNCATE
# undef ASSERT_BITFIELD_TRUNCATE
# undef ASSERT_NOT_REACHED
# define ASSERT_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_BITFIELD_TRUNCATE DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
# define ASSERT_NOT_REACHED DO_NOT_USE_ASSERT_USE_CLIENT_ASSERT_INSTEAD
#endif
/* This file contains tables and functions that help decode x86
   instructions so that we can determine the length of the decoded
   instruction.  All code below is based on tables in the ``Intel
Architecture Software Developer's Manual,'' Volume 2: Instruction
Set Reference, 1999.
This decoder assumes that we are running in 32-bit, flat-address mode.
*/
/* NOTE that all of the tables in this file are indexed by the (primary
or secondary) opcode byte. The upper opcode nibble defines the rows,
starting with 0 at the top. The lower opcode nibble defines the
columns, starting with 0 at left. */
/* Data table for fixed part of an x86 instruction. The table is
indexed by the 1st (primary) opcode byte. Zero entries are
reserved opcodes. */
static const byte fixed_length[256] = {
1, 1, 1, 1, 2, 5, 1, 1, 1,
1, 1, 1, 2, 5, 1, 1, /* 0 */
1, 1, 1, 1, 2, 5, 1, 1, 1,
1, 1, 1, 2, 5, 1, 1, /* 1 */
1, 1, 1, 1, 2, 5, 1, 1, 1,
1, 1, 1, 2, 5, 1, 1, /* 2 */
1, 1, 1, 1, 2, 5, 1, 1, 1,
1, 1, 1, 2, 5, 1, 1, /* 3 */
1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, /* 4 */
1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, /* 5 */
1, 1, 1, 1, 1, 1, 1, 1, 5,
5, 2, 2, 1, 1, 1, 1, /* 6 */
2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, /* 7 */
2, 5, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, /* 8 */
1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 7, 1, 1, 1, 1, 1, /* 9 */
5, 5, 5, 5, 1, 1, 1, 1, 2,
5, 1, 1, 1, 1, 1, 1, /* A */
2, 2, 2, 2, 2, 2, 2, 2, 5,
5, 5, 5, 5, 5, 5, 5, /* B */
2, 2, 3, 1, 1, 1, 2, 5, 4,
1, 3, 1, 1, 2, 1, 1, /* C */
1, 1, 1, 1, 2, 2, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, /* D */
2, 2, 2, 2, 2, 2, 2, 2, 5,
5, 7, 2, 1, 1, 1, 1, /* E */
1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1 /* F */
/* f6 and f7 OP_test immeds are handled specially in decode_sizeof() */
};
/* Data table for fixed immediate part of an x86 instruction that
depends upon the existence of an operand-size byte. The table is
indexed by the 1st (primary) opcode byte. Entries with non-zero
values indicate opcodes with a variable-length immediate field. We
use this table if we've seen a operand-size prefix byte to adjust
the fixed_length from dword to word.
*/
static const signed char immed_adjustment[256] = {
0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, /* 0 */
0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, /* 1 */
0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, /* 2 */
0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, -2, -2, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, -2, -2, -2, -2, -2, -2, -2, -2, /* B */
0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, -2, -2, -2, -2, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
#ifdef X64
/* for x64 Intel, Jz is always a 64-bit addr ("f64" in Intel table) */
static const signed char immed_adjustment_intel64[256] = {
0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, /* 0 */
0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, /* 1 */
0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, /* 2 */
0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, -2, -2, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, -2, -2, -2, -2, -2, -2, -2, -2, /* B */
0, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -2, -2, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
#endif
/* Data table for fixed immediate part of an x86 instruction that
* depends upon the existence of an address-size byte. The table is
* indexed by the 1st (primary) opcode byte.
* The value here is doubled for x64 mode.
*/
static const signed char disp_adjustment[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
-2, -2, -2, -2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
#ifdef X64
/* Data table for immediate adjustments that only apply when
* in x64 mode. We fit two types of adjustments in here:
* default-size adjustments (positive numbers) and rex.w-prefix-based
* adjustments (negative numbers, to be made positive when applied).
*/
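/* For example, entries 0xa0-0xa3 (the mov moffs forms) get +4 because the
 * absolute address is 8 bytes by default in 64-bit mode, while 0xb8-0xbf
 * (mov reg, imm) get -4 because their immediate only grows from 4 to 8 bytes
 * when a rex.w prefix is present. */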
static const char x64_adjustment[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
4, 4, 4, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, -4, -4, -4, -4, -4, -4, -4, -4, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
#endif
/* Prototypes for the functions that calculate the variable
* part of the x86 instruction length. */
static int
sizeof_modrm(dcontext_t *dcontext, byte *pc, bool addr16 _IF_X64(byte **rip_rel_pc));
static int
sizeof_fp_op(dcontext_t *dcontext, byte *pc, bool addr16 _IF_X64(byte **rip_rel_pc));
static int
sizeof_escape(dcontext_t *dcontext, byte *pc, bool addr16 _IF_X64(byte **rip_rel_pc));
static int
sizeof_3byte_38(dcontext_t *dcontext, byte *pc, bool addr16,
bool vex _IF_X64(byte **rip_rel_pc));
static int
sizeof_3byte_3a(dcontext_t *dcontext, byte *pc, bool addr16 _IF_X64(byte **rip_rel_pc));
enum {
VARLEN_NONE,
VARLEN_MODRM,
VARLEN_FP_OP,
VARLEN_ESCAPE, /* 2-byte opcodes */
VARLEN_3BYTE_38_ESCAPE, /* 3-byte opcodes 0f 38 */
VARLEN_3BYTE_3A_ESCAPE, /* 3-byte opcodes 0f 3a */
};
/* Some macros to make the following table look better. */
#define m VARLEN_MODRM
#define f VARLEN_FP_OP
#define e VARLEN_ESCAPE
/* Data table indicating what function to use to calculate
the variable part of the x86 instruction. This table
is indexed by the primary opcode. */
static const byte variable_length[256] = {
m, m, m, m, 0, 0, 0, 0, m, m, m, m, 0, 0, 0, e, /* 0 */
m, m, m, m, 0, 0, 0, 0, m, m, m, m, 0, 0, 0, 0, /* 1 */
m, m, m, m, 0, 0, 0, 0, m, m, m, m, 0, 0, 0, 0, /* 2 */
m, m, m, m, 0, 0, 0, 0, m, m, m, m, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, m, m, 0, 0, 0, 0, 0, m, 0, m, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
m, m, 0, 0, m, m, m, m, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
m, m, m, m, 0, 0, 0, 0, f, f, f, f, f, f, f, f, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, m, m, 0, 0, 0, 0, 0, 0, m, m /* F */
};
/* eliminate the macros */
#undef m
#undef f
#undef e
/* Data table for the additional fixed part of a two-byte opcode.
* This table is indexed by the 2nd opcode byte. Zero entries are
* reserved/bad opcodes.
* N.B.: none of these (except IA32_ON_IA64) need adjustment
* for data16 or addr16.
*/
static const byte escape_fixed_length[256] = {
1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 2,
/* 0 */ /* 0f0f has extra suffix opcode byte */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 1 */
1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, /* 2 */
1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, /* 3 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 5 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6 */
2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, /* 7 */
5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, /* 8 */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 9 */
1, 1, 1, 1, 2, 1, 0, 0, 1, 1, 1, 1, 2, 1, 1, 1, /* A */
#ifdef IA32_ON_IA64
/* change is the 5, could also be 3 depending on which mode we are */
/* FIXME : no modrm byte so the standard variable thing won't work */
/* (need a escape_disp_adjustment table) */
1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 2, 1, 1, 1, 1, 1, /* B */
#else
1,1,1,1, 1,1,1,1, 1,1,2,1, 1,1,1,1, /* B */
#endif
1, 1, 2, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* C */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* D */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* E */
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0 /* F */
/* 0f78 has immeds depending on prefixes: handled in decode_sizeof() */
};
/* Some macros to make the following table look better. */
#define m VARLEN_MODRM
#define e1 VARLEN_3BYTE_38_ESCAPE
#define e2 VARLEN_3BYTE_3A_ESCAPE
/* Data table indicating what function to use to calculate
the variable part of the escaped x86 instruction. This table
is indexed by the 2nd opcode byte. */
static const byte escape_variable_length[256] = {
m, m, m, m, 0, 0, 0, 0, 0, 0, 0, 0, 0, m, 0, m, /* 0 */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, /* 1 */
m, m, m, m, 0, 0, 0, 0, m, m, m, m, m, m, m, m, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, e1, 0, e2, 0, 0, 0, 0, 0, /* 3 */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, /* 4 */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, /* 5 */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, /* 6 */
m, m, m, m, m, m, m, 0, m, m, 0, 0, m, m, m, m, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, /* 9 */
0, 0, 0, m, m, m, 0, 0, 0, 0, 0, m, m, m, m, m, /* A */
#ifdef IA32_ON_IA64
m, m, m, m, m, m, m, m, 0, 0, m, m, m, m, m, m, /* B */
#else
m, m, m, m, m, m, m, m, m, 0, m, m, m, m, m, m, /* B */
#endif
m, m, m, m, m, m, m, m, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, /* D */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, /* E */
m, m, m, m, m, m, m, m, m, m, m, m, m, m, m, 0 /* F */
};
/* eliminate the macros */
#undef m
#undef e1
#undef e2
/* Data table for the additional fixed part of a three-byte opcode 0f 38.
* This table is indexed by the 3rd opcode byte. Zero entries are
* reserved/bad opcodes.
* N.B.: ALL of these have modrm bytes, and NONE of these need adjustment for data16
* or addr16.
*/
#if 0 /* to be robust wrt future additions we assume all entries are 1 */
static const byte threebyte_38_fixed_length[256] = {
1,1,1,1, 1,1,1,1, 1,1,1,1, 0,0,0,0, /* 0 */
1,0,0,0, 1,1,0,1, 0,0,0,0, 1,1,1,0, /* 1 */
1,1,1,1, 1,1,0,0, 1,1,1,1, 0,0,0,0, /* 2 */
1,1,1,1, 1,1,0,1, 1,1,1,1, 1,1,1,1, /* 3 */
1,1,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 4 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 5 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 6 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 7 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 8 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 9 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* A */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* B */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* C */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* D */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* E */
1,1,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 /* F */
};
#endif
/* Three-byte 0f 3a: all are assumed to have a 1-byte immediate as well! */
#if 0 /* to be robust wrt future additions we assume all entries are 1 */
static const byte threebyte_3a_fixed_length[256] = {
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,1, /* 0 */
0,0,0,0, 1,1,1,1, 1,1,1,1, 1,1,1,0, /* 1 */
1,1,1,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 2 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 3 */
0,1,1,1, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 4 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 5 */
1,1,1,1, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 6 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 7 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 8 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* 9 */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* A */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* B */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* C */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* D */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0, /* E */
0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 /* F */
};
#endif
/* Extra size when vex-encoded (from immeds) */
static const byte threebyte_38_vex_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
/* XOP.0x08 is assumed to always have an immed byte */
/* Extra size for XOP opcode 0x09 (from immeds) */
static const byte xop_9_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
/* Extra size for XOP opcode 0x0a (from immeds) */
static const byte xop_a_extra[256] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0 */
4, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 7 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 8 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 9 */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* C */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* D */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* E */
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* F */
};
/* Returns the length of the instruction at pc.
* If num_prefixes is non-NULL, returns the number of prefix bytes.
* If rip_rel_pos is non-NULL, returns the offset into the instruction
* of a rip-relative addressing displacement (for data only: ignores
* control-transfer relative addressing), or 0 if none.
* May return 0 size for certain invalid instructions
*/
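/* Worked example: for "48 83 c4 28" (add rsp, 0x28) in 64-bit mode, the rex.w
 * prefix contributes 1, fixed_length[0x83] contributes 2 (opcode plus imm8),
 * and sizeof_modrm() contributes 1 since mod == 3 selects a register operand,
 * giving a total of 4 bytes with *num_prefixes set to 1.
 */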
int
decode_sizeof(dcontext_t *dcontext, byte *start_pc,
int *num_prefixes _IF_X64(uint *rip_rel_pos))
{
byte *pc = start_pc;
uint opc = (uint)*pc;
int sz = 0;
ushort varlen;
bool word_operands = false; /* data16 */
bool qword_operands = false; /* rex.w */
bool addr16 = false; /* really "addr32" for x64 mode */
bool found_prefix = true;
bool rep_prefix = false;
byte reg_opcode; /* reg_opcode field of modrm byte */
#ifdef X64
byte *rip_rel_pc = NULL;
#endif
/* Check for prefix byte(s) */
while (found_prefix) {
/* NOTE - rex prefixes must come after all other prefixes (including
* prefixes that are part of the opcode xref PR 271878). We match
* read_instruction() in considering pre-prefix rex bytes as part of
         * the following instr, even when ignored, rather than treating them
* as invalid. This in effect nops improperly placed rex prefixes which
* (xref PR 241563 and Intel Manual 2A 2.2.1) is the correct thing to do.
* Rex prefixes are 0x40-0x4f; >=0x48 has rex.w bit set.
*/
if (X64_MODE_DC(dcontext) && opc >= REX_PREFIX_BASE_OPCODE &&
opc <= (REX_PREFIX_BASE_OPCODE | REX_PREFIX_ALL_OPFLAGS)) {
if (opc >= (REX_PREFIX_BASE_OPCODE | REX_PREFIX_W_OPFLAG)) {
qword_operands = true;
if (word_operands)
word_operands = false; /* rex.w trumps data16 */
} /* else, doesn't affect instr size */
opc = (uint) * (++pc);
sz += 1;
} else {
switch (opc) {
case 0x66: /* operand size */
/* rex.w before other prefixes is a nop */
if (qword_operands)
qword_operands = false;
word_operands = true;
opc = (uint) * (++pc);
sz += 1;
break;
case 0xf2:
case 0xf3: /* REP */
rep_prefix = true;
/* fall through */
case 0xf0: /* LOCK */
case 0x64:
case 0x65: /* segment overrides */
case 0x26:
case 0x36:
case 0x2e:
case 0x3e:
opc = (uint) * (++pc);
sz += 1;
break;
case 0x67:
addr16 = true;
opc = (uint) * (++pc);
sz += 1;
/* up to caller to check for addr prefix! */
break;
case 0xc4:
case 0xc5: {
/* If 64-bit mode or mod selects for register, this is vex */
if (X64_MODE_DC(dcontext) || TESTALL(MODRM_BYTE(3, 0, 0), *(pc + 1))) {
/* Assumptions:
* - no vex-encoded instr size differs based on vex.w,
* so we don't bother to set qword_operands
* - no vex-encoded instr size differs based on prefixes,
* so we don't bother to decode vex.pp
*/
bool vex3 = (opc == 0xc4);
byte vex_mm = 0;
opc = (uint) * (++pc); /* 2nd vex prefix byte */
sz += 1;
if (vex3) {
vex_mm = (byte)(opc & 0x1f);
opc = (uint) * (++pc); /* 3rd vex prefix byte */
sz += 1;
}
opc = (uint) * (++pc); /* 1st opcode byte */
sz += 1;
if (num_prefixes != NULL)
*num_prefixes = sz;
/* no prefixes after vex + already did full size, so goto end */
if (!vex3 || (vex3 && (vex_mm == 1))) {
sz += sizeof_escape(dcontext, pc, addr16 _IF_X64(&rip_rel_pc));
goto decode_sizeof_done;
} else if (vex_mm == 2) {
sz += sizeof_3byte_38(dcontext, pc - 1, addr16,
true _IF_X64(&rip_rel_pc));
goto decode_sizeof_done;
} else if (vex_mm == 3) {
sz += sizeof_3byte_3a(dcontext, pc - 1,
addr16 _IF_X64(&rip_rel_pc));
goto decode_sizeof_done;
}
} else
found_prefix = false;
break;
}
case 0x8f: {
/* If XOP.map_select < 8, this is not XOP but instead OP_pop */
byte map_select = *(pc + 1) & 0x1f;
if (map_select >= 0x8) {
/* we have the same assumptions as for vex, that no instr size
                     * differs based on vex.w or vex.pp
*/
pc += 3; /* skip all 3 xop prefix bytes */
sz += 3;
opc = (uint)*pc; /* opcode byte */
sz += 1;
if (num_prefixes != NULL)
*num_prefixes = sz;
/* all have modrm */
sz += sizeof_modrm(dcontext, pc + 1, addr16 _IF_X64(&rip_rel_pc));
if (map_select == 0x8) {
/* these always have an immediate byte */
sz += 1;
} else if (map_select == 0x9)
sz += xop_9_extra[opc];
else if (map_select == 0xa)
sz += xop_a_extra[opc];
else {
ASSERT_CURIOSITY(false && "unknown XOP map_select");
/* to try to handle future ISA additions we don't abort */
}
/* no prefixes after xop + already did full size, so goto end */
goto decode_sizeof_done;
} else
found_prefix = false;
break;
}
default: found_prefix = false;
}
}
}
if (num_prefixes != NULL)
*num_prefixes = sz;
if (word_operands) {
#ifdef X64
/* for x64 Intel, always 64-bit addr ("f64" in Intel table)
* FIXME: what about 2-byte jcc?
*/
if (X64_MODE_DC(dcontext) && proc_get_vendor() == VENDOR_INTEL)
sz += immed_adjustment_intel64[opc];
else
#endif
sz += immed_adjustment[opc]; /* no adjustment for 2-byte escapes */
}
if (addr16) { /* no adjustment for 2-byte escapes */
if (X64_MODE_DC(dcontext)) /* from 64 bits down to 32 bits */
sz += 2 * disp_adjustment[opc];
else /* from 32 bits down to 16 bits */
sz += disp_adjustment[opc];
}
#ifdef X64
if (X64_MODE_DC(dcontext)) {
int adj64 = x64_adjustment[opc];
if (adj64 > 0) /* default size adjustment */
sz += adj64;
else if (qword_operands)
sz += -adj64; /* negative indicates prefix, not default, adjust */
/* else, no adjustment */
}
#endif
/* opc now really points to opcode */
sz += fixed_length[opc];
varlen = variable_length[opc];
/* for a valid instr, sz must be > 0 here, but we don't want to assert
* since we need graceful failure
*/
if (varlen == VARLEN_MODRM)
sz += sizeof_modrm(dcontext, pc + 1, addr16 _IF_X64(&rip_rel_pc));
else if (varlen == VARLEN_ESCAPE) {
sz += sizeof_escape(dcontext, pc + 1, addr16 _IF_X64(&rip_rel_pc));
/* special case: Intel and AMD added size-differing prefix-dependent instrs! */
if (*(pc + 1) == 0x78) {
/* XXX: if have rex.w prefix we clear word_operands: is that legal combo? */
if (word_operands || rep_prefix) {
/* extrq, insertq: 2 1-byte immeds */
sz += 2;
} /* else, vmread, w/ no immeds */
}
} else if (varlen == VARLEN_FP_OP)
sz += sizeof_fp_op(dcontext, pc + 1, addr16 _IF_X64(&rip_rel_pc));
else
CLIENT_ASSERT(varlen == VARLEN_NONE, "internal decoding error");
/* special case that doesn't fit the mold (of course one had to exist) */
reg_opcode = (byte)(((*(pc + 1)) & 0x38) >> 3);
if (opc == 0xf6 && reg_opcode == 0) {
sz += 1; /* TEST Eb,ib -- add size of immediate */
} else if (opc == 0xf7 && reg_opcode == 0) {
if (word_operands)
sz += 2; /* TEST Ew,iw -- add size of immediate */
else
sz += 4; /* TEST El,il -- add size of immediate */
}
decode_sizeof_done:
#ifdef X64
if (rip_rel_pos != NULL) {
if (rip_rel_pc != NULL) {
CLIENT_ASSERT(X64_MODE_DC(dcontext),
"decode_sizeof: invalid non-x64 rip_rel instr");
CLIENT_ASSERT(CHECK_TRUNCATE_TYPE_uint(rip_rel_pc - start_pc),
"decode_sizeof: unknown rip_rel instr type");
*rip_rel_pos = (uint)(rip_rel_pc - start_pc);
} else
*rip_rel_pos = 0;
}
#endif
return sz;
}
static int
sizeof_3byte_38(dcontext_t *dcontext, byte *pc, bool addr16,
bool vex _IF_X64(byte **rip_rel_pc))
{
int sz = 1; /* opcode past 0x0f 0x38 */
uint opc = *(++pc);
/* so far all 3-byte instrs have modrm bytes */
/* to be robust for future additions we don't actually
* use the threebyte_38_fixed_length[opc] entry and assume 1 */
if (vex)
sz += threebyte_38_vex_extra[opc];
sz += sizeof_modrm(dcontext, pc + 1, addr16 _IF_X64(rip_rel_pc));
return sz;
}
static int
sizeof_3byte_3a(dcontext_t *dcontext, byte *pc, bool addr16 _IF_X64(byte **rip_rel_pc))
{
pc++;
/* so far all 0f 3a 3-byte instrs have modrm bytes and 1-byte immeds */
/* to be robust for future additions we don't actually
* use the threebyte_3a_fixed_length[opc] entry and assume 1 */
return 1 + sizeof_modrm(dcontext, pc + 1, addr16 _IF_X64(rip_rel_pc)) + 1;
}
/* Two-byte opcode map (Tables A-4 and A-5). You use this routine
* when you have identified the primary opcode as 0x0f. You pass this
* routine the next byte to determine the number of extra bytes in the
* entire instruction.
* May return 0 size for certain invalid instructions.
*/
static int
sizeof_escape(dcontext_t *dcontext, byte *pc, bool addr16 _IF_X64(byte **rip_rel_pc))
{
uint opc = (uint)*pc;
int sz = escape_fixed_length[opc];
ushort varlen = escape_variable_length[opc];
/* for a valid instr, sz must be > 0 here, but we don't want to assert
* since we need graceful failure
*/
if (varlen == VARLEN_MODRM)
return sz + sizeof_modrm(dcontext, pc + 1, addr16 _IF_X64(rip_rel_pc));
else if (varlen == VARLEN_3BYTE_38_ESCAPE) {
return sz + sizeof_3byte_38(dcontext, pc, addr16, false _IF_X64(rip_rel_pc));
} else if (varlen == VARLEN_3BYTE_3A_ESCAPE) {
return sz + sizeof_3byte_3a(dcontext, pc, addr16 _IF_X64(rip_rel_pc));
} else
CLIENT_ASSERT(varlen == VARLEN_NONE, "internal decoding error");
return sz;
}
/* 32-bit addressing forms with the ModR/M Byte (Table 2-2). You call
* this routine with the byte following the primary opcode byte when you
* know that the operation's next byte is a ModR/M byte. This routine
* passes back the size of the Eaddr specification in bytes based on the
* following encoding of Table 2-2.
*
* Mod R/M
* 0 1 2 3 4 5 6 7
* 0 1 1 1 1 * 5 1 1
* 1 2 2 2 2 3 2 2 2
* 2 5 5 5 5 6 5 5 5
* 3 1 1 1 1 1 1 1 1
* where (*) is 6 if base==5 and 2 otherwise.
*/
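/* For instance, modrm 0x05 (mod=0, r/m=5) is a plain disp32 (rip-relative in
 * 64-bit mode, where rip_rel_pc is pointed at the 4-byte displacement) and
 * sizes to 5, while modrm 0x84 (mod=2, r/m=4) adds a sib byte on top of its
 * disp32 and sizes to 6. */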
static int
sizeof_modrm(dcontext_t *dcontext, byte *pc, bool addr16 _IF_X64(byte **rip_rel_pc))
{
int l = 0; /* return value for sizeof(eAddr) */
uint modrm = (uint)*pc;
int r_m = modrm & 0x7;
uint mod = modrm >> 6;
uint sib;
#ifdef X64
if (rip_rel_pc != NULL && X64_MODE_DC(dcontext) && mod == 0 && r_m == 5) {
*rip_rel_pc = pc + 1; /* no sib: next 4 bytes are disp */
}
#endif
if (addr16 && !X64_MODE_DC(dcontext)) {
if (mod == 1)
return 2; /* modrm + disp8 */
else if (mod == 2)
return 3; /* modrm + disp16 */
else if (mod == 3)
return 1; /* just modrm */
else {
CLIENT_ASSERT(mod == 0, "internal decoding error on addr16 prefix");
if (r_m == 6)
return 3; /* modrm + disp16 */
else
return 1; /* just modrm */
}
CLIENT_ASSERT(false, "internal decoding error on addr16 prefix");
}
/* for x64, addr16 simply truncates the computed address: there is
* no change in disp sizes */
if (mod == 3) /* register operand */
return 1;
switch (mod) { /* memory or immediate operand */
case 0: l = (r_m == 5) ? 5 : 1; break;
case 1: l = 2; break;
case 2: l = 5; break;
}
if (r_m == 4) {
l += 1; /* adjust for sib byte */
sib = (uint)(*(pc + 1));
if ((sib & 0x7) == 5) {
if (mod == 0)
l += 4; /* disp32(,index,s) */
}
}
return l;
}
/* General floating-point instruction formats (Table B-22). You use
* this routine when you have identified the primary opcode as one in
 * the range 0xd8 through 0xdf. You pass this routine the next byte
* to determine the number of extra bytes in the entire
* instruction. */
static int
sizeof_fp_op(dcontext_t *dcontext, byte *pc, bool addr16 _IF_X64(byte **rip_rel_pc))
{
if (*pc > 0xbf)
return 1; /* entire ModR/M byte is an opcode extension */
/* fp opcode in reg/opcode field */
return sizeof_modrm(dcontext, pc, addr16 _IF_X64(rip_rel_pc));
}
/* Table indicating "interesting" instructions, i.e., ones we
* would like to decode. Currently these are control-transfer
* instructions and interrupts.
* This table is indexed by the 1st (primary) opcode byte.
* A 0 indicates we are not interested, a 1 that we are.
* A 2 indicates a second opcode byte exists, a 3 indicates an opcode
* extension is present in the modrm byte.
*/
static const byte interesting[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, /* 0 */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1 */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2 */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 3 */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 4 */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 5 */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 6 */
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 7 */ /* jcc_short */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, /* 8 */ /* mov_seg */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, /* 9 */ /* call_far, popf */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* A */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* B */
    0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, /* C */ /* ret*, int* */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 3, 0, 0, /* D */ /* fnstenv, fnsave */
    1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, /* E */ /* loop*, call, jmp* */
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, /* F */
};
/* Table indicating operations on the lower 6 eflags (CF,PF,AF,ZF,SF,OF)
* This table is indexed by the 1st (primary) opcode byte.
* We use the eflags constants from instr.h.
* We ignore writing some of the 6 as a conservative simplification.
* Also note that for some groups we assign values to invalid opcodes
* just for simplicity
*/
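/* Example entries: eflags_6[0x01] is W6 since add writes all six arithmetic
 * flags, while eflags_6[0x73] is RC since jnb/jae short reads only CF. */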
#define x 0
#define RC EFLAGS_READ_CF
#define RP EFLAGS_READ_PF
#define RZ EFLAGS_READ_ZF
#define RS EFLAGS_READ_SF
#define RO EFLAGS_READ_OF
#define R6 EFLAGS_READ_6
#define RB (EFLAGS_READ_CF | EFLAGS_READ_ZF)
#define RL (EFLAGS_READ_SF | EFLAGS_READ_OF)
#define RE (EFLAGS_READ_SF | EFLAGS_READ_OF | EFLAGS_READ_ZF)
#define R5O (EFLAGS_READ_6 & (~EFLAGS_READ_OF))
#define WC EFLAGS_WRITE_CF
#define WZ EFLAGS_WRITE_ZF
#define W6 EFLAGS_WRITE_6
#define W5 (EFLAGS_WRITE_6 & (~EFLAGS_WRITE_CF))
#define W5O (EFLAGS_WRITE_6 & (~EFLAGS_WRITE_OF))
#define BC (EFLAGS_WRITE_6 | EFLAGS_READ_CF)
#define BA (EFLAGS_WRITE_6 | EFLAGS_READ_AF)
#define BD (EFLAGS_WRITE_6 | EFLAGS_READ_CF | EFLAGS_READ_AF)
#define B6 (EFLAGS_WRITE_6 | EFLAGS_READ_6)
#define EFLAGS_6_ESCAPE -1
#define EFLAGS_6_SPECIAL -2
#define E EFLAGS_6_ESCAPE
#define S EFLAGS_6_SPECIAL
static const int eflags_6[256] = {
W6, W6, W6, W6, W6, W6, x, x, W6, W6, W6, W6, W6, W6, x, E, /* 0 */
BC, BC, BC, BC, BC, BC, x, x, BC, BC, BC, BC, BC, BC, x, x, /* 1 */
W6, W6, W6, W6, W6, W6, x, BD, W6, W6, W6, W6, W6, W6, x, BD, /* 2 */
W6, W6, W6, W6, W6, W6, x, BA, W6, W6, W6, W6, W6, W6, x, BA, /* 3 */
W5, W5, W5, W5, W5, W5, W5, W5, W5, W5, W5, W5, W5, W5, W5, W5, /* 4 */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* 5 */
x, x, x, WZ, x, x, x, x, x, W6, x, W6, x, x, x, x, /* 6 */
RO, RO, RC, RC, RZ, RZ, RB, RB, RS, RS, RP, RP, RL, RL, RE, RE, /* 7 */
S, S, S, S, W6, W6, x, x, x, x, x, x, x, x, x, x, /* 8 */
x, x, x, x, x, x, x, x, x, x, x, x, R6, W6, W5O, R5O, /* 9 */
x, x, x, x, x, x, W6, W6, W6, W6, x, x, x, x, W6, W6, /* A */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* B */
S, S, x, x, x, x, x, x, x, x, x, x, R6, R6, R6, W6, /* C */
S, S, S, S, W6, W6, x, x, x, x, S, S, x, x, x, S, /* D */
RZ, RZ, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* E */
x, x, x, x, x, WC, S, S, WC, WC, x, x, x, x, S, S, /* F */
};
/* Same as eflags_6 table, but for 2nd byte of 0x0f extension opcodes
*/
static const int escape_eflags_6[256] = {
x, x, WZ, WZ, x, x, x, x, x, x, x, x, x, x, x, x, /* 0 */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* 1 */
W6, W6, W6, W6, x, x, x, x, x, x, x, x, x, x, W6, W6, /* 2 */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* 3 */
RO, RO, RC, RC, RZ, RZ, RB, RB, RS, RS, RP, RP, RL, RL, RE, RE, /* 4 */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* 5 */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* 6 */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* 7 */
RO, RO, RC, RC, RZ, RZ, RB, RB, RS, RS, RP, RP, RL, RL, RE, RE, /* 8 */
RO, RO, RC, RC, RZ, RZ, RB, RB, RS, RS, RP, RP, RL, RL, RE, RE, /* 9 */
x, x, x, W6, W6, W6, x, x, x, x, W6, W6, W6, W6, x, W6, /* A */
W6, W6, x, W6, x, x, x, x, x, x, W6, W6, W6, W6, x, x, /* B */
W6, W6, x, x, x, x, x, WZ, x, x, x, x, x, x, x, x, /* C */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* D */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* E */
x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, /* F */
};
#undef x
#undef RC
#undef RP
#undef RZ
#undef RS
#undef RO
#undef R6
#undef RB
#undef RL
#undef RE
#undef R5O
#undef WC
#undef WZ
#undef W6
#undef W5
#undef W5O
#undef BC
#undef BA
#undef BD
#undef B6
#undef E
#undef S
/* This routine converts a signed 8-bit offset into a target pc. The
* formal parameter pc should point to the beginning of the branch
* instruction containing the offset and having length len in bytes.
* The x86 architecture calculates offsets from the beginning of the
* instruction following the branch. */
static app_pc
convert_8bit_offset(byte *pc, byte offset, uint len)
{
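    /* (offset << 24) >> 24 sign-extends the 8-bit displacement to a full int
     * before it is added to the address of the next instruction (pc + len). */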
return ((app_pc)pc) + (((int)(offset << 24)) >> 24) + len;
}
static bool
intercept_fip_save(byte *pc, byte byte0, byte byte1)
{
if ((byte0 == 0xdd && ((byte1 >> 3) & 0x7) == 6) /* dd /6 == OP_fnsave */ ||
(byte0 == 0xd9 && ((byte1 >> 3) & 0x7) == 6) /* d9 /6 == OP_fnstenv */)
return true;
if (byte0 == 0x0f && byte1 == 0xae) {
int opc_ext;
byte byte2 = *(pc + 2);
opc_ext = (byte2 >> 3) & 0x7;
return opc_ext == 0 || /* 0f ae /0 == OP_fxsave */
opc_ext == 4 || /* 0f ae /4 == OP_xsave */
opc_ext == 6; /* 0f ae /6 == OP_xsaveopt */
}
if (byte0 == 0x0f && byte1 == 0xc7) {
int opc_ext;
byte byte2 = *(pc + 2);
opc_ext = (byte2 >> 3) & 0x7;
return opc_ext == 4; /* 0f c7 /4 == OP_xsavec */
}
return false;
}
/* Decodes only enough of the instruction at address pc to determine
* its size, its effects on the 6 arithmetic eflags, and whether it is
* a control-transfer instruction. If it is, the operands fields of
* instr are filled in. If not, only the raw bits fields of instr are
* filled in. This corresponds to a Level 3 decoding for control
* transfer instructions but a Level 1 decoding plus arithmetic eflags
* information for all other instructions.
*
* Fills in the PREFIX_SEG_GS and PREFIX_SEG_FS prefix flags for all instrs.
* Does NOT fill in any other prefix flags unless this is a cti instr
* and the flags affect the instr.
*
* Assumes that instr is already initialized, but uses the x86/x64 mode
* for the current thread rather than that set in instr.
* If caller is re-using same instr struct over multiple decodings,
* should call instr_reset or instr_reuse.
* Returns the address of the byte following the instruction.
* Returns NULL on decoding an invalid instr and sets opcode to OP_INVALID.
*/
byte *
decode_cti(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
byte byte0, byte1;
byte *start_pc = pc;
/* find and remember the instruction and its size */
int prefixes;
/* next two needed for eflags analysis */
int eflags;
int i;
byte modrm = 0; /* used only for EFLAGS_6_SPECIAL */
#ifdef X64
/* PR 251479: we need to know about all rip-relative addresses.
* Since change/setting raw bits invalidates, we must set this
* on every return. */
uint rip_rel_pos;
#endif
int sz = decode_sizeof(dcontext, pc, &prefixes _IF_X64(&rip_rel_pos));
if (sz == 0) {
/* invalid instruction! */
instr_set_opcode(instr, OP_INVALID);
return NULL;
}
instr_set_opcode(instr, OP_UNDECODED);
IF_X64(instr_set_x86_mode(instr, get_x86_mode(dcontext)));
/* we call instr_set_raw_bits on every return from here, not up
* front, because any instr_set_src, instr_set_dst, or
* instr_set_opcode will kill original bits state */
/* Fill in SEG_FS and SEG_GS override prefixes, ignore rest for now.
* We rely on having these set during bb building.
* FIXME - could be done in decode_sizeof which is already walking these
* bytes, but would need to complicate its interface and prefixes are
* fairly rare to begin with. */
if (prefixes > 0) {
for (i = 0; i < prefixes; i++, pc++) {
switch (*pc) {
case FS_SEG_OPCODE: instr_set_prefix_flag(instr, PREFIX_SEG_FS); break;
case GS_SEG_OPCODE: instr_set_prefix_flag(instr, PREFIX_SEG_GS); break;
default: break;
}
}
}
byte0 = *pc;
byte1 = *(pc + 1);
/* eflags analysis
* we do this even if -unsafe_ignore_eflags b/c it doesn't cost that
* much and we can use the analysis to detect any bb that reads a flag
* prior to writing it
*/
eflags = eflags_6[byte0];
if (eflags == EFLAGS_6_ESCAPE) {
eflags = escape_eflags_6[byte1];
if (eflags == EFLAGS_6_SPECIAL)
modrm = *(pc + 2);
} else if (eflags == EFLAGS_6_SPECIAL) {
modrm = byte1;
}
if (eflags == EFLAGS_6_SPECIAL) {
/* a number of cases exist beyond the ability of 2 tables
* to distinguish
*/
int opc_ext = (modrm >> 3) & 7; /* middle 3 bits */
if (byte0 <= 0x84) {
/* group 1* (80-83): all W6 except /2,/3=B */
if (opc_ext == 2 || opc_ext == 3)
eflags = EFLAGS_WRITE_6 | EFLAGS_READ_CF;
else
eflags = EFLAGS_WRITE_6;
} else if (byte0 <= 0xd3) {
/* group 2* (c0,c1,d0-d3): /0,/1=WC|WO, /2,/3=WC|WO|RC, /4,/5,/7=W6 */
if (opc_ext == 0 || opc_ext == 1)
eflags = EFLAGS_WRITE_CF | EFLAGS_WRITE_OF;
else if (opc_ext == 2 || opc_ext == 3)
eflags = EFLAGS_WRITE_CF | EFLAGS_WRITE_OF | EFLAGS_READ_CF;
else if (opc_ext == 4 || opc_ext == 5 || opc_ext == 7)
eflags = EFLAGS_WRITE_6;
else
eflags = 0;
} else if (byte0 <= 0xdf) {
/* floats: dac0-dadf and dbc0-dbdf = RC|RP|RZ */
if ((byte0 == 0xda || byte0 == 0xdb) && modrm >= 0xc0 && modrm <= 0xdf)
eflags = EFLAGS_READ_CF | EFLAGS_READ_PF | EFLAGS_READ_ZF;
/* floats: dbe8-dbf7 and dfe8-dff7 = WC|WP|WZ */
else if ((byte0 == 0xdb || byte0 == 0xdf) && modrm >= 0xe8 && modrm <= 0xf7)
eflags = EFLAGS_WRITE_CF | EFLAGS_WRITE_PF | EFLAGS_WRITE_ZF;
else
eflags = 0;
} else if (byte0 <= 0xf7) {
/* group 3a (f6) & 3b (f7): all W except /2 (OP_not) */
if (opc_ext == 2)
eflags = 0;
else
eflags = EFLAGS_WRITE_6;
} else {
/* group 4 (fe) & 5 (ff): /0,/1=W5 */
if (opc_ext == 0 || opc_ext == 1)
eflags = EFLAGS_WRITE_6 & (~EFLAGS_WRITE_CF);
else
eflags = 0;
}
}
instr->eflags = eflags;
instr_set_arith_flags_valid(instr, true);
if (interesting[byte0] == 0) {
/* assumption: opcode already OP_UNDECODED */
/* assumption: operands are already marked invalid (instr was reset) */
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (start_pc + sz);
}
/* FIXME: would further "interesting" table produce any noticeable
* performance improvement?
*/
if (prefixes > 0) {
/* prefixes are rare on ctis
* rather than handle them all here, just do full decode
* FIXME: if we start to see more and more jcc branch hints we
* may change our minds here! This is case 211206/6749.
*/
if (decode(dcontext, start_pc, instr) == NULL)
return NULL;
else
return (start_pc + sz);
}
#ifdef FOOL_CPUID
/* for fooling program into thinking hardware is different than it is */
if (byte0 == 0x0f && byte1 == 0xa2) { /* cpuid */
instr_set_opcode(instr, OP_cpuid);
/* don't bother to set dsts/srcs */
instr_set_operands_valid(instr, false);
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (start_pc + sz);
}
#endif
/* prefixes won't make a difference for 8-bit-offset jumps */
if (byte0 == 0xeb) { /* jmp_short */
app_pc tgt = convert_8bit_offset(pc, byte1, 2);
instr_set_opcode(instr, OP_jmp_short);
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_target(instr, opnd_create_pc(tgt));
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 2);
}
if ((byte0 & 0xf0) == 0x70) { /* jcc_short */
/* 2-byte pc-relative jumps with an 8-bit displacement */
app_pc tgt = convert_8bit_offset(pc, byte1, 2);
/* Set the instr's opcode field. Relies on special ordering
* in opcode enum. */
instr_set_opcode(instr, OP_jo_short + (byte0 & 0x0f));
/* calculate the branch's target address */
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_target(instr, opnd_create_pc(tgt));
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 2);
}
if (byte0 == 0xe8) { /* call */
int offset = *((int *)(pc + 1));
app_pc tgt = pc + offset + 5;
instr_set_opcode(instr, OP_call);
instr_set_num_opnds(dcontext, instr, 2, 2);
instr_set_target(instr, opnd_create_pc(tgt));
instr_set_src(instr, 1, opnd_create_reg(REG_XSP));
instr_set_dst(instr, 0, opnd_create_reg(REG_XSP));
instr_set_dst(instr, 1,
opnd_create_base_disp(
REG_XSP, REG_NULL, 0, 0,
resolve_variable_size_dc(dcontext, 0, OPSZ_call, false)));
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 5);
}
if (byte0 == 0xe9) { /* jmp */
int offset = *((int *)(pc + 1));
app_pc tgt = pc + offset + 5;
instr_set_opcode(instr, OP_jmp);
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_target(instr, opnd_create_pc(tgt));
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 5);
}
if ((byte0 == 0x0f) && ((byte1 & 0xf0) == 0x80)) { /* jcc */
/* 6-byte pc-relative jumps with a 32-bit displacement */
/* calculate the branch's target address */
int offset = *((int *)(pc + 2));
app_pc tgt = pc + offset + 6;
/* Set the instr's opcode field. Relies on special ordering
* in opcode enum. */
instr_set_opcode(instr, OP_jo + (byte1 & 0x0f));
instr_set_num_opnds(dcontext, instr, 0, 1);
instr_set_target(instr, opnd_create_pc(tgt));
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 6);
}
if (byte0 == 0xff) { /* check for indirect calls/branches */
/* dispatch based on bits 5,4,3 in mod_rm byte */
uint opc = (byte1 >> 3) & 0x7;
if (opc >= 2 && opc <= 5) {
/* this is an indirect jump or call */
/* we care about the operands and prefixes, so just do the full decode
*/
if (decode(dcontext, start_pc, instr) == NULL)
return NULL;
else
return (start_pc + sz);
}
/* otherwise it wasn't an indirect branch so continue */
}
if ((byte0 & 0xf0) == 0xc0) { /* check for returns */
byte nibble1 = (byte)(byte0 & 0x0f);
switch (nibble1) {
case 2: /* ret w/ 2-byte immed */
case 0xa: /* far ret w/ 2-byte immed */
/* we bailed out to decode() earlier if any prefixes */
CLIENT_ASSERT(prefixes == 0, "decode_cti: internal prefix error");
instr_set_opcode(instr, nibble1 == 2 ? OP_ret : OP_ret_far);
instr_set_num_opnds(dcontext, instr, 1, 3);
instr_set_dst(instr, 0, opnd_create_reg(REG_XSP));
instr_set_src(instr, 0, opnd_create_immed_int(*((short *)(pc + 1)), OPSZ_2));
instr_set_src(instr, 1, opnd_create_reg(REG_XSP));
instr_set_src(
instr, 2,
opnd_create_base_disp(
REG_XSP, REG_NULL, 0, 0,
resolve_variable_size_dc(
dcontext, 0, nibble1 == 2 ? OPSZ_ret : OPSZ_REXVARSTACK, false)));
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 3);
case 3: /* ret w/ no immed */
instr_set_opcode(instr, OP_ret);
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
/* we don't set any operands and leave to an up-decode for that */
return (pc + 1);
case 0xb: /* far ret w/ no immed */
instr_set_opcode(instr, OP_ret_far);
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
/* we don't set any operands and leave to an up-decode for that */
return (pc + 1);
}
/* otherwise it wasn't a return so continue */
}
if ((byte0 & 0xf0) == 0xe0) { /* check for a funny 8-bit branch */
byte nibble1 = (byte)(byte0 & 0x0f);
/* determine the opcode */
if (nibble1 == 0) { /* loopne */
instr_set_opcode(instr, OP_loopne);
} else if (nibble1 == 1) { /* loope */
instr_set_opcode(instr, OP_loope);
} else if (nibble1 == 2) { /* loop */
instr_set_opcode(instr, OP_loop);
} else if (nibble1 == 3) { /* jecxz */
instr_set_opcode(instr, OP_jecxz);
} else if (nibble1 == 10) { /* jmp_far */
/* we need prefix info (data size controls immediate offset size),
* this is rare so go ahead and do full decode
*/
if (decode(dcontext, start_pc, instr) == NULL)
return NULL;
else
return (start_pc + sz);
}
if (instr_opcode_valid(instr)) {
/* calculate the branch's target address */
app_pc tgt = convert_8bit_offset(pc, byte1, 2);
/* all (except jmp far) use ecx as a source */
instr_set_num_opnds(dcontext, instr, 0, 2);
/* if we made it here, no addr prefix, so REG_XCX not REG_ECX or REG_CX */
CLIENT_ASSERT(prefixes == 0, "decoding internal inconsistency");
instr_set_src(instr, 1, opnd_create_reg(REG_XCX));
instr_set_target(instr, opnd_create_pc(tgt));
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 2);
}
/* otherwise it wasn't a funny 8-bit cbr so continue */
}
if (byte0 == 0x9a) { /* check for far-absolute calls */
/* we need prefix info, this is rare so we do a full decode
*/
if (decode(dcontext, start_pc, instr) == NULL)
return NULL;
else
return (start_pc + sz);
}
/* both win32 and linux want to know about interrupts */
if (byte0 == 0xcd) { /* int */
instr_set_opcode(instr, OP_int);
instr_set_num_opnds(dcontext, instr, 2, 2);
instr_set_dst(instr, 0, opnd_create_reg(REG_XSP));
instr_set_dst(instr, 1, opnd_create_base_disp(REG_XSP, REG_NULL, 0, 0, OPSZ_4));
instr_set_src(instr, 0, opnd_create_immed_int((char)byte1, OPSZ_1));
instr_set_src(instr, 1, opnd_create_reg(REG_XSP));
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 2);
}
/* sys{enter,exit,call,ret} */
if (byte0 == 0x0f &&
(byte1 == 0x34 || byte1 == 0x35 || byte1 == 0x05 || byte1 == 0x07)) {
if (byte1 == 0x34) {
instr_set_opcode(instr, OP_sysenter);
instr_set_num_opnds(dcontext, instr, 1, 0);
instr_set_dst(instr, 0, opnd_create_reg(REG_XSP));
} else if (byte1 == 0x35) {
instr_set_opcode(instr, OP_sysexit);
instr_set_num_opnds(dcontext, instr, 1, 0);
instr_set_dst(instr, 0, opnd_create_reg(REG_XSP));
} else if (byte1 == 0x05) {
instr_set_opcode(instr, OP_syscall);
instr_set_num_opnds(dcontext, instr, 1, 0);
instr_set_dst(instr, 0, opnd_create_reg(REG_XCX));
} else if (byte1 == 0x07) {
instr_set_opcode(instr, OP_sysret);
instr_set_num_opnds(dcontext, instr, 0, 0);
}
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 2);
}
/* iret */
if (byte0 == 0xcf) {
instr_set_opcode(instr, OP_iret);
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 1);
}
/* popf */
if (byte0 == 0x9d) {
reg_id_t stack_sized_reg = REG_XSP;
#ifdef X64
if (dr_get_isa_mode(dcontext) == DR_ISA_IA32) {
stack_sized_reg = REG_ESP;
}
#endif
instr_set_opcode(instr, OP_popf);
instr_set_raw_bits(instr, start_pc, sz);
instr_set_num_opnds(dcontext, instr, 1, 2);
instr_set_src(instr, 0, opnd_create_reg(stack_sized_reg));
instr_set_src(
instr, 1,
opnd_create_base_disp(
stack_sized_reg, REG_NULL, 0, 0,
resolve_variable_size_dc(dcontext, prefixes, OPSZ_VARSTACK, false)));
instr_set_dst(instr, 0, opnd_create_reg(stack_sized_reg));
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (pc + 1);
}
#ifdef UNIX
/* mov_seg instruction detection for i#107: mangling seg update/query. */
if (INTERNAL_OPTION(mangle_app_seg) && (byte0 == 0x8c || byte0 == 0x8e)) {
instr_set_opcode(instr, OP_mov_seg);
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
return (start_pc + sz);
}
#endif
/* i#698: we must intercept floating point instruction pointer saves.
* Rare enough that we do a full decode on an opcode match.
*/
if (intercept_fip_save(pc, byte0, byte1)) {
if (decode(dcontext, start_pc, instr) == NULL)
return NULL;
else
return (start_pc + sz);
}
/* all non-pc-relative instructions */
/* assumption: opcode already OP_UNDECODED */
instr_set_raw_bits(instr, start_pc, sz);
IF_X64(instr_set_rip_rel_pos(instr, rip_rel_pos));
/* assumption: operands are already marked invalid (instr was reset) */
return (start_pc + sz);
}
/* Returns a pointer to the pc of the next instruction
* Returns NULL on decoding an invalid instruction.
*/
byte *
decode_next_pc(dcontext_t *dcontext, byte *pc)
{
int sz = decode_sizeof(dcontext, pc, NULL _IF_X64(NULL));
if (sz == 0)
return NULL;
else
return pc + sz;
}
/* Decodes the size of the instruction at address pc and points instr
* at the raw bits for the instruction.
* This corresponds to a Level 1 decoding.
* Assumes that instr is already initialized, but uses the x86/x64 mode
* for the current thread rather than that set in instr.
* If caller is re-using same instr struct over multiple decodings,
* should call instr_reset or instr_reuse.
* Returns the address of the next byte after the decoded instruction.
* Returns NULL on decoding an invalid instr and sets opcode to OP_INVALID.
*/
byte *
decode_raw(dcontext_t *dcontext, byte *pc, instr_t *instr)
{
int sz = decode_sizeof(dcontext, pc, NULL _IF_X64(NULL));
IF_X64(instr_set_x86_mode(instr, get_x86_mode(dcontext)));
if (sz == 0) {
/* invalid instruction! */
instr_set_opcode(instr, OP_INVALID);
return NULL;
}
instr_set_opcode(instr, OP_UNDECODED);
instr_set_raw_bits(instr, pc, sz);
/* assumption: operands are already marked invalid (instr was reset) */
return (pc + sz);
}
| 1 | 14,978 | This is a nop: prob best for history to not change the line | DynamoRIO-dynamorio | c |
@@ -21,7 +21,8 @@ type TrivialTestSlasher struct {
SendCalls uint64
}
-// Slash is a required function for storageFaultSlasher interfaces and is intended to do nothing.
+// Slash is a required function for storageFaultSlasher interfaces and does nothing but track
+// how many times it's called.
func (ts *TrivialTestSlasher) Slash(context.Context, *types.BlockHeight) error {
ts.SendCalls++
return nil | 1 | package storage
import (
"context"
"github.com/filecoin-project/go-filecoin/types"
)
// FakeProver provides fake PoSt proofs for a miner.
type FakeProver struct{}
// CalculatePoSt returns a fixed fake proof.
func (p *FakeProver) CalculatePoSt(ctx context.Context, start, end *types.BlockHeight, inputs []PoStInputs) (*PoStSubmission, error) {
return &PoStSubmission{
Proofs: []types.PoStProof{[]byte("test proof")},
}, nil
}
// TrivialTestSlasher is a storage fault slasher that does nothing
type TrivialTestSlasher struct {
SendCalls uint64
}
// Slash is a required function for storageFaultSlasher interfaces and is intended to do nothing.
func (ts *TrivialTestSlasher) Slash(context.Context, *types.BlockHeight) error {
ts.SendCalls++
return nil
}
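A minimal sketch of how the call counter on TrivialTestSlasher might be exercised, written as if it lived in a companion _test.go file in the same storage package; the test name is hypothetical and not part of the original file.
// Hypothetical test sketch, not part of the original file.
package storage

import (
	"context"
	"testing"
)

func TestTrivialTestSlasherCountsCalls(t *testing.T) {
	ts := &TrivialTestSlasher{}
	// Slash ignores its arguments, so nil is fine for the block height here.
	_ = ts.Slash(context.Background(), nil)
	_ = ts.Slash(context.Background(), nil)
	if ts.SendCalls != 2 {
		t.Fatalf("expected SendCalls == 2, got %d", ts.SendCalls)
	}
}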
| 1 | 20,812 | Can you delete this whole file now? | filecoin-project-venus | go |
@@ -24,6 +24,7 @@ type ReturnedContract struct {
Erc721Token blockchain.Erc721Token
ArrDelete blockchain.ArrayDelete
ArrString blockchain.ArrayString
+ ArrPassing blockchain.ArrayPassing
}
// StartContracts deploys and starts fp token smart contract and stable token smart contract,erc721 token smart contract | 1 | package assetcontract
import (
"math/rand"
"strconv"
"strings"
"time"
"github.com/pkg/errors"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/tools/executiontester/blockchain"
)
const (
	// chainIP is the IP address of the iotex API endpoint
chainIP = "localhost"
)
// ReturnedContract includes all contracts as return values
type ReturnedContract struct {
FpToken blockchain.FpToken
StbToken blockchain.StableToken
Erc721Token blockchain.Erc721Token
ArrDelete blockchain.ArrayDelete
ArrString blockchain.ArrayString
}
// StartContracts deploys and starts fp token smart contract and stable token smart contract,erc721 token smart contract
func StartContracts(cfg config.Config) (ret ReturnedContract, err error) {
endpoint := chainIP + ":" + strconv.Itoa(cfg.API.Port)
// deploy allowance sheet
allowance, err := deployContract(blockchain.AllowanceSheetBinary, endpoint)
if err != nil {
return
}
// deploy balance sheet
balance, err := deployContract(blockchain.BalanceSheetBinary, endpoint)
if err != nil {
return
}
// deploy registry
reg, err := deployContract(blockchain.RegistryBinary, endpoint)
if err != nil {
return
}
// deploy global pause
pause, err := deployContract(blockchain.GlobalPauseBinary, endpoint)
if err != nil {
return
}
// deploy stable token
stable, err := deployContract(blockchain.StableTokenBinary, endpoint)
if err != nil {
return
}
// create stable token
// TODO: query total supply and call stbToken.SetTotal()
ret.StbToken = blockchain.NewStableToken(endpoint).
SetAllowance(allowance).
SetBalance(balance).
SetRegistry(reg).
SetPause(pause).
SetStable(stable)
ret.StbToken.SetOwner(blockchain.Producer, blockchain.ProducerPrivKey)
// stable token set-up
if err = ret.StbToken.Start(); err != nil {
return
}
// deploy fp token
fpReg, err := deployContract(blockchain.FpRegistryBinary, endpoint)
if err != nil {
return
}
cdp, err := deployContract(blockchain.CdpManageBinary, endpoint)
if err != nil {
return
}
manage, err := deployContract(blockchain.ManageBinary, endpoint)
if err != nil {
return
}
proxy, err := deployContract(blockchain.ManageProxyBinary, endpoint)
if err != nil {
return
}
eap, err := deployContract(blockchain.EapStorageBinary, endpoint)
if err != nil {
return
}
riskLock, err := deployContract(blockchain.TokenRiskLockBinary, endpoint)
if err != nil {
return
}
// create fp token
ret.FpToken = blockchain.NewFpToken(endpoint).
SetManagement(manage).
SetManagementProxy(proxy).
SetEapStorage(eap).
SetRiskLock(riskLock).
SetRegistry(fpReg).
SetCdpManager(cdp).
SetStableToken(stable)
ret.FpToken.SetOwner(blockchain.Producer, blockchain.ProducerPrivKey)
// fp token set-up
if err = ret.FpToken.Start(); err != nil {
return
}
// erc721 token set-up
addr, err := deployContract(blockchain.Erc721Binary, endpoint)
if err != nil {
return
}
ret.Erc721Token = blockchain.NewErc721Token(endpoint)
ret.Erc721Token.SetAddress(addr)
ret.Erc721Token.SetOwner(blockchain.Producer, blockchain.ProducerPrivKey)
if err = ret.Erc721Token.Start(); err != nil {
return
}
// array-delete.sol set-up
addr, err = deployContract(blockchain.ArrayDeleteBin, endpoint)
if err != nil {
return
}
ret.ArrDelete = blockchain.NewArrayDelete(endpoint)
ret.ArrDelete.SetAddress(addr)
ret.ArrDelete.SetOwner(blockchain.Producer, blockchain.ProducerPrivKey)
if err = ret.ArrDelete.Start(); err != nil {
return
}
// array-of-strings.sol set-up
addr, err = deployContract(blockchain.ArrayStringBin, endpoint)
if err != nil {
return
}
ret.ArrString = blockchain.NewArrayString(endpoint)
ret.ArrString.SetAddress(addr)
ret.ArrString.SetOwner(blockchain.Producer, blockchain.ProducerPrivKey)
err = ret.ArrString.Start()
return
}
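If the new ArrPassing field from this patch is wired up the same way as the other contracts, the extra block inside StartContracts would presumably look like the sketch below. ArrayPassingBin and NewArrayPassing are assumed names modeled on the ArrayString helpers and are not confirmed by this file.
	// Hypothetical sketch: assumes blockchain.ArrayPassingBin and
	// blockchain.NewArrayPassing exist, mirroring the ArrayString set-up above.
	addr, err = deployContract(blockchain.ArrayPassingBin, endpoint)
	if err != nil {
		return
	}
	ret.ArrPassing = blockchain.NewArrayPassing(endpoint)
	ret.ArrPassing.SetAddress(addr)
	ret.ArrPassing.SetOwner(blockchain.Producer, blockchain.ProducerPrivKey)
	err = ret.ArrPassing.Start()
	return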
// GenerateAssetID generates an asset ID
func GenerateAssetID() string {
for {
id := strconv.Itoa(rand.Int())
if len(id) >= 8 {
// attach 8-digit YYYYMMDD at front
t := time.Now().Format(time.RFC3339)
t = strings.Replace(t, "-", "", -1)
return t[:8] + id[:8]
}
}
}
func deployContract(code, endpoint string, args ...[]byte) (string, error) {
// deploy the contract
contract := blockchain.NewContract(endpoint)
h, err := contract.
SetExecutor(blockchain.Producer).
SetPrvKey(blockchain.ProducerPrivKey).
Deploy(code, args...)
if err != nil {
return "", errors.Wrapf(err, "failed to deploy contract, txhash = %s", h)
}
receipt, err := contract.CheckCallResult(h)
if err != nil {
return h, errors.Wrapf(err, "check failed to deploy contract, txhash = %s", h)
}
return receipt.ContractAddress, nil
}
| 1 | 17,062 | can you combine this passing test, code, solidity binary into existing ArrDelete? the function is much similar, pushing int value into an array, and delete one item in the array | iotexproject-iotex-core | go |
@@ -340,10 +340,10 @@ public class QueueFragment extends Fragment {
SortOrder sortOrder = UserPreferences.getQueueKeepSortedOrder();
DBWriter.reorderQueue(sortOrder, true);
if (recyclerAdapter != null) {
- recyclerAdapter.setLocked(true);
+ recyclerAdapter.setKeepSorted(true);
}
} else if (recyclerAdapter != null) {
- recyclerAdapter.setLocked(UserPreferences.isQueueLocked());
+ recyclerAdapter.setKeepSorted(UserPreferences.isQueueKeepSorted());
}
getActivity().invalidateOptionsMenu();
return true; | 1 | package de.danoeh.antennapod.fragment;
import android.content.Context;
import android.content.DialogInterface;
import android.content.SharedPreferences;
import android.os.Bundle;
import android.util.Log;
import android.view.LayoutInflater;
import android.view.Menu;
import android.view.MenuInflater;
import android.view.MenuItem;
import android.view.View;
import android.view.ViewGroup;
import android.widget.CheckBox;
import android.widget.ProgressBar;
import android.widget.TextView;
import androidx.appcompat.app.AlertDialog;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.view.ViewCompat;
import androidx.fragment.app.Fragment;
import androidx.recyclerview.widget.ItemTouchHelper;
import androidx.recyclerview.widget.RecyclerView;
import androidx.recyclerview.widget.SimpleItemAnimator;
import com.google.android.material.snackbar.Snackbar;
import de.danoeh.antennapod.R;
import de.danoeh.antennapod.activity.MainActivity;
import de.danoeh.antennapod.adapter.QueueRecyclerAdapter;
import de.danoeh.antennapod.core.dialog.ConfirmationDialog;
import de.danoeh.antennapod.core.event.DownloadEvent;
import de.danoeh.antennapod.core.event.DownloaderUpdate;
import de.danoeh.antennapod.core.event.FeedItemEvent;
import de.danoeh.antennapod.core.event.PlaybackPositionEvent;
import de.danoeh.antennapod.core.event.PlayerStatusEvent;
import de.danoeh.antennapod.core.event.QueueEvent;
import de.danoeh.antennapod.core.event.UnreadItemsUpdateEvent;
import de.danoeh.antennapod.core.feed.FeedItem;
import de.danoeh.antennapod.core.feed.util.PlaybackSpeedUtils;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.service.download.DownloadService;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.DownloadRequester;
import de.danoeh.antennapod.core.util.Converter;
import de.danoeh.antennapod.core.util.FeedItemUtil;
import de.danoeh.antennapod.core.util.SortOrder;
import de.danoeh.antennapod.core.util.download.AutoUpdateManager;
import de.danoeh.antennapod.dialog.EpisodesApplyActionFragment;
import de.danoeh.antennapod.menuhandler.FeedItemMenuHandler;
import de.danoeh.antennapod.menuhandler.MenuItemUtils;
import de.danoeh.antennapod.view.EmptyViewHandler;
import de.danoeh.antennapod.view.EpisodeItemListRecyclerView;
import de.danoeh.antennapod.view.viewholder.EpisodeItemViewHolder;
import io.reactivex.Observable;
import io.reactivex.android.schedulers.AndroidSchedulers;
import io.reactivex.disposables.Disposable;
import io.reactivex.schedulers.Schedulers;
import org.greenrobot.eventbus.EventBus;
import org.greenrobot.eventbus.Subscribe;
import org.greenrobot.eventbus.ThreadMode;
import java.util.List;
import java.util.Locale;
import static de.danoeh.antennapod.dialog.EpisodesApplyActionFragment.ACTION_DELETE;
import static de.danoeh.antennapod.dialog.EpisodesApplyActionFragment.ACTION_DOWNLOAD;
import static de.danoeh.antennapod.dialog.EpisodesApplyActionFragment.ACTION_REMOVE_FROM_QUEUE;
/**
* Shows all items in the queue.
*/
public class QueueFragment extends Fragment {
public static final String TAG = "QueueFragment";
private TextView infoBar;
private EpisodeItemListRecyclerView recyclerView;
private QueueRecyclerAdapter recyclerAdapter;
private EmptyViewHandler emptyView;
private ProgressBar progLoading;
private List<FeedItem> queue;
private boolean isUpdatingFeeds = false;
private static final String PREFS = "QueueFragment";
private static final String PREF_SHOW_LOCK_WARNING = "show_lock_warning";
private Disposable disposable;
private ItemTouchHelper itemTouchHelper;
private SharedPreferences prefs;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setRetainInstance(true);
setHasOptionsMenu(true);
prefs = getActivity().getSharedPreferences(PREFS, Context.MODE_PRIVATE);
}
@Override
public void onStart() {
super.onStart();
if (queue != null) {
onFragmentLoaded(true);
}
loadItems(true);
EventBus.getDefault().register(this);
}
@Override
public void onPause() {
super.onPause();
recyclerView.saveScrollPosition(QueueFragment.TAG);
}
@Override
public void onStop() {
super.onStop();
EventBus.getDefault().unregister(this);
if (disposable != null) {
disposable.dispose();
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(QueueEvent event) {
Log.d(TAG, "onEventMainThread() called with: " + "event = [" + event + "]");
if (queue == null) {
return;
} else if (recyclerAdapter == null) {
loadItems(true);
return;
}
switch(event.action) {
case ADDED:
queue.add(event.position, event.item);
recyclerAdapter.notifyItemInserted(event.position);
break;
case SET_QUEUE:
queue = event.items;
recyclerAdapter.notifyDataSetChanged();
break;
case REMOVED:
case IRREVERSIBLE_REMOVED:
int position = FeedItemUtil.indexOfItemWithId(queue, event.item.getId());
queue.remove(position);
recyclerAdapter.notifyItemRemoved(position);
break;
case CLEARED:
queue.clear();
recyclerAdapter.notifyDataSetChanged();
break;
case SORTED:
queue = event.items;
recyclerAdapter.notifyDataSetChanged();
break;
case MOVED:
return;
}
recyclerView.saveScrollPosition(QueueFragment.TAG);
onFragmentLoaded(false);
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(FeedItemEvent event) {
Log.d(TAG, "onEventMainThread() called with: " + "event = [" + event + "]");
if (queue == null) {
return;
} else if (recyclerAdapter == null) {
loadItems(true);
return;
}
for (int i = 0, size = event.items.size(); i < size; i++) {
FeedItem item = event.items.get(i);
int pos = FeedItemUtil.indexOfItemWithId(queue, item.getId());
if (pos >= 0) {
queue.remove(pos);
queue.add(pos, item);
recyclerAdapter.notifyItemChangedCompat(pos);
refreshInfoBar();
}
}
}
@Subscribe(sticky = true, threadMode = ThreadMode.MAIN)
public void onEventMainThread(DownloadEvent event) {
Log.d(TAG, "onEventMainThread() called with DownloadEvent");
DownloaderUpdate update = event.update;
if (event.hasChangedFeedUpdateStatus(isUpdatingFeeds)) {
getActivity().invalidateOptionsMenu();
}
if (recyclerAdapter != null && update.mediaIds.length > 0) {
for (long mediaId : update.mediaIds) {
int pos = FeedItemUtil.indexOfItemWithMediaId(queue, mediaId);
if (pos >= 0) {
recyclerAdapter.notifyItemChangedCompat(pos);
}
}
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onEventMainThread(PlaybackPositionEvent event) {
if (recyclerAdapter != null) {
for (int i = 0; i < recyclerAdapter.getItemCount(); i++) {
EpisodeItemViewHolder holder = (EpisodeItemViewHolder)
recyclerView.findViewHolderForAdapterPosition(i);
if (holder != null && holder.isCurrentlyPlayingItem()) {
holder.notifyPlaybackPositionUpdated(event);
break;
}
}
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onPlayerStatusChanged(PlayerStatusEvent event) {
loadItems(false);
if (isUpdatingFeeds != updateRefreshMenuItemChecker.isRefreshing()) {
getActivity().supportInvalidateOptionsMenu();
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
public void onUnreadItemsChanged(UnreadItemsUpdateEvent event) {
// Sent when playback position is reset
loadItems(false);
if (isUpdatingFeeds != updateRefreshMenuItemChecker.isRefreshing()) {
getActivity().supportInvalidateOptionsMenu();
}
}
private void resetViewState() {
recyclerAdapter = null;
}
@Override
public void onDestroyView() {
super.onDestroyView();
resetViewState();
}
private final MenuItemUtils.UpdateRefreshMenuItemChecker updateRefreshMenuItemChecker =
() -> DownloadService.isRunning && DownloadRequester.getInstance().isDownloadingFeeds();
@Override
public void onCreateOptionsMenu(Menu menu, MenuInflater inflater) {
if(!isAdded()) {
return;
}
super.onCreateOptionsMenu(menu, inflater);
if (queue != null) {
inflater.inflate(R.menu.queue, menu);
MenuItemUtils.setupSearchItem(menu, (MainActivity) getActivity(), 0);
MenuItemUtils.refreshLockItem(getActivity(), menu);
// Show Lock Item only if queue is sorted manually
boolean keepSorted = UserPreferences.isQueueKeepSorted();
MenuItem lockItem = menu.findItem(R.id.queue_lock);
lockItem.setVisible(!keepSorted);
// Random sort is not supported in keep sorted mode
MenuItem sortRandomItem = menu.findItem(R.id.queue_sort_random);
sortRandomItem.setVisible(!keepSorted);
// Set keep sorted checkbox
MenuItem keepSortedItem = menu.findItem(R.id.queue_keep_sorted);
keepSortedItem.setChecked(keepSorted);
isUpdatingFeeds = MenuItemUtils.updateRefreshMenuItem(menu, R.id.refresh_item, updateRefreshMenuItemChecker);
}
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
if (!super.onOptionsItemSelected(item)) {
switch (item.getItemId()) {
case R.id.queue_lock:
toggleQueueLock();
return true;
case R.id.refresh_item:
AutoUpdateManager.runImmediate(requireContext());
return true;
case R.id.clear_queue:
// make sure the user really wants to clear the queue
ConfirmationDialog conDialog = new ConfirmationDialog(getActivity(),
R.string.clear_queue_label,
R.string.clear_queue_confirmation_msg) {
@Override
public void onConfirmButtonPressed(
DialogInterface dialog) {
dialog.dismiss();
DBWriter.clearQueue();
}
};
conDialog.createNewDialog().show();
return true;
case R.id.episode_actions:
((MainActivity) requireActivity()).loadChildFragment(
EpisodesApplyActionFragment.newInstance(queue, ACTION_DELETE | ACTION_REMOVE_FROM_QUEUE | ACTION_DOWNLOAD));
return true;
case R.id.queue_sort_episode_title_asc:
setSortOrder(SortOrder.EPISODE_TITLE_A_Z);
return true;
case R.id.queue_sort_episode_title_desc:
setSortOrder(SortOrder.EPISODE_TITLE_Z_A);
return true;
case R.id.queue_sort_date_asc:
setSortOrder(SortOrder.DATE_OLD_NEW);
return true;
case R.id.queue_sort_date_desc:
setSortOrder(SortOrder.DATE_NEW_OLD);
return true;
case R.id.queue_sort_duration_asc:
setSortOrder(SortOrder.DURATION_SHORT_LONG);
return true;
case R.id.queue_sort_duration_desc:
setSortOrder(SortOrder.DURATION_LONG_SHORT);
return true;
case R.id.queue_sort_feed_title_asc:
setSortOrder(SortOrder.FEED_TITLE_A_Z);
return true;
case R.id.queue_sort_feed_title_desc:
setSortOrder(SortOrder.FEED_TITLE_Z_A);
return true;
case R.id.queue_sort_random:
setSortOrder(SortOrder.RANDOM);
return true;
case R.id.queue_sort_smart_shuffle_asc:
setSortOrder(SortOrder.SMART_SHUFFLE_OLD_NEW);
return true;
case R.id.queue_sort_smart_shuffle_desc:
setSortOrder(SortOrder.SMART_SHUFFLE_NEW_OLD);
return true;
case R.id.queue_keep_sorted:
boolean keepSortedOld = UserPreferences.isQueueKeepSorted();
boolean keepSortedNew = !keepSortedOld;
UserPreferences.setQueueKeepSorted(keepSortedNew);
if (keepSortedNew) {
SortOrder sortOrder = UserPreferences.getQueueKeepSortedOrder();
DBWriter.reorderQueue(sortOrder, true);
if (recyclerAdapter != null) {
recyclerAdapter.setLocked(true);
}
} else if (recyclerAdapter != null) {
recyclerAdapter.setLocked(UserPreferences.isQueueLocked());
}
getActivity().invalidateOptionsMenu();
return true;
default:
return false;
}
} else {
return true;
}
}
private void toggleQueueLock() {
boolean isLocked = UserPreferences.isQueueLocked();
if (isLocked) {
setQueueLocked(false);
} else {
boolean shouldShowLockWarning = prefs.getBoolean(PREF_SHOW_LOCK_WARNING, true);
if (!shouldShowLockWarning) {
setQueueLocked(true);
} else {
AlertDialog.Builder builder = new AlertDialog.Builder(getContext());
builder.setTitle(R.string.lock_queue);
builder.setMessage(R.string.queue_lock_warning);
View view = View.inflate(getContext(), R.layout.checkbox_do_not_show_again, null);
CheckBox checkDoNotShowAgain = view.findViewById(R.id.checkbox_do_not_show_again);
builder.setView(view);
builder.setPositiveButton(R.string.lock_queue, (dialog, which) -> {
prefs.edit().putBoolean(PREF_SHOW_LOCK_WARNING, !checkDoNotShowAgain.isChecked()).apply();
setQueueLocked(true);
});
builder.setNegativeButton(R.string.cancel_label, null);
builder.show();
}
}
}
private void setQueueLocked(boolean locked) {
UserPreferences.setQueueLocked(locked);
getActivity().supportInvalidateOptionsMenu();
if (recyclerAdapter != null) {
recyclerAdapter.setLocked(locked);
}
if (locked) {
((MainActivity) getActivity()).showSnackbarAbovePlayer(R.string.queue_locked, Snackbar.LENGTH_SHORT);
} else {
((MainActivity) getActivity()).showSnackbarAbovePlayer(R.string.queue_unlocked, Snackbar.LENGTH_SHORT);
}
}
/**
* This method is called if the user clicks on a sort order menu item.
*
* @param sortOrder New sort order.
*/
private void setSortOrder(SortOrder sortOrder) {
UserPreferences.setQueueKeepSortedOrder(sortOrder);
DBWriter.reorderQueue(sortOrder, true);
}
@Override
public boolean onContextItemSelected(MenuItem item) {
Log.d(TAG, "onContextItemSelected() called with: " + "item = [" + item + "]");
if(!isVisible()) {
return false;
}
FeedItem selectedItem = recyclerAdapter.getSelectedItem();
if (selectedItem == null) {
Log.i(TAG, "Selected item was null, ignoring selection");
return super.onContextItemSelected(item);
}
int position = FeedItemUtil.indexOfItemWithId(queue, selectedItem.getId());
if (position < 0) {
Log.i(TAG, "Selected item no longer exist, ignoring selection");
return super.onContextItemSelected(item);
}
switch(item.getItemId()) {
case R.id.move_to_top_item:
queue.add(0, queue.remove(position));
recyclerAdapter.notifyItemMoved(position, 0);
DBWriter.moveQueueItemToTop(selectedItem.getId(), true);
return true;
case R.id.move_to_bottom_item:
queue.add(queue.size()-1, queue.remove(position));
recyclerAdapter.notifyItemMoved(position, queue.size()-1);
DBWriter.moveQueueItemToBottom(selectedItem.getId(), true);
return true;
default:
return FeedItemMenuHandler.onMenuItemClicked(this, item.getItemId(), selectedItem);
}
}
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
super.onCreateView(inflater, container, savedInstanceState);
View root = inflater.inflate(R.layout.queue_fragment, container, false);
((AppCompatActivity) getActivity()).setSupportActionBar(root.findViewById(R.id.toolbar));
infoBar = root.findViewById(R.id.info_bar);
recyclerView = root.findViewById(R.id.recyclerView);
RecyclerView.ItemAnimator animator = recyclerView.getItemAnimator();
if (animator instanceof SimpleItemAnimator) {
((SimpleItemAnimator) animator).setSupportsChangeAnimations(false);
}
recyclerView.setRecycledViewPool(((MainActivity) getActivity()).getRecycledViewPool());
registerForContextMenu(recyclerView);
itemTouchHelper = new ItemTouchHelper(
new ItemTouchHelper.SimpleCallback(ItemTouchHelper.UP | ItemTouchHelper.DOWN,
ItemTouchHelper.LEFT | ItemTouchHelper.RIGHT) {
// Position tracking whilst dragging
int dragFrom = -1;
int dragTo = -1;
@Override
public boolean onMove(RecyclerView recyclerView, RecyclerView.ViewHolder viewHolder,
RecyclerView.ViewHolder target) {
int fromPosition = viewHolder.getAdapterPosition();
int toPosition = target.getAdapterPosition();
// Update tracked position
if (dragFrom == -1) {
dragFrom = fromPosition;
}
dragTo = toPosition;
int from = viewHolder.getAdapterPosition();
int to = target.getAdapterPosition();
Log.d(TAG, "move(" + from + ", " + to + ") in memory");
if (from >= queue.size() || to >= queue.size()) {
return false;
}
queue.add(to, queue.remove(from));
recyclerAdapter.notifyItemMoved(from, to);
return true;
}
@Override
public void onSwiped(RecyclerView.ViewHolder viewHolder, int direction) {
if (disposable != null) {
disposable.dispose();
}
final int position = viewHolder.getAdapterPosition();
Log.d(TAG, "remove(" + position + ")");
final FeedItem item = queue.get(position);
final boolean isRead = item.isPlayed();
DBWriter.markItemPlayed(FeedItem.PLAYED, false, item.getId());
DBWriter.removeQueueItem(getActivity(), true, item);
((MainActivity) getActivity()).showSnackbarAbovePlayer(
item.hasMedia() ? R.string.marked_as_read_label : R.string.marked_as_read_no_media_label,
Snackbar.LENGTH_LONG)
.setAction(getString(R.string.undo), v -> {
DBWriter.addQueueItemAt(getActivity(), item.getId(), position, false);
if (!isRead) {
DBWriter.markItemPlayed(FeedItem.UNPLAYED, item.getId());
}
});
}
@Override
public boolean isLongPressDragEnabled() {
return false;
}
@Override
public boolean isItemViewSwipeEnabled() {
return !UserPreferences.isQueueLocked();
}
@Override
public void clearView(RecyclerView recyclerView, RecyclerView.ViewHolder viewHolder) {
super.clearView(recyclerView, viewHolder);
// Check if drag finished
if (dragFrom != -1 && dragTo != -1 && dragFrom != dragTo) {
reallyMoved(dragFrom, dragTo);
}
dragFrom = dragTo = -1;
}
private void reallyMoved(int from, int to) {
// Write drag operation to database
Log.d(TAG, "Write to database move(" + from + ", " + to + ")");
DBWriter.moveQueueItem(from, to, true);
}
}
);
itemTouchHelper.attachToRecyclerView(recyclerView);
emptyView = new EmptyViewHandler(getContext());
emptyView.attachToRecyclerView(recyclerView);
emptyView.setIcon(R.attr.stat_playlist);
emptyView.setTitle(R.string.no_items_header_label);
emptyView.setMessage(R.string.no_items_label);
progLoading = root.findViewById(R.id.progLoading);
progLoading.setVisibility(View.VISIBLE);
return root;
}
private void onFragmentLoaded(final boolean restoreScrollPosition) {
if (queue != null && queue.size() > 0) {
if (recyclerAdapter == null) {
MainActivity activity = (MainActivity) getActivity();
recyclerAdapter = new QueueRecyclerAdapter(activity, itemTouchHelper);
recyclerView.setAdapter(recyclerAdapter);
emptyView.updateAdapter(recyclerAdapter);
}
recyclerAdapter.updateItems(queue);
recyclerView.setVisibility(View.VISIBLE);
} else {
recyclerAdapter = null;
recyclerView.setVisibility(View.GONE);
emptyView.updateAdapter(recyclerAdapter);
}
if (restoreScrollPosition) {
recyclerView.restoreScrollPosition(QueueFragment.TAG);
}
// we need to refresh the options menu because it sometimes
// needs data that may have just been loaded.
getActivity().supportInvalidateOptionsMenu();
refreshInfoBar();
}
private void refreshInfoBar() {
String info = String.format(Locale.getDefault(), "%d%s",
queue.size(), getString(R.string.episodes_suffix));
if (queue.size() > 0) {
long timeLeft = 0;
for (FeedItem item : queue) {
float playbackSpeed = 1;
if (UserPreferences.timeRespectsSpeed()) {
playbackSpeed = PlaybackSpeedUtils.getCurrentPlaybackSpeed(item.getMedia());
}
if (item.getMedia() != null) {
long itemTimeLeft = item.getMedia().getDuration() - item.getMedia().getPosition();
timeLeft += itemTimeLeft / playbackSpeed;
}
}
info += " • ";
info += getString(R.string.time_left_label);
info += Converter.getDurationStringLocalized(getActivity(), timeLeft);
}
infoBar.setText(info);
}
private void loadItems(final boolean restoreScrollPosition) {
Log.d(TAG, "loadItems()");
if (disposable != null) {
disposable.dispose();
}
if (queue == null) {
recyclerView.setVisibility(View.GONE);
emptyView.hide();
progLoading.setVisibility(View.VISIBLE);
}
disposable = Observable.fromCallable(DBReader::getQueue)
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(items -> {
progLoading.setVisibility(View.GONE);
queue = items;
onFragmentLoaded(restoreScrollPosition);
if (recyclerAdapter != null) {
recyclerAdapter.notifyDataSetChanged();
}
}, error -> Log.e(TAG, Log.getStackTraceString(error)));
}
}
| 1 | 16,639 | I think that could lead to problems when setting it to not sorted again. In that case, it will not be reset. What about using one single method for the adapter (`setDragDropEnabled`)? | AntennaPod-AntennaPod | java |
@@ -47,8 +47,8 @@ func handleSequelProCommand(appLocation string) (string, error) {
return "", err
}
- if app.SiteStatus() != "running" {
- return "", errors.New("app not running locally. Try `ddev start`")
+ if app.SiteStatus() != platform.SiteRunning {
+ return "", errors.New("App not running locally. Try `ddev start`")
}
db, err := app.FindContainerByType("db") | 1 | package cmd
import (
"fmt"
"log"
"os"
"os/exec"
"path/filepath"
"strconv"
"runtime"
"github.com/drud/ddev/pkg/appports"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/plugins/platform"
"github.com/drud/ddev/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// SequelproLoc is where we expect to find the sequel pro.app
// It's global so it can be mocked in testing.
var SequelproLoc = "/Applications/sequel pro.app"
// localDevSequelproCmd represents the sequelpro command
var localDevSequelproCmd = &cobra.Command{
Use: "sequelpro",
Short: "Easily connect local site to sequelpro",
Long: `A helper command for easily using sequelpro (OSX database browser) with a ddev app that has been initialized locally.`,
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 0 {
log.Fatalf("invalid arguments to sequelpro command: %v", args)
}
out, err := handleSequelProCommand(SequelproLoc)
if err != nil {
log.Fatalf("Could not run sequelpro command: %s", err)
}
util.Success(out)
},
}
// handleSequelProCommand() is the "real" handler for the real command
func handleSequelProCommand(appLocation string) (string, error) {
app, err := getActiveApp("")
if err != nil {
return "", err
}
if app.SiteStatus() != "running" {
return "", errors.New("app not running locally. Try `ddev start`")
}
db, err := app.FindContainerByType("db")
if err != nil {
return "", err
}
dbPrivatePort, err := strconv.ParseInt(appports.GetPort("db"), 10, 64)
if err != nil {
return "", err
}
dbPublishPort := fmt.Sprint(dockerutil.GetPublishedPort(dbPrivatePort, db))
tmpFilePath := filepath.Join(app.AppRoot(), ".ddev/sequelpro.spf")
tmpFile, err := os.Create(tmpFilePath)
if err != nil {
log.Fatalln(err)
}
defer util.CheckClose(tmpFile)
_, err = tmpFile.WriteString(fmt.Sprintf(
platform.SequelproTemplate,
"db", //dbname
"127.0.0.1", //host
app.HostName(), //connection name
"db", // dbpass
dbPublishPort, // port
"db", //dbuser
))
util.CheckErr(err)
err = exec.Command("open", tmpFilePath).Run()
if err != nil {
return "", err
}
return "sequelpro command finished successfully!", nil
}
// dummyDevSequelproCmd represents the "not available" sequelpro command
var dummyDevSequelproCmd = &cobra.Command{
Use: "sequelpro",
Short: "This command is not available since sequel pro.app is not installed",
Long: `Where installed, "ddev sequelpro" launches the sequel pro database browser`,
Run: func(cmd *cobra.Command, args []string) {
util.Failed("The sequelpro command is not available because sequel pro.app is not detected on your workstation")
},
}
// init installs the real command if it's available, otherwise dummy command (if on OSX), otherwise no command
func init() {
switch {
case detectSequelpro():
RootCmd.AddCommand(localDevSequelproCmd)
case runtime.GOOS == "darwin":
RootCmd.AddCommand(dummyDevSequelproCmd)
}
}
// detectSequelpro looks for the sequel pro app in /Applications; returns true if found
func detectSequelpro() bool {
if _, err := os.Stat(SequelproLoc); err == nil {
return true
}
return false
}
| 1 | 11,413 | I'd say the error should be an error, not instructions to the user. So error would be something like "site should be running and is not" | drud-ddev | go |
@@ -60,11 +60,12 @@ const (
// 9) Add 'ipToTask' map to state file
// 10) Add 'healthCheckType' field in 'apicontainer.Container'
// 11)
- // a) Add 'PrivateDNSName' field to 'api.ENI'
-	//  b) Remove `AppliedStatus` field from 'apicontainer.Container'
+ // a) Add 'PrivateDNSName' field to 'api.ENI'
+	//  b) Remove `AppliedStatus` field from 'apicontainer.Container'
// 12) Deprecate 'TransitionDependencySet' and add new 'TransitionDependenciesMap' in 'apicontainer.Container'
// 13) Add 'resources' field to 'api.task.task'
- ECSDataVersion = 13
+ // 14) Add 'V3EndpointID' field to 'Container' struct
+ ECSDataVersion = 14
// ecsDataFile specifies the filename in the ECS_DATADIR
ecsDataFile = "ecs_agent_data.json" | 1 | // Copyright 2014-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Package statemanager implements simple constructs for saving and restoring
// state from disk.
// It provides the interface for a StateManager which can read/write arbitrary
// json data from/to disk.
package statemanager
import (
"encoding/json"
"errors"
"os"
"strconv"
"sync"
"time"
"github.com/aws/amazon-ecs-agent/agent/config"
"github.com/aws/amazon-ecs-agent/agent/logger"
)
const (
// ECSDataVersion is the current version of saved data. Any backwards or
// forwards incompatible changes to the data-format should increment this number
// and retain the ability to read old data versions.
// Version changes:
// 1) initial
// 2)
// a) Add 'ACSSeqNum' top level field (backwards compatible; technically
// forwards compatible but could cause resource constraint violations)
// b) remove 'DEAD', 'UNKNOWN' state from ever being marshalled (backward and
// forward compatible)
// 3) Add 'Protocol' field to 'portMappings' and 'KnownPortBindings'
// 4) Add 'DockerConfig' struct
// 5) Add 'ImageStates' struct as part of ImageManager
// 6)
// a) Refactor 'Internal' field in 'apicontainer.Container' to 'Type' enum
// b) Add 'ContainerResourcesProvisioned' as a new 'ContainerStatus' enum
// c) Add 'SteadyStateStatus' field to 'Container' struct
// d) Add 'ENIAttachments' struct
// e) Deprecate 'SteadyStateDependencies' in favor of 'TransitionDependencySet'
// 7)
// a) Add 'MetadataUpdated' field to 'apicontainer.Container'
// b) Add 'DomainNameServers' and 'DomainNameSearchList' in `api.ENI`
// 8)
// a) Add 'UseExecutionRole' in `api.ECRAuthData`
// b) Add `executionCredentialsID` in `apitask.Task`
// c) Add 'LogsAuthStrategy' field to 'apicontainer.Container'
// d) Added task cgroup related fields ('CPU', 'Memory', 'MemoryCPULimitsEnabled') to 'apitask.Task'
// 9) Add 'ipToTask' map to state file
// 10) Add 'healthCheckType' field in 'apicontainer.Container'
// 11)
// a) Add 'PrivateDNSName' field to 'api.ENI'
	//  b) Remove `AppliedStatus` field from 'apicontainer.Container'
// 12) Deprecate 'TransitionDependencySet' and add new 'TransitionDependenciesMap' in 'apicontainer.Container'
// 13) Add 'resources' field to 'api.task.task'
ECSDataVersion = 13
// ecsDataFile specifies the filename in the ECS_DATADIR
ecsDataFile = "ecs_agent_data.json"
// minSaveInterval specifies how frequently to flush to disk
minSaveInterval = 10 * time.Second
)
var log = logger.ForModule("statemanager")
// Saveable types should be able to be json serializable and deserializable
// Properly, this should have json.Marshaler/json.Unmarshaler here, but string
// and so on can be marshaled/unmarshaled sanely but don't fit those interfaces.
type Saveable interface{}
// Saver is a type that can be saved
type Saver interface {
Save() error
ForceSave() error
}
// Option functions are functions that may be used as part of constructing a new
// StateManager
type Option func(StateManager)
type saveableState map[string]*Saveable
type intermediateSaveableState map[string]json.RawMessage
// State is a struct of all data that should be saveable/loadable to disk. Each
// element should be json-serializable.
//
// Note, changing this to work with BinaryMarshaler or another more compact
// format would be fine, but everything already needs a json representation
// since that's our wire format and the extra space taken / IO-time is expected
// to be fairly negligible.
type state struct {
Data saveableState
Version int
}
type intermediateState struct {
Data intermediateSaveableState
}
type versionOnlyState struct {
Version int
}
type platformDependencies interface{}
// A StateManager can load and save state from disk.
// Load is not expected to return an error if there is no state to load.
type StateManager interface {
Saver
Load() error
}
type basicStateManager struct {
statePath string // The path to a file in which state can be serialized
state *state // pointers to the data we should save / load into
saveTimesLock sync.Mutex // guards save times
lastSave time.Time //the last time a save completed
nextPlannedSave time.Time //the next time a save is planned
savingLock sync.Mutex // guards marshal, write, move (on Linux), and load (on Windows)
platformDependencies platformDependencies // platform-specific dependencies
}
// NewStateManager constructs a new StateManager which saves data at the
// location specified in cfg and operates under the given options.
// The returned StateManager will not save more often than every 10 seconds and
// will not reliably return errors with Save, but will log them appropriately.
func NewStateManager(cfg *config.Config, options ...Option) (StateManager, error) {
fi, err := os.Stat(cfg.DataDir)
if err != nil {
return nil, err
}
if !fi.IsDir() {
return nil, errors.New("State manager DataDir must exist")
}
state := &state{
Data: make(saveableState),
Version: ECSDataVersion,
}
manager := &basicStateManager{
statePath: cfg.DataDir,
state: state,
}
for _, option := range options {
option(manager)
}
manager.platformDependencies = newPlatformDependencies()
return manager, nil
}
// AddSaveable is an option that adds a given saveable as one that should be saved
// under the given name. The name must be the same across uses of the
// statemanager (e.g. program invocations) for it to be serialized and
// deserialized correctly.
func AddSaveable(name string, saveable Saveable) Option {
return (Option)(func(m StateManager) {
manager, ok := m.(*basicStateManager)
if !ok {
log.Crit("Unable to add to state manager; unknown instantiation")
return
}
manager.state.Data[name] = &saveable
})
}
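As a rough caller-side sketch (the function name and the "TaskState" key below are illustrative only; NewStateManager, AddSaveable, Load, and Save are the pieces defined in this file), saveables are registered before Load so their pointers can be filled in:
// Hypothetical usage sketch, not part of the agent; shown as if it lived in
// this package so no extra imports are needed.
func exampleStateManagerUsage(cfg *config.Config) error {
	var taskState []string // any json-(de)serializable value can be a Saveable

	manager, err := NewStateManager(cfg, AddSaveable("TaskState", &taskState))
	if err != nil {
		return err
	}
	// Load fills &taskState from disk (a missing state file is not an error),
	// and Save persists it again, subject to the minimum save interval.
	if err := manager.Load(); err != nil {
		return err
	}
	return manager.Save()
}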
// Save triggers a save to file, but respects a minimum interval between saves.
func (manager *basicStateManager) Save() error {
manager.saveTimesLock.Lock()
defer manager.saveTimesLock.Unlock()
if time.Since(manager.lastSave) >= minSaveInterval {
// we can just save
err := manager.ForceSave()
manager.lastSave = time.Now()
manager.nextPlannedSave = time.Time{} // re-zero it; assume all pending desires to save are fulfilled
return err
} else if manager.nextPlannedSave.IsZero() {
// No save planned yet, we should plan one.
next := manager.lastSave.Add(minSaveInterval)
manager.nextPlannedSave = next
go func() {
time.Sleep(next.Sub(time.Now()))
manager.Save()
}()
}
// else nextPlannedSave wasn't Zero so there's a save planned elsewhere that'll
// fulfill this
return nil
}
// ForceSave saves the given State to a file. It is an atomic operation on POSIX
// systems (by Renaming over the target file).
// This function logs errors at will and does not necessarily expect the caller
// to handle the error because there's little a caller can do in general other
// than just keep going.
// In addition, the StateManager internally buffers save requests in order to
// only save at most every STATE_SAVE_INTERVAL.
func (manager *basicStateManager) ForceSave() error {
manager.savingLock.Lock()
defer manager.savingLock.Unlock()
log.Info("Saving state!")
s := manager.state
s.Version = ECSDataVersion
data, err := json.Marshal(s)
if err != nil {
log.Error("Error saving state; could not marshal data; this is odd", "err", err)
return err
}
return manager.writeFile(data)
}
// Load reads state off the disk from the well-known filepath and loads it into
// the passed State object.
func (manager *basicStateManager) Load() error {
s := manager.state
log.Info("Loading state!")
data, err := manager.readFile()
if err != nil {
log.Error("Error reading existing state file", "err", err)
return err
}
if data == nil {
return nil
}
// Dry-run to make sure this is a version we can understand
err = manager.dryRun(data)
if err != nil {
return err
}
// Now load it into the actual state. The reason we do this with the
// intermediate state is that we *must* unmarshal directly into the
// "saveable" pointers we were given in AddSaveable; if we unmarshal
// directly into a map with values of pointers, those pointers are lost.
// We *must* unmarshal this way because the existing pointers could have
// semi-initialized data (and are actually expected to)
var intermediate intermediateState
err = json.Unmarshal(data, &intermediate)
if err != nil {
log.Debug("Could not unmarshal into intermediate")
return err
}
for key, rawJSON := range intermediate.Data {
actualPointer, ok := manager.state.Data[key]
if !ok {
log.Error("Loading state: potentially malformed json key of " + key)
continue
}
err = json.Unmarshal(rawJSON, actualPointer)
if err != nil {
log.Debug("Could not unmarshal into actual")
return err
}
}
log.Debug("Loaded state!", "state", s)
return nil
}
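The json.RawMessage indirection above is what lets Load decode into the pointers registered via AddSaveable without losing their existing, possibly semi-initialized values. Below is a stripped-down, hypothetical illustration of that two-phase technique; it is not part of the agent and uses only encoding/json, which this file already imports.
// Hypothetical helper for illustration only: capture raw JSON per key, then
// decode each value straight into an already-registered destination pointer.
func exampleTwoPhaseUnmarshal(data []byte, dest map[string]interface{}) error {
	var intermediate map[string]json.RawMessage
	if err := json.Unmarshal(data, &intermediate); err != nil {
		return err
	}
	for key, raw := range intermediate {
		ptr, ok := dest[key]
		if !ok {
			continue // unknown keys are skipped, as in Load above
		}
		if err := json.Unmarshal(raw, ptr); err != nil {
			return err
		}
	}
	return nil
}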
func (manager *basicStateManager) dryRun(data []byte) error {
// Dry-run to make sure this is a version we can understand
tmps := versionOnlyState{}
err := json.Unmarshal(data, &tmps)
if err != nil {
log.Crit("Could not unmarshal existing state; corrupted data?", "err", err, "data", data)
return err
}
if tmps.Version > ECSDataVersion {
strversion := strconv.Itoa(tmps.Version)
return errors.New("Unsupported data format: Version " + strversion + " not " + strconv.Itoa(ECSDataVersion))
}
return nil
}
| 1 | 20,396 | Are you sure `v3EndpointIDToContainerName` and `v3EndpointIDToTask` are saved in the state file, can you verify that? | aws-amazon-ecs-agent | go |