Columns (name, type, value range):

  patch  stringlengths  17 to 31.2k
  y      int64          1 to 1
  oldf   stringlengths  0 to 2.21M
  idx    int64          1 to 1
  id     int64          4.29k to 68.4k
  msg    stringlengths  8 to 843
  proj   stringclasses  212 values
  lang   stringclasses  9 values
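Each subsequent row carries one code-review example with these eight fields. As a minimal sketch of how a row could be modeled (the record name, the per-field comments, and the exact meaning of y, idx, and id are assumptions drawn only from the values visible below):

```java
/** Hypothetical holder for one row of the dump below; field names follow the schema above. */
public record ReviewExample(
    String patch,  // unified-diff hunk shown to the reviewer
    long y,        // always 1 in this dump
    String oldf,   // full contents of the file before the patch
    long idx,      // always 1 in this dump
    long id,       // numeric row identifier (4.29k to 68.4k here)
    String msg,    // reviewer comment, e.g. "Oops, I missed this initially."
    String proj,   // project tag, e.g. "openzipkin-zipkin"
    String lang    // language tag, one of 9 values, e.g. "java"
) {}
```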
@@ -87,7 +87,7 @@ public class ProtoCodecBenchmarks { @Benchmark public List<Span> bytebuffer_zipkinDecoder() { - return SpanBytesDecoder.PROTO3.decodeList(ByteBufUtil.getBytes(encodedBuf)); + return SpanBytesDecoder.PROTO3.decodeList(encodedBuf.nioBuffer()); } @Benchmark
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package zipkin2.codec; import com.google.common.io.Resources; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.PooledByteBufAllocator; import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.concurrent.TimeUnit; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Measurement; import org.openjdk.jmh.annotations.Mode; import org.openjdk.jmh.annotations.OutputTimeUnit; import org.openjdk.jmh.annotations.Scope; import org.openjdk.jmh.annotations.Setup; import org.openjdk.jmh.annotations.State; import org.openjdk.jmh.annotations.TearDown; import org.openjdk.jmh.annotations.Threads; import org.openjdk.jmh.annotations.Warmup; import org.openjdk.jmh.runner.Runner; import org.openjdk.jmh.runner.options.Options; import org.openjdk.jmh.runner.options.OptionsBuilder; import zipkin2.Span; @Measurement(iterations = 5, time = 1) @Warmup(iterations = 10, time = 1) @Fork(3) @BenchmarkMode(Mode.SampleTime) @OutputTimeUnit(TimeUnit.MICROSECONDS) @State(Scope.Thread) @Threads(1) public class ProtoCodecBenchmarks { static final byte[] clientSpanJsonV2 = read("/zipkin2-client.json"); static final Span clientSpan = SpanBytesDecoder.JSON_V2.decodeOne(clientSpanJsonV2); // Assume a message is 1000 spans (which is a high number for as this is per-node-second) static final List<Span> spans = Collections.nCopies(1000, clientSpan); static final byte[] encodedBytes = SpanBytesEncoder.PROTO3.encodeList(spans); private ByteBuf encodedBuf; @Setup public void setup() { encodedBuf = PooledByteBufAllocator.DEFAULT.buffer(encodedBytes.length); encodedBuf.writeBytes(encodedBytes); } @TearDown public void tearDown() { encodedBuf.release(); } @Benchmark public List<Span> bytes_zipkinDecoder() { return SpanBytesDecoder.PROTO3.decodeList(encodedBytes); } @Benchmark public List<Span> bytes_protobufDecoder() { return ProtobufSpanDecoder.decodeList(encodedBytes); } @Benchmark public List<Span> bytes_wireDecoder() { return WireSpanDecoder.decodeList(encodedBytes); } @Benchmark public List<Span> bytebuffer_zipkinDecoder() { return SpanBytesDecoder.PROTO3.decodeList(ByteBufUtil.getBytes(encodedBuf)); } @Benchmark public List<Span> bytebuffer_protobufDecoder() { return ProtobufSpanDecoder.decodeList(encodedBuf.nioBuffer()); } @Benchmark public List<Span> bytebuffer_wireDecoder() { return WireSpanDecoder.decodeList(encodedBuf.nioBuffer()); } // Convenience main entry-point public static void main(String[] args) throws Exception { Options opt = new OptionsBuilder() .include(".*" + ProtoCodecBenchmarks.class.getSimpleName() + ".*bytes.*") .addProfiler("gc") .build(); new 
Runner(opt).run(); } static byte[] read(String resource) { try { return Resources.toByteArray(Resources.getResource(CodecBenchmarks.class, resource)); } catch (IOException e) { throw new IllegalStateException(e); } } }
1
14,921
Here we decode the ByteBuf directly in the benchmarks.
openzipkin-zipkin
java
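The patch in this row swaps a heap copy of the pooled buffer for an NIO view before decoding, which is what the reviewer's comment refers to. A minimal sketch of that difference using only Netty's ByteBuf API (the Zipkin decoder call is left out, since which decodeList overload accepts the ByteBuffer is specific to that codec):

```java
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.PooledByteBufAllocator;
import java.nio.ByteBuffer;

public class ByteBufViews {
  public static void main(String[] args) {
    byte[] encoded = {1, 2, 3, 4};
    ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer(encoded.length);
    buf.writeBytes(encoded);
    try {
      // Old approach: allocates and fills a new byte[] with the readable bytes.
      byte[] copy = ByteBufUtil.getBytes(buf);

      // New approach: exposes the readable bytes as a java.nio.ByteBuffer,
      // which typically shares the underlying memory instead of copying it.
      ByteBuffer view = buf.nioBuffer();

      System.out.println(copy.length + " copied bytes, view has " + view.remaining() + " remaining");
    } finally {
      buf.release(); // pooled buffers must be released explicitly
    }
  }
}
```

For a benchmark, decoding the view isolates the decoder's cost from the cost of first copying the pooled buffer into a heap array.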
@@ -39,7 +39,16 @@ namespace Datadog.Trace.Logging } else { - SetDefaultValues(); + // Do not set default values because the Tracer (and this event + // subscriber) may be initialized at a time when it is not safe + // to add properties to the MDC of the underlying logging framework. + // + // This previously caused a SerializationException when the Tracer + // was initialized in the Asp.Net HttpModule startup, the default + // values were added to the underlying log4net framework, and, + // upon handling a new request, Asp.Net tried to copy the type + // log4net.Util.PropertiesDictionary from one AppDomain to another + // even though the type did not implement MarshalObjectByRef _scopeManager.SpanActivated += MapOnSpanActivated; _scopeManager.TraceEnded += MapOnTraceEnded; }
1
using System; using System.Collections.Concurrent; using System.Diagnostics; using Datadog.Trace.Logging.LogProviders; namespace Datadog.Trace.Logging { /// <summary> /// Subscriber to ScopeManager events that sets/unsets correlation identifier /// properties in the application's logging context. /// </summary> internal class LibLogScopeEventSubscriber : IDisposable { private readonly IScopeManager _scopeManager; private readonly ILogProvider _logProvider; // Each mapped context sets a key-value pair into the logging context // Disposing the returned context unsets the key-value pair // Keep a stack to retain the history of our correlation identifier properties // (the stack is particularly important for Serilog, see below). // // IMPORTANT: Serilog -- The logging contexts (throughout the entire application) // are maintained in a stack, as opposed to a map, and must be closed // in reverse-order of opening. When operating on the stack-based model, // it is only valid to add the properties once unset them once. private readonly ConcurrentStack<IDisposable> _contextDisposalStack = new ConcurrentStack<IDisposable>(); public LibLogScopeEventSubscriber(IScopeManager scopeManager) { _scopeManager = scopeManager; _logProvider = LogProvider.CurrentLogProvider ?? LogProvider.ResolveLogProvider(); if (_logProvider is SerilogLogProvider) { // Do not set default values for Serilog because it is unsafe to set // except at the application startup, but this would require auto-instrumentation _scopeManager.SpanOpened += StackOnSpanOpened; _scopeManager.SpanClosed += StackOnSpanClosed; } else { SetDefaultValues(); _scopeManager.SpanActivated += MapOnSpanActivated; _scopeManager.TraceEnded += MapOnTraceEnded; } } public void StackOnSpanOpened(object sender, SpanEventArgs spanEventArgs) { SetCorrelationIdentifierContext(spanEventArgs.Span.TraceId, spanEventArgs.Span.SpanId); } public void StackOnSpanClosed(object sender, SpanEventArgs spanEventArgs) { RemoveLastCorrelationIdentifierContext(); } public void MapOnSpanActivated(object sender, SpanEventArgs spanEventArgs) { RemoveAllCorrelationIdentifierContexts(); SetCorrelationIdentifierContext(spanEventArgs.Span.TraceId, spanEventArgs.Span.SpanId); } public void MapOnTraceEnded(object sender, SpanEventArgs spanEventArgs) { RemoveAllCorrelationIdentifierContexts(); SetDefaultValues(); } public void Dispose() { if (_logProvider is SerilogLogProvider) { _scopeManager.SpanOpened -= StackOnSpanOpened; _scopeManager.SpanClosed -= StackOnSpanClosed; } else { _scopeManager.SpanActivated -= MapOnSpanActivated; _scopeManager.TraceEnded -= MapOnTraceEnded; } RemoveAllCorrelationIdentifierContexts(); } private void SetDefaultValues() { SetCorrelationIdentifierContext(0, 0); } private void RemoveLastCorrelationIdentifierContext() { // TODO: Debug logs for (int i = 0; i < 2; i++) { if (_contextDisposalStack.TryPop(out IDisposable ctxDisposable)) { ctxDisposable.Dispose(); } else { // There is nothing left to pop so do nothing. // Though we are in a strange circumstance if we did not balance // the stack properly Debug.Fail($"{nameof(RemoveLastCorrelationIdentifierContext)} call failed. 
Too few items on the context stack."); } } } private void RemoveAllCorrelationIdentifierContexts() { // TODO: Debug logs while (_contextDisposalStack.TryPop(out IDisposable ctxDisposable)) { ctxDisposable.Dispose(); } } private void SetCorrelationIdentifierContext(ulong traceId, ulong spanId) { // TODO: Debug logs _contextDisposalStack.Push( LogProvider.OpenMappedContext( CorrelationIdentifier.TraceIdKey, traceId.ToString(), destructure: false)); _contextDisposalStack.Push( LogProvider.OpenMappedContext( CorrelationIdentifier.SpanIdKey, spanId.ToString(), destructure: false)); } } }
1
16,093
When the Tracer is first created AND `DD_LOGS_INJECTION=true`, this is invoked and causes the issues described.
DataDog-dd-trace-dotnet
.cs
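The C# subscriber above pushes trace and span identifiers into the log provider's mapped context when a span is activated and removes them when the trace ends; the comment added by the patch explains why default values are no longer written eagerly at construction time. As a language-neutral illustration of the same map-based pattern (using SLF4J's MDC in Java, which is an assumption for illustration only and not the library involved in this row; the key names are likewise illustrative):

```java
import org.slf4j.MDC;

public class LogCorrelation {
  static final String TRACE_ID_KEY = "dd.trace_id"; // illustrative key names
  static final String SPAN_ID_KEY = "dd.span_id";

  /** Expose the active span's identifiers to the logging framework. */
  static void onSpanActivated(long traceId, long spanId) {
    MDC.put(TRACE_ID_KEY, Long.toUnsignedString(traceId));
    MDC.put(SPAN_ID_KEY, Long.toUnsignedString(spanId));
  }

  /** Remove the identifiers once the trace has finished. */
  static void onTraceEnded() {
    MDC.remove(TRACE_ID_KEY);
    MDC.remove(SPAN_ID_KEY);
  }
}
```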
@@ -44,7 +44,7 @@ public class JsonRpcErrorConverter { case GAS_PRICE_TOO_LOW: return JsonRpcError.GAS_PRICE_TOO_LOW; case PRIVACY_GROUP_DOES_NOT_EXIST: - return JsonRpcError.PRIVACY_GROUP_DOES_NOT_EXIST; + return JsonRpcError.OFFCHAIN_PRIVACY_GROUP_DOES_NOT_EXIST; default: return JsonRpcError.INVALID_PARAMS;
1
/* * Copyright ConsenSys AG. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * SPDX-License-Identifier: Apache-2.0 */ package org.hyperledger.besu.ethereum.api.jsonrpc; import org.hyperledger.besu.ethereum.api.jsonrpc.internal.response.JsonRpcError; import org.hyperledger.besu.ethereum.mainnet.TransactionValidator.TransactionInvalidReason; public class JsonRpcErrorConverter { public static JsonRpcError convertTransactionInvalidReason( final TransactionInvalidReason reason) { switch (reason) { case NONCE_TOO_LOW: case PRIVATE_NONCE_TOO_LOW: return JsonRpcError.NONCE_TOO_LOW; case INCORRECT_NONCE: case INCORRECT_PRIVATE_NONCE: return JsonRpcError.INCORRECT_NONCE; case INVALID_SIGNATURE: return JsonRpcError.INVALID_TRANSACTION_SIGNATURE; case INTRINSIC_GAS_EXCEEDS_GAS_LIMIT: return JsonRpcError.INTRINSIC_GAS_EXCEEDS_LIMIT; case UPFRONT_COST_EXCEEDS_BALANCE: return JsonRpcError.TRANSACTION_UPFRONT_COST_EXCEEDS_BALANCE; case EXCEEDS_BLOCK_GAS_LIMIT: return JsonRpcError.EXCEEDS_BLOCK_GAS_LIMIT; case TX_SENDER_NOT_AUTHORIZED: return JsonRpcError.TX_SENDER_NOT_AUTHORIZED; // Private Transaction Invalid Reasons case CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE: return JsonRpcError.CHAIN_HEAD_WORLD_STATE_NOT_AVAILABLE; case GAS_PRICE_TOO_LOW: return JsonRpcError.GAS_PRICE_TOO_LOW; case PRIVACY_GROUP_DOES_NOT_EXIST: return JsonRpcError.PRIVACY_GROUP_DOES_NOT_EXIST; default: return JsonRpcError.INVALID_PARAMS; } } }
1
21,927
NIT: If we are renaming the JsonRpcError, should we also rename this enum value to be consistent?
hyperledger-besu
java
@@ -572,7 +572,8 @@ type FunctionExpression struct { Defaults *ObjectExpression Block *Block - typ MonoType + typ MonoType + Vectorized Expression } func (*FunctionExpression) NodeType() string { return "FunctionExpression" }
1
package semantic import ( "regexp" "time" "github.com/influxdata/flux/ast" ) type Node interface { node() NodeType() string Copy() Node Location() ast.SourceLocation } type Loc ast.SourceLocation func (l Loc) Location() ast.SourceLocation { return ast.SourceLocation(l) } func (*Package) node() {} func (*File) node() {} func (*Block) node() {} func (*PackageClause) node() {} func (*ImportDeclaration) node() {} func (*OptionStatement) node() {} func (*BuiltinStatement) node() {} func (*TestStatement) node() {} func (*ExpressionStatement) node() {} func (*ReturnStatement) node() {} func (*MemberAssignment) node() {} func (*NativeVariableAssignment) node() {} func (*StringExpression) node() {} func (*ArrayExpression) node() {} func (*DictExpression) node() {} func (*FunctionExpression) node() {} func (*BinaryExpression) node() {} func (*CallExpression) node() {} func (*ConditionalExpression) node() {} func (*IdentifierExpression) node() {} func (*LogicalExpression) node() {} func (*MemberExpression) node() {} func (*IndexExpression) node() {} func (*ObjectExpression) node() {} func (*UnaryExpression) node() {} func (*Identifier) node() {} func (*Property) node() {} func (*TextPart) node() {} func (*InterpolatedPart) node() {} func (*FunctionParameters) node() {} func (*FunctionParameter) node() {} func (*BooleanLiteral) node() {} func (*DateTimeLiteral) node() {} func (*DurationLiteral) node() {} func (*FloatLiteral) node() {} func (*IntegerLiteral) node() {} func (*StringLiteral) node() {} func (*RegexpLiteral) node() {} func (*UnsignedIntegerLiteral) node() {} type Statement interface { Node stmt() } func (*OptionStatement) stmt() {} func (*BuiltinStatement) stmt() {} func (*TestStatement) stmt() {} func (*ExpressionStatement) stmt() {} func (*ReturnStatement) stmt() {} func (*NativeVariableAssignment) stmt() {} func (*MemberAssignment) stmt() {} type Assignment interface { Statement assignment() } func (*MemberAssignment) assignment() {} func (*NativeVariableAssignment) assignment() {} type Expression interface { Node expression() TypeOf() MonoType } func (*StringExpression) expression() {} func (*ArrayExpression) expression() {} func (*DictExpression) expression() {} func (*BinaryExpression) expression() {} func (*BooleanLiteral) expression() {} func (*CallExpression) expression() {} func (*ConditionalExpression) expression() {} func (*DateTimeLiteral) expression() {} func (*DurationLiteral) expression() {} func (*FloatLiteral) expression() {} func (*FunctionExpression) expression() {} func (*IdentifierExpression) expression() {} func (*IntegerLiteral) expression() {} func (*LogicalExpression) expression() {} func (*MemberExpression) expression() {} func (*IndexExpression) expression() {} func (*ObjectExpression) expression() {} func (*RegexpLiteral) expression() {} func (*StringLiteral) expression() {} func (*UnaryExpression) expression() {} func (*UnsignedIntegerLiteral) expression() {} type Literal interface { Expression literal() } func (*BooleanLiteral) literal() {} func (*DateTimeLiteral) literal() {} func (*DurationLiteral) literal() {} func (*FloatLiteral) literal() {} func (*IntegerLiteral) literal() {} func (*RegexpLiteral) literal() {} func (*StringLiteral) literal() {} func (*UnsignedIntegerLiteral) literal() {} type PropertyKey interface { Node Key() string } func (n *Identifier) Key() string { return n.Name } func (n *StringLiteral) Key() string { return n.Value } type Package struct { Loc Package string Files []*File } func (*Package) NodeType() string { return "Package" } 
func (p *Package) Copy() Node { if p == nil { return p } np := new(Package) *np = *p if len(p.Files) > 0 { np.Files = make([]*File, len(p.Files)) for i, f := range p.Files { np.Files[i] = f.Copy().(*File) } } return np } type File struct { Loc Package *PackageClause Imports []*ImportDeclaration Body []Statement } func (*File) NodeType() string { return "File" } func (p *File) Copy() Node { if p == nil { return p } np := new(File) *np = *p if len(p.Body) > 0 { np.Body = make([]Statement, len(p.Body)) for i, s := range p.Body { np.Body[i] = s.Copy().(Statement) } } return np } type PackageClause struct { Loc Name *Identifier } func (*PackageClause) NodeType() string { return "PackageClause" } func (p *PackageClause) Copy() Node { if p == nil { return p } np := new(PackageClause) *np = *p np.Name = p.Name.Copy().(*Identifier) return np } type ImportDeclaration struct { Loc As *Identifier Path *StringLiteral } func (*ImportDeclaration) NodeType() string { return "ImportDeclaration" } func (d *ImportDeclaration) Copy() Node { if d == nil { return d } nd := new(ImportDeclaration) *nd = *d nd.As = d.As.Copy().(*Identifier) nd.Path = d.Path.Copy().(*StringLiteral) return nd } type Block struct { Loc Body []Statement } func (*Block) NodeType() string { return "Block" } func (s *Block) ReturnStatement() *ReturnStatement { return s.Body[len(s.Body)-1].(*ReturnStatement) } func (s *Block) Copy() Node { if s == nil { return s } ns := new(Block) *ns = *s if len(s.Body) > 0 { ns.Body = make([]Statement, len(s.Body)) for i, stmt := range s.Body { ns.Body[i] = stmt.Copy().(Statement) } } return ns } type OptionStatement struct { Loc Assignment Assignment } func (s *OptionStatement) NodeType() string { return "OptionStatement" } func (s *OptionStatement) Copy() Node { if s == nil { return s } ns := new(OptionStatement) *ns = *s ns.Assignment = s.Assignment.Copy().(Assignment) return ns } type BuiltinStatement struct { Loc ID *Identifier } func (s *BuiltinStatement) NodeType() string { return "BuiltinStatement" } func (s *BuiltinStatement) Copy() Node { if s == nil { return s } ns := new(BuiltinStatement) *ns = *s ns.ID = s.ID.Copy().(*Identifier) return ns } type TestStatement struct { Loc Assignment *NativeVariableAssignment } func (s *TestStatement) NodeType() string { return "TestStatement" } func (s *TestStatement) Copy() Node { if s == nil { return s } ns := new(TestStatement) *ns = *s ns.Assignment = s.Assignment.Copy().(*NativeVariableAssignment) return ns } type ExpressionStatement struct { Loc Expression Expression } func (*ExpressionStatement) NodeType() string { return "ExpressionStatement" } func (s *ExpressionStatement) Copy() Node { if s == nil { return s } ns := new(ExpressionStatement) *ns = *s ns.Expression = s.Expression.Copy().(Expression) return ns } type ReturnStatement struct { Loc Argument Expression } func (*ReturnStatement) NodeType() string { return "ReturnStatement" } func (s *ReturnStatement) Copy() Node { if s == nil { return s } ns := new(ReturnStatement) *ns = *s ns.Argument = s.Argument.Copy().(Expression) return ns } type NativeVariableAssignment struct { Loc Identifier *Identifier Init Expression Typ PolyType } func (*NativeVariableAssignment) NodeType() string { return "NativeVariableAssignment" } func (s *NativeVariableAssignment) Copy() Node { if s == nil { return s } ns := new(NativeVariableAssignment) *ns = *s ns.Identifier = s.Identifier.Copy().(*Identifier) if s.Init != nil { ns.Init = s.Init.Copy().(Expression) } return ns } type MemberAssignment struct { Loc Member 
*MemberExpression Init Expression } func (*MemberAssignment) NodeType() string { return "MemberAssignment" } func (s *MemberAssignment) Copy() Node { if s == nil { return s } ns := new(MemberAssignment) *ns = *s if s.Member != nil { ns.Member = s.Member.Copy().(*MemberExpression) } if s.Init != nil { ns.Init = s.Init.Copy().(Expression) } return ns } type StringExpression struct { Loc Parts []StringExpressionPart } func (*StringExpression) NodeType() string { return "StringExpression" } func (e *StringExpression) Copy() Node { if e == nil { return e } ne := new(StringExpression) *ne = *e parts := make([]StringExpressionPart, len(e.Parts)) for i, p := range e.Parts { parts[i] = p.Copy().(StringExpressionPart) } ne.Parts = parts return ne } func (e *StringExpression) TypeOf() MonoType { return BasicString } type StringExpressionPart interface { Node stringPart() } func (*TextPart) stringPart() {} func (*InterpolatedPart) stringPart() {} type TextPart struct { Loc Value string } func (*TextPart) NodeType() string { return "TextPart" } func (p *TextPart) Copy() Node { if p == nil { return p } np := new(TextPart) *np = *p return np } type InterpolatedPart struct { Loc Expression Expression } func (*InterpolatedPart) NodeType() string { return "InterpolatedPart" } func (p *InterpolatedPart) Copy() Node { if p == nil { return p } np := new(InterpolatedPart) *np = *p if p.Expression != nil { np.Expression = p.Expression.Copy().(Expression) } return np } type ArrayExpression struct { Loc Elements []Expression Type MonoType } func (*ArrayExpression) NodeType() string { return "ArrayExpression" } func (e *ArrayExpression) Copy() Node { if e == nil { return e } ne := new(ArrayExpression) *ne = *e if len(e.Elements) > 0 { ne.Elements = make([]Expression, len(e.Elements)) for i, elem := range e.Elements { ne.Elements[i] = elem.Copy().(Expression) } } return ne } func (e *ArrayExpression) TypeOf() MonoType { return e.Type } type DictExpression struct { Loc Elements []struct { Key Expression Val Expression } Type MonoType } func (*DictExpression) NodeType() string { return "DictExpression" } func (e *DictExpression) Copy() Node { if e == nil { return e } ne := new(DictExpression) *ne = *e if len(e.Elements) > 0 { ne.Elements = make([]struct { Key Expression Val Expression }, len(e.Elements)) for i, elem := range e.Elements { ne.Elements[i] = struct { Key Expression Val Expression }{ Key: elem.Key.Copy().(Expression), Val: elem.Val.Copy().(Expression), } } } return ne } func (e *DictExpression) TypeOf() MonoType { return e.Type } // FunctionExpression represents the definition of a function type FunctionExpression struct { Loc Parameters *FunctionParameters Defaults *ObjectExpression Block *Block typ MonoType } func (*FunctionExpression) NodeType() string { return "FunctionExpression" } func (e *FunctionExpression) Copy() Node { if e == nil { return e } ne := new(FunctionExpression) *ne = *e if e.Parameters != nil { ne.Parameters = e.Parameters.Copy().(*FunctionParameters) } if e.Defaults != nil { ne.Defaults = e.Defaults.Copy().(*ObjectExpression) } ne.Block = e.Block.Copy().(*Block) return ne } func (e *FunctionExpression) TypeOf() MonoType { return e.typ } // GetFunctionBodyExpression will return the return value expression from // the function block. This will only return an expression if there // is exactly one expression in the block. It will return false // as the second argument if the statement is more complex. 
func (e *FunctionExpression) GetFunctionBodyExpression() (Expression, bool) { if len(e.Block.Body) != 1 { return nil, false } returnExpr, ok := e.Block.Body[0].(*ReturnStatement) if !ok { return nil, false } return returnExpr.Argument, true } // FunctionParameters represents the list of function parameters and which if any parameter is the pipe parameter. type FunctionParameters struct { Loc List []*FunctionParameter Pipe *Identifier } func (*FunctionParameters) NodeType() string { return "FunctionParameters" } func (p *FunctionParameters) Copy() Node { if p == nil { return p } np := new(FunctionParameters) *np = *p if len(p.List) > 0 { np.List = make([]*FunctionParameter, len(p.List)) for i, k := range p.List { np.List[i] = k.Copy().(*FunctionParameter) } } if p.Pipe != nil { np.Pipe = p.Pipe.Copy().(*Identifier) } return np } // FunctionParameter represents a function parameter. type FunctionParameter struct { Loc Key *Identifier } func (*FunctionParameter) NodeType() string { return "FunctionParameter" } func (p *FunctionParameter) Copy() Node { if p == nil { return p } np := new(FunctionParameter) *np = *p np.Key = p.Key.Copy().(*Identifier) return np } type BinaryExpression struct { Loc Operator ast.OperatorKind Left Expression Right Expression typ MonoType } func (*BinaryExpression) NodeType() string { return "BinaryExpression" } func (e *BinaryExpression) Copy() Node { if e == nil { return e } ne := new(BinaryExpression) *ne = *e ne.Left = e.Left.Copy().(Expression) ne.Right = e.Right.Copy().(Expression) return ne } func (e *BinaryExpression) TypeOf() MonoType { return e.typ } type CallExpression struct { Loc Callee Expression Arguments *ObjectExpression Pipe Expression typ MonoType } func (*CallExpression) NodeType() string { return "CallExpression" } func (e *CallExpression) Copy() Node { if e == nil { return e } ne := new(CallExpression) *ne = *e ne.Callee = e.Callee.Copy().(Expression) ne.Arguments = e.Arguments.Copy().(*ObjectExpression) if e.Pipe != nil { ne.Pipe = e.Pipe.Copy().(Expression) } return ne } func (e *CallExpression) TypeOf() MonoType { return e.typ } type ConditionalExpression struct { Loc Test Expression Alternate Expression Consequent Expression } func (*ConditionalExpression) NodeType() string { return "ConditionalExpression" } func (e *ConditionalExpression) Copy() Node { if e == nil { return e } ne := new(ConditionalExpression) *ne = *e ne.Test = e.Test.Copy().(Expression) ne.Alternate = e.Alternate.Copy().(Expression) ne.Consequent = e.Consequent.Copy().(Expression) return ne } func (e *ConditionalExpression) TypeOf() MonoType { return e.Alternate.TypeOf() } type LogicalExpression struct { Loc Operator ast.LogicalOperatorKind Left Expression Right Expression } func (*LogicalExpression) NodeType() string { return "LogicalExpression" } func (e *LogicalExpression) Copy() Node { if e == nil { return e } ne := new(LogicalExpression) *ne = *e ne.Left = e.Left.Copy().(Expression) ne.Right = e.Right.Copy().(Expression) return ne } func (e *LogicalExpression) TypeOf() MonoType { return BasicBool } type MemberExpression struct { Loc Object Expression Property string typ MonoType } func (*MemberExpression) NodeType() string { return "MemberExpression" } func (e *MemberExpression) Copy() Node { if e == nil { return e } ne := new(MemberExpression) *ne = *e ne.Object = e.Object.Copy().(Expression) return ne } func (e *MemberExpression) TypeOf() MonoType { return e.typ } type IndexExpression struct { Loc Array Expression Index Expression typ MonoType } func 
(*IndexExpression) NodeType() string { return "IndexExpression" } func (e *IndexExpression) Copy() Node { if e == nil { return e } ne := new(IndexExpression) *ne = *e ne.Array = e.Array.Copy().(Expression) ne.Index = e.Index.Copy().(Expression) return ne } func (e *IndexExpression) TypeOf() MonoType { return e.typ } type ObjectExpression struct { Loc With *IdentifierExpression Properties []*Property typ MonoType } func (*ObjectExpression) NodeType() string { return "ObjectExpression" } func (e *ObjectExpression) Copy() Node { if e == nil { return e } ne := new(ObjectExpression) *ne = *e ne.With = e.With.Copy().(*IdentifierExpression) if len(e.Properties) > 0 { ne.Properties = make([]*Property, len(e.Properties)) for i, prop := range e.Properties { ne.Properties[i] = prop.Copy().(*Property) } } return ne } func (e *ObjectExpression) TypeOf() MonoType { return e.typ } type UnaryExpression struct { Loc Operator ast.OperatorKind Argument Expression typ MonoType } func (*UnaryExpression) NodeType() string { return "UnaryExpression" } func (e *UnaryExpression) Copy() Node { if e == nil { return e } ne := new(UnaryExpression) *ne = *e ne.Argument = e.Argument.Copy().(Expression) return ne } func (e *UnaryExpression) TypeOf() MonoType { return e.typ } type Property struct { Loc Key PropertyKey Value Expression } func (*Property) NodeType() string { return "Property" } func (p *Property) Copy() Node { if p == nil { return p } np := new(Property) *np = *p np.Value = p.Value.Copy().(Expression) return np } type IdentifierExpression struct { Loc Name string typ MonoType } func (*IdentifierExpression) NodeType() string { return "IdentifierExpression" } func (e *IdentifierExpression) Copy() Node { if e == nil { return e } ne := new(IdentifierExpression) *ne = *e return ne } func (e *IdentifierExpression) TypeOf() MonoType { return e.typ } type Identifier struct { Loc Name string } func (*Identifier) NodeType() string { return "Identifier" } func (i *Identifier) Copy() Node { if i == nil { return i } ni := new(Identifier) *ni = *i return ni } type BooleanLiteral struct { Loc Value bool } func (*BooleanLiteral) NodeType() string { return "BooleanLiteral" } func (l *BooleanLiteral) Copy() Node { if l == nil { return l } nl := new(BooleanLiteral) *nl = *l return nl } func (e *BooleanLiteral) TypeOf() MonoType { return BasicBool } type DateTimeLiteral struct { Loc Value time.Time } func (*DateTimeLiteral) NodeType() string { return "DateTimeLiteral" } func (l *DateTimeLiteral) Copy() Node { if l == nil { return l } nl := new(DateTimeLiteral) *nl = *l return nl } func (e *DateTimeLiteral) TypeOf() MonoType { return BasicTime } type DurationLiteral struct { Loc Values []ast.Duration } func (*DurationLiteral) NodeType() string { return "DurationLiteral" } func (l *DurationLiteral) Copy() Node { if l == nil { return l } nl := new(DurationLiteral) *nl = *l return nl } func (e *DurationLiteral) TypeOf() MonoType { return BasicDuration } type IntegerLiteral struct { Loc Value int64 } func (*IntegerLiteral) NodeType() string { return "IntegerLiteral" } func (l *IntegerLiteral) Copy() Node { if l == nil { return l } nl := new(IntegerLiteral) *nl = *l return nl } func (e *IntegerLiteral) TypeOf() MonoType { return BasicInt } type FloatLiteral struct { Loc Value float64 } func (*FloatLiteral) NodeType() string { return "FloatLiteral" } func (l *FloatLiteral) Copy() Node { if l == nil { return l } nl := new(FloatLiteral) *nl = *l return nl } func (e *FloatLiteral) TypeOf() MonoType { return BasicFloat } type 
RegexpLiteral struct { Loc Value *regexp.Regexp } func (*RegexpLiteral) NodeType() string { return "RegexpLiteral" } func (l *RegexpLiteral) Copy() Node { if l == nil { return l } nl := new(RegexpLiteral) *nl = *l nl.Value = l.Value return nl } func (e *RegexpLiteral) TypeOf() MonoType { return BasicRegexp } type StringLiteral struct { Loc Value string } func (*StringLiteral) NodeType() string { return "StringLiteral" } func (l *StringLiteral) Copy() Node { if l == nil { return l } nl := new(StringLiteral) *nl = *l return nl } func (e *StringLiteral) TypeOf() MonoType { return BasicString } type UnsignedIntegerLiteral struct { Loc Value uint64 } func (*UnsignedIntegerLiteral) NodeType() string { return "UnsignedIntegerLiteral" } func (l *UnsignedIntegerLiteral) Copy() Node { if l == nil { return l } nl := new(UnsignedIntegerLiteral) *nl = *l return nl } func (e *UnsignedIntegerLiteral) TypeOf() MonoType { return BasicUint }
1
17,287
This could be made into a function expression, yes?
influxdata-flux
go
@@ -9,5 +9,5 @@ void release_assert_internal (bool check, const char * check_expr, const char * } std::cerr << "Assertion (" << check_expr << ") failed " << file << ":" << line << std::endl; - abort(); + abort (); }
1
#include <iostream> #include <rai/lib/utility.hpp> void release_assert_internal (bool check, const char * check_expr, const char * file, unsigned int line) { if (check) { return; } std::cerr << "Assertion (" << check_expr << ") failed " << file << ":" << line << std::endl; abort(); }
1
14,081
Oops, I missed this initially.
nanocurrency-nano-node
cpp
@@ -243,6 +243,12 @@ class TestLambdaBaseFeatures(unittest.TestCase): lambda_client.delete_function(FunctionName=lambda_name) def test_add_lambda_permission(self): + function_name = 'lambda_func-{}'.format(short_uid()) + testutil.create_lambda_function( + handler_file=TEST_LAMBDA_ECHO_FILE, + func_name=function_name, + runtime=LAMBDA_RUNTIME_PYTHON36 + ) iam_client = aws_stack.connect_to_service('iam') lambda_client = aws_stack.connect_to_service('lambda')
1
import re import os import json import shutil import time import unittest import six import base64 from botocore.exceptions import ClientError from io import BytesIO from localstack import config from localstack.constants import LOCALSTACK_MAVEN_VERSION, LOCALSTACK_ROOT_FOLDER, LAMBDA_TEST_ROLE from localstack.services.awslambda.lambda_executors import LAMBDA_RUNTIME_PYTHON37, LAMBDA_RUNTIME_NODEJS12X from localstack.utils import testutil from localstack.utils.testutil import ( get_lambda_log_events, check_expected_lambda_log_events_length, create_lambda_archive ) from localstack.utils.kinesis import kinesis_connector from localstack.utils.aws import aws_stack from localstack.utils.common import ( unzip, new_tmp_dir, short_uid, load_file, to_str, mkdir, download, save_file, run_safe, get_free_tcp_port, get_service_protocol, retry, to_bytes, cp_r ) from localstack.services.infra import start_proxy from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR from localstack.services.awslambda import lambda_api, lambda_executors from localstack.services.generic_proxy import ProxyListener from localstack.services.awslambda.lambda_api import ( LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE31, LAMBDA_RUNTIME_RUBY25, LAMBDA_RUNTIME_PYTHON27, use_docker, LAMBDA_RUNTIME_PYTHON36, LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_NODEJS810, LAMBDA_RUNTIME_PROVIDED, BATCH_SIZE_RANGES, INVALID_PARAMETER_VALUE_EXCEPTION, LAMBDA_DEFAULT_HANDLER) from .lambdas import lambda_integration THIS_FOLDER = os.path.dirname(os.path.realpath(__file__)) TEST_LAMBDA_PYTHON = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.py') TEST_LAMBDA_PYTHON_ECHO = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_echo.py') TEST_LAMBDA_PYTHON3 = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_python3.py') TEST_LAMBDA_NODEJS = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.js') TEST_LAMBDA_RUBY = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_integration.rb') TEST_LAMBDA_DOTNETCORE2 = os.path.join(THIS_FOLDER, 'lambdas', 'dotnetcore2', 'dotnetcore2.zip') TEST_LAMBDA_DOTNETCORE31 = os.path.join(THIS_FOLDER, 'lambdas', 'dotnetcore31', 'dotnetcore31.zip') TEST_LAMBDA_CUSTOM_RUNTIME = os.path.join(THIS_FOLDER, 'lambdas', 'custom-runtime') TEST_LAMBDA_JAVA = os.path.join(LOCALSTACK_ROOT_FOLDER, 'localstack', 'infra', 'localstack-utils-tests.jar') TEST_LAMBDA_JAVA_WITH_LIB = os.path.join(THIS_FOLDER, 'lambdas', 'java', 'lambda-function-with-lib-0.0.1.jar') TEST_LAMBDA_ENV = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_environment.py') TEST_LAMBDA_PYTHON3_MULTIPLE_CREATE1 = os.path.join(THIS_FOLDER, 'lambdas', 'python3', 'lambda1', 'lambda1.zip') TEST_LAMBDA_PYTHON3_MULTIPLE_CREATE2 = os.path.join(THIS_FOLDER, 'lambdas', 'python3', 'lambda2', 'lambda2.zip') TEST_LAMBDA_NAME_PY = 'test_lambda_py' TEST_LAMBDA_NAME_PY3 = 'test_lambda_py3' TEST_LAMBDA_NAME_JS = 'test_lambda_js' TEST_LAMBDA_NAME_RUBY = 'test_lambda_ruby' TEST_LAMBDA_NAME_DOTNETCORE2 = 'test_lambda_dotnetcore2' TEST_LAMBDA_NAME_DOTNETCORE31 = 'test_lambda_dotnetcore31' TEST_LAMBDA_NAME_CUSTOM_RUNTIME = 'test_lambda_custom_runtime' TEST_LAMBDA_NAME_JAVA = 'test_lambda_java' TEST_LAMBDA_NAME_JAVA_STREAM = 'test_lambda_java_stream' TEST_LAMBDA_NAME_JAVA_SERIALIZABLE = 'test_lambda_java_serializable' TEST_LAMBDA_NAME_JAVA_KINESIS = 'test_lambda_java_kinesis' TEST_LAMBDA_NAME_ENV = 'test_lambda_env' TEST_LAMBDA_ECHO_FILE = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_echo.py') TEST_LAMBDA_SEND_MESSAGE_FILE = os.path.join(THIS_FOLDER, 'lambdas', 
'lambda_send_message.py') TEST_LAMBDA_PUT_ITEM_FILE = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_put_item.py') TEST_LAMBDA_START_EXECUTION_FILE = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_start_execution.py') TEST_LAMBDA_FUNCTION_PREFIX = 'lambda-function' TEST_SNS_TOPIC_NAME = 'sns-topic-1' MAVEN_BASE_URL = 'https://repo.maven.apache.org/maven2' TEST_LAMBDA_JAR_URL = '{url}/cloud/localstack/{name}/{version}/{name}-{version}-tests.jar'.format( version=LOCALSTACK_MAVEN_VERSION, url=MAVEN_BASE_URL, name='localstack-utils') TEST_LAMBDA_LIBS = [ 'localstack', 'localstack_client', 'requests', 'psutil', 'urllib3', 'chardet', 'certifi', 'idna', 'pip', 'dns' ] def _run_forward_to_fallback_url(url, num_requests=3): lambda_client = aws_stack.connect_to_service('lambda') config.LAMBDA_FALLBACK_URL = url try: for i in range(num_requests): lambda_client.invoke(FunctionName='non-existing-lambda-%s' % i, Payload=b'{}', InvocationType='RequestResponse') finally: config.LAMBDA_FALLBACK_URL = '' class LambdaTestBase(unittest.TestCase): def test_create_lambda_function(self): func_name = 'lambda_func-{}'.format(short_uid()) kms_key_arn = 'arn:aws:kms:us-east-1:000000000000:key11' vpc_config = { 'SubnetIds': ['subnet-123456789'], 'SecurityGroupIds': ['sg-123456789'] } tags = { 'env': 'testing' } kwargs = { 'FunctionName': func_name, 'Runtime': LAMBDA_RUNTIME_PYTHON37, 'Handler': LAMBDA_DEFAULT_HANDLER, 'Role': LAMBDA_TEST_ROLE, 'KMSKeyArn': kms_key_arn, 'Code': { 'ZipFile': create_lambda_archive(load_file(TEST_LAMBDA_PYTHON_ECHO), get_content=True) }, 'Timeout': 3, 'VpcConfig': vpc_config, 'Tags': tags } client = aws_stack.connect_to_service('lambda') client.create_function(**kwargs) rs = client.get_function( FunctionName=func_name ) self.assertEqual(rs['Configuration'].get('KMSKeyArn', ''), kms_key_arn) self.assertEqual(rs['Configuration'].get('VpcConfig', {}), vpc_config) self.assertEqual(rs['Tags'], tags) client.delete_function(FunctionName=func_name) def check_lambda_logs(self, func_name, expected_lines=[]): log_events = LambdaTestBase.get_lambda_logs(func_name) log_messages = [e['message'] for e in log_events] for line in expected_lines: if '.*' in line: found = [re.match(line, m) for m in log_messages] if any(found): continue self.assertIn(line, log_messages) @staticmethod def get_lambda_logs(func_name): logs_client = aws_stack.connect_to_service('logs') log_group_name = '/aws/lambda/%s' % func_name streams = logs_client.describe_log_streams(logGroupName=log_group_name)['logStreams'] streams = sorted(streams, key=lambda x: x['creationTime'], reverse=True) log_events = logs_client.get_log_events( logGroupName=log_group_name, logStreamName=streams[0]['logStreamName'])['events'] return log_events class TestLambdaBaseFeatures(unittest.TestCase): def test_forward_to_fallback_url_dynamodb(self): db_table = 'lambda-records' ddb_client = aws_stack.connect_to_service('dynamodb') def num_items(): return len((run_safe(ddb_client.scan, TableName=db_table) or {'Items': []})['Items']) items_before = num_items() _run_forward_to_fallback_url('dynamodb://%s' % db_table) items_after = num_items() self.assertEqual(items_after, items_before + 3) def test_forward_to_fallback_url_http(self): class MyUpdateListener(ProxyListener): def forward_request(self, method, path, data, headers): records.append({'data': data, 'headers': headers}) return 200 records = [] local_port = get_free_tcp_port() proxy = start_proxy(local_port, backend_url=None, update_listener=MyUpdateListener()) items_before = len(records) 
_run_forward_to_fallback_url('%s://localhost:%s' % (get_service_protocol(), local_port)) items_after = len(records) for record in records: self.assertIn('non-existing-lambda', record['headers']['lambda-function-name']) self.assertEqual(items_after, items_before + 3) proxy.stop() def test_adding_fallback_function_name_in_headers(self): lambda_client = aws_stack.connect_to_service('lambda') ddb_client = aws_stack.connect_to_service('dynamodb') db_table = 'lambda-records' config.LAMBDA_FALLBACK_URL = 'dynamodb://%s' % db_table lambda_client.invoke(FunctionName='non-existing-lambda', Payload=b'{}', InvocationType='RequestResponse') result = run_safe(ddb_client.scan, TableName=db_table) self.assertEqual(result['Items'][0]['function_name']['S'], 'non-existing-lambda') def test_dead_letter_queue(self): sqs_client = aws_stack.connect_to_service('sqs') lambda_client = aws_stack.connect_to_service('lambda') # create DLQ and Lambda function queue_name = 'test-%s' % short_uid() lambda_name = 'test-%s' % short_uid() queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] queue_arn = aws_stack.sqs_queue_arn(queue_name) testutil.create_lambda_function( handler_file=TEST_LAMBDA_PYTHON, func_name=lambda_name, libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON36, DeadLetterConfig={'TargetArn': queue_arn} ) # invoke Lambda, triggering an error payload = { lambda_integration.MSG_BODY_RAISE_ERROR_FLAG: 1 } lambda_client.invoke(FunctionName=lambda_name, Payload=json.dumps(payload), InvocationType='Event') # assert that message has been received on the DLQ def receive_dlq(): result = sqs_client.receive_message(QueueUrl=queue_url, MessageAttributeNames=['All']) self.assertGreater(len(result['Messages']), 0) msg_attrs = result['Messages'][0]['MessageAttributes'] self.assertIn('RequestID', msg_attrs) self.assertIn('ErrorCode', msg_attrs) self.assertIn('ErrorMessage', msg_attrs) retry(receive_dlq, retries=8, sleep=2) # update DLQ config lambda_client.update_function_configuration(FunctionName=lambda_name, DeadLetterConfig={}) # invoke Lambda again, assert that status code is 200 and error details contained in the payload result = lambda_client.invoke(FunctionName=lambda_name, Payload=json.dumps(payload)) payload = json.loads(to_str(result['Payload'].read())) self.assertEqual(200, result['StatusCode']) self.assertEqual('Unhandled', result['FunctionError']) self.assertEqual('$LATEST', result['ExecutedVersion']) self.assertIn('Test exception', payload['errorMessage']) self.assertEqual('Exception', payload['errorType']) self.assertEqual(list, type(payload['stackTrace'])) # clean up sqs_client.delete_queue(QueueUrl=queue_url) lambda_client.delete_function(FunctionName=lambda_name) def test_add_lambda_permission(self): iam_client = aws_stack.connect_to_service('iam') lambda_client = aws_stack.connect_to_service('lambda') # create lambda permission action = 'lambda:InvokeFunction' sid = 's3' principal = 's3.amazonaws.com' resp = lambda_client.add_permission(FunctionName=TEST_LAMBDA_NAME_PY, Action=action, StatementId=sid, Principal=principal, SourceArn=aws_stack.s3_bucket_arn('test-bucket')) self.assertIn('Statement', resp) # fetch lambda policy policy = lambda_client.get_policy(FunctionName=TEST_LAMBDA_NAME_PY)['Policy'] self.assertIsInstance(policy, six.string_types) policy = json.loads(to_str(policy)) self.assertEqual(policy['Statement'][0]['Action'], action) self.assertEqual(policy['Statement'][0]['Sid'], sid) self.assertEqual(policy['Statement'][0]['Resource'], lambda_api.func_arn(TEST_LAMBDA_NAME_PY)) 
self.assertEqual(policy['Statement'][0]['Principal']['Service'], principal) self.assertEqual(policy['Statement'][0]['Condition']['ArnLike']['AWS:SourceArn'], aws_stack.s3_bucket_arn('test-bucket')) # fetch IAM policy policies = iam_client.list_policies(Scope='Local', MaxItems=500)['Policies'] matching = [p for p in policies if p['PolicyName'] == 'lambda_policy_%s_%s' % (TEST_LAMBDA_NAME_PY, sid)] self.assertEqual(len(matching), 1) self.assertIn(':policy/', matching[0]['Arn']) # remove permission that we just added resp = lambda_client.remove_permission(FunctionName=TEST_LAMBDA_NAME_PY, StatementId=sid, Qualifier='qual1', RevisionId='r1') self.assertEqual(resp['ResponseMetadata']['HTTPStatusCode'], 200) def test_lambda_asynchronous_invocations(self): function_name = 'lambda_func-{}'.format(short_uid()) testutil.create_lambda_function( handler_file=TEST_LAMBDA_ECHO_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36 ) lambda_client = aws_stack.connect_to_service('lambda') # adding event invoke config response = lambda_client.put_function_event_invoke_config( FunctionName=function_name, MaximumRetryAttempts=123, MaximumEventAgeInSeconds=123, DestinationConfig={ 'OnSuccess': { 'Destination': function_name }, 'OnFailure': { 'Destination': function_name } } ) destination_config = { 'OnSuccess': { 'Destination': function_name }, 'OnFailure': { 'Destination': function_name } } # checking for parameter configuration self.assertEqual(response['MaximumRetryAttempts'], 123) self.assertEqual(response['MaximumEventAgeInSeconds'], 123) self.assertEqual(response['DestinationConfig'], destination_config) # over writing event invoke config response = lambda_client.put_function_event_invoke_config( FunctionName=function_name, MaximumRetryAttempts=123, DestinationConfig={ 'OnSuccess': { 'Destination': function_name }, 'OnFailure': { 'Destination': function_name } } ) # checking if 'MaximumEventAgeInSeconds' is removed self.assertNotIn('MaximumEventAgeInSeconds', response) # updating event invoke config response = lambda_client.update_function_event_invoke_config( FunctionName=function_name, MaximumRetryAttempts=111, ) # checking for updated and existing configuration self.assertEqual(response['MaximumRetryAttempts'], 111) self.assertEqual(response['DestinationConfig'], destination_config) # clean up response = lambda_client.delete_function_event_invoke_config( FunctionName=function_name) lambda_client.delete_function(FunctionName=function_name) def test_event_source_mapping_default_batch_size(self): function_name = 'lambda_func-{}'.format(short_uid()) queue_name_1 = 'queue-{}-1'.format(short_uid()) queue_name_2 = 'queue-{}-2'.format(short_uid()) ddb_table = 'ddb_table-{}'.format(short_uid()) testutil.create_lambda_function( handler_file=TEST_LAMBDA_ECHO_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36 ) lambda_client = aws_stack.connect_to_service('lambda') sqs_client = aws_stack.connect_to_service('sqs') queue_url_1 = sqs_client.create_queue(QueueName=queue_name_1)['QueueUrl'] queue_arn_1 = aws_stack.sqs_queue_arn(queue_name_1) rs = lambda_client.create_event_source_mapping( EventSourceArn=queue_arn_1, FunctionName=function_name ) self.assertEqual(rs['BatchSize'], BATCH_SIZE_RANGES['sqs'][0]) uuid = rs['UUID'] try: # Update batch size with invalid value lambda_client.update_event_source_mapping( UUID=uuid, FunctionName=function_name, BatchSize=BATCH_SIZE_RANGES['sqs'][1] + 1 ) self.fail('This call should not be successful as the batch size > MAX_BATCH_SIZE') except ClientError as e: 
self.assertEqual(e.response['Error']['Code'], INVALID_PARAMETER_VALUE_EXCEPTION) queue_url_2 = sqs_client.create_queue(QueueName=queue_name_2)['QueueUrl'] queue_arn_2 = aws_stack.sqs_queue_arn(queue_name_2) try: # Create event source mapping with invalid batch size value lambda_client.create_event_source_mapping( EventSourceArn=queue_arn_2, FunctionName=function_name, BatchSize=BATCH_SIZE_RANGES['sqs'][1] + 1 ) self.fail('This call should not be successful as the batch size > MAX_BATCH_SIZE') except ClientError as e: self.assertEqual(e.response['Error']['Code'], INVALID_PARAMETER_VALUE_EXCEPTION) table_arn = aws_stack.create_dynamodb_table(ddb_table, partition_key='id')['TableDescription']['TableArn'] rs = lambda_client.create_event_source_mapping( EventSourceArn=table_arn, FunctionName=function_name ) self.assertEqual(rs['BatchSize'], BATCH_SIZE_RANGES['dynamodb'][0]) # clean up dynamodb_client = aws_stack.connect_to_service('dynamodb') dynamodb_client.delete_table(TableName=ddb_table) sqs_client.delete_queue(QueueUrl=queue_url_1) sqs_client.delete_queue(QueueUrl=queue_url_2) lambda_client.delete_function(FunctionName=function_name) def test_disabled_event_source_mapping_with_dynamodb(self): function_name = 'lambda_func-{}'.format(short_uid()) ddb_table = 'ddb_table-{}'.format(short_uid()) testutil.create_lambda_function( handler_file=TEST_LAMBDA_ECHO_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36 ) table_arn = aws_stack.create_dynamodb_table(ddb_table, partition_key='id')['TableDescription']['TableArn'] lambda_client = aws_stack.connect_to_service('lambda') rs = lambda_client.create_event_source_mapping( FunctionName=function_name, EventSourceArn=table_arn ) uuid = rs['UUID'] dynamodb = aws_stack.connect_to_resource('dynamodb') table = dynamodb.Table(ddb_table) items = [ {'id': short_uid(), 'data': 'data1'}, {'id': short_uid(), 'data': 'data2'} ] table.put_item(Item=items[0]) events = get_lambda_log_events(function_name) # lambda was invoked 1 time self.assertEqual(len(events[0]['Records']), 1) # disable event source mapping lambda_client.update_event_source_mapping( UUID=uuid, Enabled=False ) table.put_item(Item=items[1]) events = get_lambda_log_events(function_name) # lambda no longer invoked, still have 1 event self.assertEqual(len(events[0]['Records']), 1) # clean up dynamodb_client = aws_stack.connect_to_service('dynamodb') dynamodb_client.delete_table(TableName=ddb_table) lambda_client.delete_function(FunctionName=function_name) def test_deletion_event_source_mapping_with_dynamodb(self): function_name = 'lambda_func-{}'.format(short_uid()) ddb_table = 'ddb_table-{}'.format(short_uid()) testutil.create_lambda_function( handler_file=TEST_LAMBDA_ECHO_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36 ) table_arn = aws_stack.create_dynamodb_table(ddb_table, partition_key='id')['TableDescription']['TableArn'] lambda_client = aws_stack.connect_to_service('lambda') lambda_client.create_event_source_mapping( FunctionName=function_name, EventSourceArn=table_arn ) dynamodb_client = aws_stack.connect_to_service('dynamodb') dynamodb_client.delete_table(TableName=ddb_table) result = lambda_client.list_event_source_mappings(EventSourceArn=table_arn) self.assertEqual(len(result['EventSourceMappings']), 0) # clean up lambda_client.delete_function(FunctionName=function_name) def test_event_source_mapping_with_sqs(self): lambda_client = aws_stack.connect_to_service('lambda') sqs_client = aws_stack.connect_to_service('sqs') function_name = 
'lambda_func-{}'.format(short_uid()) queue_name_1 = 'queue-{}-1'.format(short_uid()) testutil.create_lambda_function( handler_file=TEST_LAMBDA_ECHO_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36 ) queue_url_1 = sqs_client.create_queue(QueueName=queue_name_1)['QueueUrl'] queue_arn_1 = aws_stack.sqs_queue_arn(queue_name_1) lambda_client.create_event_source_mapping( EventSourceArn=queue_arn_1, FunctionName=function_name ) sqs_client.send_message(QueueUrl=queue_url_1, MessageBody=json.dumps({'foo': 'bar'})) events = retry(get_lambda_log_events, sleep_before=3, function_name=function_name) # lambda was invoked 1 time self.assertEqual(len(events[0]['Records']), 1) rs = sqs_client.receive_message(QueueUrl=queue_url_1) self.assertEqual(rs.get('Messages'), None) # clean up sqs_client.delete_queue(QueueUrl=queue_url_1) lambda_client.delete_function(FunctionName=function_name) def test_create_kinesis_event_source_mapping(self): function_name = 'lambda_func-{}'.format(short_uid()) stream_name = 'test-foobar' testutil.create_lambda_function( handler_file=TEST_LAMBDA_ECHO_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36 ) arn = aws_stack.kinesis_stream_arn(stream_name, account_id='000000000000') lambda_client = aws_stack.connect_to_service('lambda') lambda_client.create_event_source_mapping( EventSourceArn=arn, FunctionName=function_name ) stream_name = 'test-foobar' aws_stack.create_kinesis_stream(stream_name, delete=True) kinesis_connector.listen_to_kinesis( stream_name=stream_name, wait_until_started=True) kinesis = aws_stack.connect_to_service('kinesis') stream_summary = kinesis.describe_stream_summary(StreamName=stream_name) self.assertEqual(stream_summary['StreamDescriptionSummary']['OpenShardCount'], 1) num_events_kinesis = 10 kinesis.put_records(Records=[ { 'Data': '{}', 'PartitionKey': 'test_%s' % i } for i in range(0, num_events_kinesis) ], StreamName=stream_name) events = get_lambda_log_events(function_name) self.assertEqual(len(events[0]['Records']), 10) self.assertIn('eventID', events[0]['Records'][0]) self.assertIn('eventSourceARN', events[0]['Records'][0]) self.assertIn('eventSource', events[0]['Records'][0]) self.assertIn('eventVersion', events[0]['Records'][0]) self.assertIn('eventName', events[0]['Records'][0]) self.assertIn('invokeIdentityArn', events[0]['Records'][0]) self.assertIn('awsRegion', events[0]['Records'][0]) self.assertIn('kinesis', events[0]['Records'][0]) class TestPythonRuntimes(LambdaTestBase): @classmethod def setUpClass(cls): cls.lambda_client = aws_stack.connect_to_service('lambda') cls.s3_client = aws_stack.connect_to_service('s3') cls.sns_client = aws_stack.connect_to_service('sns') Util.create_function(TEST_LAMBDA_PYTHON, TEST_LAMBDA_NAME_PY, runtime=LAMBDA_RUNTIME_PYTHON27, libs=TEST_LAMBDA_LIBS) @classmethod def tearDownClass(cls): testutil.delete_lambda_function(TEST_LAMBDA_NAME_PY) def test_invocation_type_not_set(self): result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}') result_data = json.loads(result['Payload'].read()) self.assertEqual(result['StatusCode'], 200) self.assertEqual(result_data['event'], {}) def test_invocation_type_request_response(self): result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}', InvocationType='RequestResponse' ) result_data = result['Payload'].read() result_data = json.loads(to_str(result_data)) self.assertEqual(result['StatusCode'], 200) self.assertIsInstance(result_data, dict) def test_invocation_type_event(self): result = 
self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}', InvocationType='Event') self.assertEqual(result['StatusCode'], 202) def test_invocation_type_dry_run(self): result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_PY, Payload=b'{}', InvocationType='DryRun') self.assertEqual(result['StatusCode'], 204) def test_lambda_environment(self): vars = {'Hello': 'World'} testutil.create_lambda_function(handler_file=TEST_LAMBDA_ENV, libs=TEST_LAMBDA_LIBS, func_name=TEST_LAMBDA_NAME_ENV, runtime=LAMBDA_RUNTIME_PYTHON27, envvars=vars) # invoke function and assert result contains env vars result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_ENV, Payload=b'{}') result_data = result['Payload'] self.assertEqual(result['StatusCode'], 200) self.assertDictEqual(json.load(result_data), vars) # get function config and assert result contains env vars result = self.lambda_client.get_function_configuration( FunctionName=TEST_LAMBDA_NAME_ENV) self.assertEqual(result['Environment'], {'Variables': vars}) # clean up testutil.delete_lambda_function(TEST_LAMBDA_NAME_ENV) def test_invocation_with_qualifier(self): lambda_name = 'test_lambda_%s' % short_uid() bucket_name = 'test-bucket-lambda2' bucket_key = 'test_lambda.zip' # upload zip file to S3 zip_file = testutil.create_lambda_archive( load_file(TEST_LAMBDA_PYTHON), get_content=True, libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27) self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.upload_fileobj( BytesIO(zip_file), bucket_name, bucket_key) # create lambda function response = self.lambda_client.create_function( FunctionName=lambda_name, Handler='handler.handler', Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1', Code={ 'S3Bucket': bucket_name, 'S3Key': bucket_key }, Publish=True ) self.assertIn('Version', response) # invoke lambda function data_before = b'{"foo": "bar with \'quotes\\""}' result = self.lambda_client.invoke( FunctionName=lambda_name, Payload=data_before, Qualifier=response['Version'] ) data_after = json.loads(result['Payload'].read()) self.assertEqual(json.loads(to_str(data_before)), data_after['event']) context = data_after['context'] self.assertEqual(response['Version'], context['function_version']) self.assertEqual(lambda_name, context['function_name']) # assert that logs are present expected = ['Lambda log message - print function'] if use_docker(): # Note that during regular test execution, nosetests captures the output from # the logging module - hence we can only expect this when running in Docker expected.append('.*Lambda log message - logging module') self.check_lambda_logs(lambda_name, expected_lines=expected) # clean up testutil.delete_lambda_function(lambda_name) def test_upload_lambda_from_s3(self): lambda_name = 'test_lambda_%s' % short_uid() bucket_name = 'test-bucket-lambda' bucket_key = 'test_lambda.zip' # upload zip file to S3 zip_file = testutil.create_lambda_archive( load_file(TEST_LAMBDA_PYTHON), get_content=True, libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27) self.s3_client.create_bucket(Bucket=bucket_name) self.s3_client.upload_fileobj( BytesIO(zip_file), bucket_name, bucket_key) # create lambda function self.lambda_client.create_function( FunctionName=lambda_name, Handler='handler.handler', Runtime=lambda_api.LAMBDA_RUNTIME_PYTHON27, Role='r1', Code={ 'S3Bucket': bucket_name, 'S3Key': bucket_key } ) # invoke lambda function data_before = b'{"foo": "bar with \'quotes\\""}' result = self.lambda_client.invoke( FunctionName=lambda_name, 
Payload=data_before) data_after = json.loads(result['Payload'].read()) self.assertEqual(json.loads(to_str(data_before)), data_after['event']) context = data_after['context'] self.assertEqual('$LATEST', context['function_version']) self.assertEqual(lambda_name, context['function_name']) # clean up testutil.delete_lambda_function(lambda_name) def test_python_lambda_running_in_docker(self): if not use_docker(): return testutil.create_lambda_function( handler_file=TEST_LAMBDA_PYTHON3, libs=TEST_LAMBDA_LIBS, func_name=TEST_LAMBDA_NAME_PY3, runtime=LAMBDA_RUNTIME_PYTHON36) result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_PY3, Payload=b'{}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertEqual(to_str(result_data).strip(), '{}') # clean up testutil.delete_lambda_function(TEST_LAMBDA_NAME_PY3) def test_handler_in_submodule(self): func_name = 'lambda-%s' % short_uid() zip_file = testutil.create_lambda_archive( load_file(TEST_LAMBDA_PYTHON), get_content=True, libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON36, file_name='abc/def/main.py') testutil.create_lambda_function(func_name=func_name, zip_file=zip_file, handler='abc.def.main.handler', runtime=LAMBDA_RUNTIME_PYTHON36) # invoke function and assert result result = self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}') result_data = json.loads(result['Payload'].read()) self.assertEqual(result['StatusCode'], 200) self.assertEqual(result_data['event'], json.loads('{}')) def test_python3_runtime_multiple_create_with_conflicting_module(self): original_do_use_docker = lambda_api.DO_USE_DOCKER try: # always use the local runner lambda_api.DO_USE_DOCKER = False python3_with_settings1 = load_file(TEST_LAMBDA_PYTHON3_MULTIPLE_CREATE1, mode='rb') python3_with_settings2 = load_file(TEST_LAMBDA_PYTHON3_MULTIPLE_CREATE2, mode='rb') lambda_name1 = 'test1-%s' % short_uid() testutil.create_lambda_function(func_name=lambda_name1, zip_file=python3_with_settings1, runtime=LAMBDA_RUNTIME_PYTHON36, handler='handler1.handler') lambda_name2 = 'test2-%s' % short_uid() testutil.create_lambda_function(func_name=lambda_name2, zip_file=python3_with_settings2, runtime=LAMBDA_RUNTIME_PYTHON36, handler='handler2.handler') result1 = self.lambda_client.invoke(FunctionName=lambda_name1, Payload=b'{}') result_data1 = result1['Payload'].read() result2 = self.lambda_client.invoke(FunctionName=lambda_name2, Payload=b'{}') result_data2 = result2['Payload'].read() self.assertEqual(result1['StatusCode'], 200) self.assertIn('setting1', to_str(result_data1)) self.assertEqual(result2['StatusCode'], 200) self.assertIn('setting2', to_str(result_data2)) # clean up testutil.delete_lambda_function(lambda_name1) testutil.delete_lambda_function(lambda_name2) finally: lambda_api.DO_USE_DOCKER = original_do_use_docker def test_lambda_subscribe_sns_topic(self): function_name = '{}-{}'.format(TEST_LAMBDA_FUNCTION_PREFIX, short_uid()) testutil.create_lambda_function(handler_file=TEST_LAMBDA_ECHO_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36) topic = self.sns_client.create_topic(Name=TEST_SNS_TOPIC_NAME) topic_arn = topic['TopicArn'] self.sns_client.subscribe( TopicArn=topic_arn, Protocol='lambda', Endpoint=lambda_api.func_arn(function_name), ) subject = '[Subject] Test subject' message = 'Hello world.' 
self.sns_client.publish( TopicArn=topic_arn, Subject=subject, Message=message ) events = retry(check_expected_lambda_log_events_length, retries=3, sleep=1, function_name=function_name, expected_length=1) notification = events[0]['Records'][0]['Sns'] self.assertIn('Subject', notification) self.assertEqual(notification['Subject'], subject) def test_lambda_send_message_to_sqs(self): function_name = '{}-{}'.format(TEST_LAMBDA_FUNCTION_PREFIX, short_uid()) queue_name = 'lambda-queue-{}'.format(short_uid()) sqs_client = aws_stack.connect_to_service('sqs') testutil.create_lambda_function(handler_file=TEST_LAMBDA_SEND_MESSAGE_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36) queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl'] event = { 'message': 'message-from-test-lambda-{}'.format(short_uid()), 'queue_name': queue_name, 'region_name': config.DEFAULT_REGION } self.lambda_client.invoke( FunctionName=function_name, Payload=json.dumps(event) ) # assert that message has been received on the Queue def receive_message(): rs = sqs_client.receive_message(QueueUrl=queue_url, MessageAttributeNames=['All']) self.assertGreater(len(rs['Messages']), 0) return rs['Messages'][0] message = retry(receive_message, retries=3, sleep=2) self.assertEqual(message['Body'], event['message']) # clean up testutil.delete_lambda_function(function_name) sqs_client.delete_queue(QueueUrl=queue_url) def test_lambda_put_item_to_dynamodb(self): table_name = 'ddb-table-{}'.format(short_uid()) function_name = '{}-{}'.format(TEST_LAMBDA_FUNCTION_PREFIX, short_uid()) aws_stack.create_dynamodb_table(table_name, partition_key='id') testutil.create_lambda_function(handler_file=TEST_LAMBDA_PUT_ITEM_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36) data = { short_uid(): 'data-{}'.format(i) for i in range(3) } event = { 'table_name': table_name, 'region_name': config.DEFAULT_REGION, 'items': [{'id': k, 'data': v} for k, v in data.items()] } self.lambda_client.invoke( FunctionName=function_name, Payload=json.dumps(event) ) dynamodb = aws_stack.connect_to_resource('dynamodb') rs = dynamodb.Table(table_name).scan() items = rs['Items'] self.assertEqual(len(items), len(data.keys())) for item in items: self.assertEqual(data[item['id']], item['data']) # clean up testutil.delete_lambda_function(function_name) dynamodb_client = aws_stack.connect_to_service('dynamodb') dynamodb_client.delete_table(TableName=table_name) def test_lambda_start_stepfunctions_execution(self): function_name = '{}-{}'.format(TEST_LAMBDA_FUNCTION_PREFIX, short_uid()) resource_lambda_name = '{}-{}'.format(TEST_LAMBDA_FUNCTION_PREFIX, short_uid()) state_machine_name = 'state-machine-{}'.format(short_uid()) testutil.create_lambda_function(handler_file=TEST_LAMBDA_START_EXECUTION_FILE, func_name=function_name, runtime=LAMBDA_RUNTIME_PYTHON36) testutil.create_lambda_function(handler_file=TEST_LAMBDA_ECHO_FILE, func_name=resource_lambda_name, runtime=LAMBDA_RUNTIME_PYTHON36) state_machine_def = { 'StartAt': 'step1', 'States': { 'step1': { 'Type': 'Task', 'Resource': aws_stack.lambda_function_arn(resource_lambda_name), 'ResultPath': '$.result_value', 'End': True } } } sfn_client = aws_stack.connect_to_service('stepfunctions') rs = sfn_client.create_state_machine( name=state_machine_name, definition=json.dumps(state_machine_def), roleArn=aws_stack.role_arn('sfn_role') ) sm_arn = rs['stateMachineArn'] self.lambda_client.invoke( FunctionName=function_name, Payload=json.dumps({ 'state_machine_arn': sm_arn, 'region_name': 
config.DEFAULT_REGION, 'input': {} }) ) time.sleep(1) rs = sfn_client.list_executions( stateMachineArn=sm_arn ) # assert that state machine get executed 1 time self.assertEqual( len([ex for ex in rs['executions'] if ex['stateMachineArn'] == sm_arn]), 1 ) # clean up testutil.delete_lambda_function(function_name) testutil.delete_lambda_function(resource_lambda_name) # clean up sfn_client.delete_state_machine(stateMachineArn=sm_arn) def create_multiple_lambda_permissions(self): iam_client = aws_stack.connect_to_service('iam') lambda_client = aws_stack.connect_to_service('lambda') role_name = 'role-{}'.format(short_uid()) assume_policy_document = { 'Version': '2012-10-17', 'Statement': [ { 'Action': 'sts:AssumeRole', 'Principal': {'Service': 'lambda.amazonaws.com'} } ] } iam_client.create_role( RoleName=role_name, AssumeRolePolicyDocument=json.dumps(assume_policy_document) ) Util.create_function('testLambda', TEST_LAMBDA_NAME_PY, runtime=LAMBDA_RUNTIME_PYTHON37, libs=TEST_LAMBDA_LIBS) action = 'lambda:InvokeFunction' sid = 'logs' resp = lambda_client.add_permission(FunctionName='testLambda', Action=action, StatementId=sid, Principal='logs.amazonaws.com') self.assertIn('Statement', resp) sid = 'kinesis' resp = lambda_client.add_permission(FunctionName='testLambda', Action=action, StatementId=sid, Principal='kinesis.amazonaws.com') self.assertIn('Statement', resp) # delete lambda testutil.delete_lambda_function(TEST_LAMBDA_PYTHON) class TestNodeJSRuntimes(LambdaTestBase): @classmethod def setUpClass(cls): cls.lambda_client = aws_stack.connect_to_service('lambda') def test_nodejs_lambda_running_in_docker(self): if not use_docker(): return testutil.create_lambda_function( func_name=TEST_LAMBDA_NAME_JS, handler_file=TEST_LAMBDA_NODEJS, handler='lambda_integration.handler', runtime=LAMBDA_RUNTIME_NODEJS810 ) ctx = {'custom': {'foo': 'bar'}, 'client': {'snap': ['crackle', 'pop']}, 'env': {'fizz': 'buzz'}} result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_JS, Payload=b'{}', ClientContext=to_str(base64.b64encode(to_bytes(json.dumps(ctx))))) result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertEqual(json.loads(json.loads(result_data)['context']['clientContext']). 
get('custom').get('foo'), 'bar') # assert that logs are present expected = ['.*Node.js Lambda handler executing.'] self.check_lambda_logs(TEST_LAMBDA_NAME_JS, expected_lines=expected) # clean up testutil.delete_lambda_function(TEST_LAMBDA_NAME_JS) def test_invoke_nodejs_lambda(self): if not use_docker(): return handler_file = os.path.join(THIS_FOLDER, 'lambdas', 'lambda_handler.js') testutil.create_lambda_function( func_name=TEST_LAMBDA_NAME_JS, zip_file=testutil.create_zip_file(handler_file, get_content=True), runtime=LAMBDA_RUNTIME_NODEJS12X, handler='lambda_handler.handler' ) rs = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_JS, Payload=json.dumps({ 'event_type': 'test_lambda' }) ) self.assertEqual(rs['ResponseMetadata']['HTTPStatusCode'], 200) events = get_lambda_log_events(TEST_LAMBDA_NAME_JS) self.assertGreater(len(events), 0) # clean up testutil.delete_lambda_function(TEST_LAMBDA_NAME_JS) class TestCustomRuntimes(LambdaTestBase): @classmethod def setUpClass(cls): cls.lambda_client = aws_stack.connect_to_service('lambda') def test_nodejs_lambda_running_in_docker(self): if not use_docker(): return testutil.create_lambda_function( func_name=TEST_LAMBDA_NAME_CUSTOM_RUNTIME, handler_file=TEST_LAMBDA_CUSTOM_RUNTIME, handler='function.handler', runtime=LAMBDA_RUNTIME_PROVIDED ) result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_CUSTOM_RUNTIME, Payload=b'{"text":"bar with \'quotes\\""}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertEqual( to_str(result_data).strip(), """Echoing request: '{"text": "bar with \'quotes\\""}'""") # assert that logs are present expected = ['.*Custom Runtime Lambda handler executing.'] self.check_lambda_logs( TEST_LAMBDA_NAME_CUSTOM_RUNTIME, expected_lines=expected) # clean up testutil.delete_lambda_function(TEST_LAMBDA_NAME_CUSTOM_RUNTIME) class TestDotNetCoreRuntimes(LambdaTestBase): @classmethod def setUpClass(cls): cls.lambda_client = aws_stack.connect_to_service('lambda') cls.zip_file_content2 = load_file(TEST_LAMBDA_DOTNETCORE2, mode='rb') cls.zip_file_content31 = load_file(TEST_LAMBDA_DOTNETCORE31, mode='rb') def __run_test(self, func_name, zip_file, handler, runtime, expected_lines): if not use_docker(): return testutil.create_lambda_function( func_name=func_name, zip_file=zip_file, handler=handler, runtime=runtime) result = self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertEqual(to_str(result_data).strip(), '{}') # TODO make lambda log checks more resilient to various formats # self.check_lambda_logs(func_name, expected_lines=expected_lines) testutil.delete_lambda_function(func_name) def test_dotnetcore2_lambda_running_in_docker(self): self.__run_test( func_name=TEST_LAMBDA_NAME_DOTNETCORE2, zip_file=self.zip_file_content2, handler='DotNetCore2::DotNetCore2.Lambda.Function::SimpleFunctionHandler', runtime=LAMBDA_RUNTIME_DOTNETCORE2, expected_lines=['Running .NET Core 2.0 Lambda']) def test_dotnetcore31_lambda_running_in_docker(self): self.__run_test( func_name=TEST_LAMBDA_NAME_DOTNETCORE31, zip_file=self.zip_file_content31, handler='dotnetcore31::dotnetcore31.Function::FunctionHandler', runtime=LAMBDA_RUNTIME_DOTNETCORE31, expected_lines=['Running .NET Core 3.1 Lambda']) class TestRubyRuntimes(LambdaTestBase): @classmethod def setUpClass(cls): cls.lambda_client = aws_stack.connect_to_service('lambda') def test_ruby_lambda_running_in_docker(self): if not use_docker(): return 
testutil.create_lambda_function( func_name=TEST_LAMBDA_NAME_RUBY, handler_file=TEST_LAMBDA_RUBY, handler='lambda_integration.handler', runtime=LAMBDA_RUNTIME_RUBY25 ) result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_RUBY, Payload=b'{}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertEqual(to_str(result_data).strip(), '{}') # clean up testutil.delete_lambda_function(TEST_LAMBDA_NAME_RUBY) class TestJavaRuntimes(LambdaTestBase): @classmethod def setUpClass(cls): cls.lambda_client = aws_stack.connect_to_service('lambda') # deploy lambda - Java if not os.path.exists(TEST_LAMBDA_JAVA): mkdir(os.path.dirname(TEST_LAMBDA_JAVA)) download(TEST_LAMBDA_JAR_URL, TEST_LAMBDA_JAVA) # deploy Lambda - default handler cls.test_java_jar = load_file(TEST_LAMBDA_JAVA, mode='rb') zip_dir = new_tmp_dir() zip_lib_dir = os.path.join(zip_dir, 'lib') zip_jar_path = os.path.join(zip_lib_dir, 'test.lambda.jar') mkdir(zip_lib_dir) cp_r(INSTALL_PATH_LOCALSTACK_FAT_JAR, os.path.join(zip_lib_dir, 'executor.lambda.jar')) save_file(zip_jar_path, cls.test_java_jar) cls.test_java_zip = testutil.create_zip_file(zip_dir, get_content=True) testutil.create_lambda_function( func_name=TEST_LAMBDA_NAME_JAVA, zip_file=cls.test_java_zip, runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.LambdaHandler' ) # Deploy lambda - Java with stream handler. # Lambda supports single JAR deployments without the zip, so we upload the JAR directly. testutil.create_lambda_function( func_name=TEST_LAMBDA_NAME_JAVA_STREAM, zip_file=cls.test_java_jar, runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.LambdaStreamHandler' ) # deploy lambda - Java with serializable input object testutil.create_lambda_function( func_name=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE, zip_file=cls.test_java_zip, runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.SerializedInputLambdaHandler' ) # deploy lambda - Java with Kinesis input object testutil.create_lambda_function( func_name=TEST_LAMBDA_NAME_JAVA_KINESIS, zip_file=cls.test_java_zip, runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.KinesisLambdaHandler' ) @classmethod def tearDownClass(cls): # clean up testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA) testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA_STREAM) testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA_SERIALIZABLE) testutil.delete_lambda_function(TEST_LAMBDA_NAME_JAVA_KINESIS) def test_java_runtime(self): self.assertIsNotNone(self.test_java_jar) result = self.lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA, Payload=b'{}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) # TODO: find out why the assertion below does not work in Travis-CI! 
(seems to work locally) # self.assertIn('LinkedHashMap', to_str(result_data)) self.assertIsNotNone(result_data) def test_java_runtime_with_lib(self): java_jar_with_lib = load_file(TEST_LAMBDA_JAVA_WITH_LIB, mode='rb') # create ZIP file from JAR file jar_dir = new_tmp_dir() zip_dir = new_tmp_dir() unzip(TEST_LAMBDA_JAVA_WITH_LIB, jar_dir) zip_lib_dir = os.path.join(zip_dir, 'lib') shutil.move(os.path.join(jar_dir, 'lib'), zip_lib_dir) jar_without_libs_file = testutil.create_zip_file(jar_dir) shutil.copy(jar_without_libs_file, os.path.join(zip_lib_dir, 'lambda.jar')) java_zip_with_lib = testutil.create_zip_file(zip_dir, get_content=True) for archive in [java_jar_with_lib, java_zip_with_lib]: lambda_name = 'test-%s' % short_uid() testutil.create_lambda_function(func_name=lambda_name, zip_file=archive, runtime=LAMBDA_RUNTIME_JAVA8, handler='cloud.localstack.sample.LambdaHandlerWithLib') result = self.lambda_client.invoke(FunctionName=lambda_name, Payload=b'{"echo":"echo"}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertIn('echo', to_str(result_data)) # clean up testutil.delete_lambda_function(lambda_name) def test_sns_event(self): result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event', Payload=b'{"Records": [{"Sns": {"Message": "{}"}}]}') self.assertEqual(result['StatusCode'], 202) def test_ddb_event(self): result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event', Payload=b'{"Records": [{"dynamodb": {"Message": "{}"}}]}') self.assertEqual(result['StatusCode'], 202) def test_kinesis_invocation(self): payload = b'{"Records": [{"kinesis": {"data": "dGVzdA==", "partitionKey": "partition"}}]}' result = self.lambda_client.invoke(FunctionName=TEST_LAMBDA_NAME_JAVA_KINESIS, Payload=payload) result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertEqual(to_str(result_data).strip(), '"test "') def test_kinesis_event(self): result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_JAVA, InvocationType='Event', Payload=b'{"Records": [{"Kinesis": {"Data": "data", "PartitionKey": "partition"}}]}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 202) self.assertEqual(to_str(result_data).strip(), '') def test_stream_handler(self): result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_JAVA_STREAM, Payload=b'{}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertEqual(to_str(result_data).strip(), '{}') def test_serializable_input_object(self): result = self.lambda_client.invoke( FunctionName=TEST_LAMBDA_NAME_JAVA_SERIALIZABLE, Payload=b'{"bucket": "test_bucket", "key": "test_key"}') result_data = result['Payload'].read() self.assertEqual(result['StatusCode'], 200) self.assertDictEqual( json.loads(to_str(result_data)), {'validated': True, 'bucket': 'test_bucket', 'key': 'test_key'} ) def test_trigger_java_lambda_through_sns(self): topic_name = 'topic-%s' % short_uid() bucket_name = 'bucket-%s' % short_uid() key = 'key-%s' % short_uid() function_name = TEST_LAMBDA_NAME_JAVA sns_client = aws_stack.connect_to_service('sns') topic_arn = sns_client.create_topic(Name=topic_name)['TopicArn'] s3_client = aws_stack.connect_to_service('s3') s3_client.create_bucket(Bucket=bucket_name) s3_client.put_bucket_notification_configuration( Bucket=bucket_name, NotificationConfiguration={ 'TopicConfigurations': [ { 'TopicArn': topic_arn, 'Events': ['s3:ObjectCreated:*'] } ] } ) 
sns_client.subscribe( TopicArn=topic_arn, Protocol='lambda', Endpoint=aws_stack.lambda_function_arn(function_name) ) events_before = run_safe(get_lambda_log_events, function_name) or [] s3_client.put_object(Bucket=bucket_name, Key=key, Body='something') time.sleep(2) # We got an event that confirm lambda invoked retry(function=check_expected_lambda_log_events_length, retries=3, sleep=1, expected_length=len(events_before) + 1, function_name=function_name) # clean up sns_client.delete_topic(TopicArn=topic_arn) s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': key}]}) s3_client.delete_bucket(Bucket=bucket_name) class TestDockerBehaviour(LambdaTestBase): @classmethod def setUpClass(cls): cls.lambda_client = aws_stack.connect_to_service('lambda') def test_prime_and_destroy_containers(self): # run these tests only for the "reuse containers" Lambda executor if not isinstance(lambda_api.LAMBDA_EXECUTOR, lambda_executors.LambdaExecutorReuseContainers): return executor = lambda_api.LAMBDA_EXECUTOR func_name = 'test_prime_and_destroy_containers' func_arn = lambda_api.func_arn(func_name) # make sure existing containers are gone executor.cleanup() self.assertEqual(len(executor.get_all_container_names()), 0) # deploy and invoke lambda without Docker testutil.create_lambda_function( func_name=func_name, handler_file=TEST_LAMBDA_ENV, libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27, envvars={'Hello': 'World'}) self.assertEqual(len(executor.get_all_container_names()), 0) self.assertDictEqual(executor.function_invoke_times, {}) # invoke a few times. durations = [] num_iterations = 3 for i in range(0, num_iterations + 1): prev_invoke_time = None if i > 0: prev_invoke_time = executor.function_invoke_times[func_arn] start_time = time.time() self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}') duration = time.time() - start_time self.assertEqual(len(executor.get_all_container_names()), 1) # ensure the last invoke time is being updated properly. if i > 0: self.assertGreater(executor.function_invoke_times[func_arn], prev_invoke_time) else: self.assertGreater(executor.function_invoke_times[func_arn], 0) durations.append(duration) # the first call would have created the container. subsequent calls would reuse and be faster. 
for i in range(1, num_iterations + 1): self.assertLess(durations[i], durations[0]) status = executor.get_docker_container_status(func_arn) self.assertEqual(status, 1) container_network = executor.get_docker_container_network(func_arn) self.assertEqual(container_network, 'default') executor.cleanup() status = executor.get_docker_container_status(func_arn) self.assertEqual(status, 0) self.assertEqual(len(executor.get_all_container_names()), 0) # clean up testutil.delete_lambda_function(func_name) def test_docker_command_for_separate_container_lambda_executor(self): # run these tests only for the "separate containers" Lambda executor if not isinstance(lambda_api.LAMBDA_EXECUTOR, lambda_executors.LambdaExecutorSeparateContainers): return executor = lambda_api.LAMBDA_EXECUTOR func_name = 'test_docker_command_for_separate_container_lambda_executor' func_arn = lambda_api.func_arn(func_name) handler = 'handler' lambda_cwd = '/app/lambda' network = 'compose_network' dns = 'some-ip-address' config.LAMBDA_DOCKER_NETWORK = network config.LAMBDA_DOCKER_DNS = dns try: cmd = executor.prepare_execution( func_arn, {}, LAMBDA_RUNTIME_NODEJS810, '', handler, lambda_cwd) expected = 'docker run -v "%s":/var/task --network="%s" --dns="%s" --rm "lambci/lambda:%s" "%s"' % ( lambda_cwd, network, dns, LAMBDA_RUNTIME_NODEJS810, handler) self.assertIn(('--network="%s"' % network), cmd, 'cmd=%s expected=%s' % (cmd, expected)) self.assertIn(('--dns="%s"' % dns), cmd, 'cmd=%s expected=%s' % (cmd, expected)) finally: config.LAMBDA_DOCKER_NETWORK = '' config.LAMBDA_DOCKER_DNS = '' def test_destroy_idle_containers(self): # run these tests only for the "reuse containers" Lambda executor if not isinstance(lambda_api.LAMBDA_EXECUTOR, lambda_executors.LambdaExecutorReuseContainers): return executor = lambda_api.LAMBDA_EXECUTOR func_name = 'test_destroy_idle_containers' func_arn = lambda_api.func_arn(func_name) # make sure existing containers are gone executor.destroy_existing_docker_containers() self.assertEqual(len(executor.get_all_container_names()), 0) # deploy and invoke lambda without Docker testutil.create_lambda_function( func_name=func_name, handler_file=TEST_LAMBDA_ENV, libs=TEST_LAMBDA_LIBS, runtime=LAMBDA_RUNTIME_PYTHON27, envvars={'Hello': 'World'}) self.assertEqual(len(executor.get_all_container_names()), 0) self.lambda_client.invoke(FunctionName=func_name, Payload=b'{}') self.assertEqual(len(executor.get_all_container_names()), 1) # try to destroy idle containers. executor.idle_container_destroyer() self.assertEqual(len(executor.get_all_container_names()), 1) # simulate an idle container executor.function_invoke_times[func_arn] = time.time() - lambda_executors.MAX_CONTAINER_IDLE_TIME_MS executor.idle_container_destroyer() self.assertEqual(len(executor.get_all_container_names()), 0) # clean up testutil.delete_lambda_function(func_name) class Util(object): @classmethod def create_function(cls, file, name, runtime=None, libs=None): runtime = runtime or LAMBDA_RUNTIME_PYTHON27 testutil.create_lambda_function( func_name=name, handler_file=file, libs=TEST_LAMBDA_LIBS, runtime=runtime)
1
11,644
Can we add an assertion here that the policy actually contains both statements created above?
localstack-localstack
py
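The review comment above asks the LocalStack test to verify that both permission statements survive on the function's resource policy. A minimal sketch of such an assertion, assuming the function name 'testLambda' and the statement IDs 'logs' and 'kinesis' used in the test; get_policy is the standard boto3 Lambda call and returns the policy as a JSON-encoded string.

import json

import boto3

lambda_client = boto3.client('lambda')


def assert_policy_contains_statements(function_name, expected_sids):
    # get_policy returns the resource-based policy document as a JSON string
    policy = json.loads(lambda_client.get_policy(FunctionName=function_name)['Policy'])
    sids = {statement['Sid'] for statement in policy['Statement']}
    missing = set(expected_sids) - sids
    assert not missing, 'policy is missing statements: %s' % missing


# after the two add_permission calls in create_multiple_lambda_permissions:
assert_policy_contains_statements('testLambda', ['logs', 'kinesis'])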
@@ -59,7 +59,13 @@ DEFINE_bool(enable_partitioned_index_filter, false, "True for partitioned index DEFINE_string(rocksdb_compression, "snappy", "Compression algorithm used by RocksDB, " - "options: no,snappy,lz4,lz4hc,zstd,zlib,bzip2"); + "options: no,snappy,lz4,lz4hc,zstd,zlib,bzip2,xpress"); + +DEFINE_string(rocksdb_bottommost_compression, + "disable", + "Specify the bottommost level compression algorithm" + "options: no,snappy,lz4,lz4hc,zstd,zlib,bzip2,xpress,disable"); + DEFINE_string(rocksdb_compression_per_level, "", "Specify per level compression algorithm, "
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License. */ #include "kvstore/RocksEngineConfig.h" #include <rocksdb/cache.h> #include <rocksdb/concurrent_task_limiter.h> #include <rocksdb/convenience.h> #include <rocksdb/db.h> #include <rocksdb/filter_policy.h> #include <rocksdb/rate_limiter.h> #include <rocksdb/slice_transform.h> #include <rocksdb/utilities/options_util.h> #include "common/base/Base.h" #include "common/conf/Configuration.h" #include "common/fs/FileUtils.h" #include "common/utils/NebulaKeyUtils.h" #include "kvstore/EventListener.h" // [WAL] DEFINE_bool(rocksdb_disable_wal, false, "Whether to disable the WAL in rocksdb"); DEFINE_bool(rocksdb_wal_sync, false, "Whether WAL writes are synchronized to disk or not"); // [DBOptions] DEFINE_string(rocksdb_db_options, "{}", "json string of DBOptions, all keys and values are string"); // [CFOptions "default"] DEFINE_string(rocksdb_column_family_options, "{}", "json string of ColumnFamilyOptions, all keys and values are string"); // [TableOptions/BlockBasedTable "default"] DEFINE_string(rocksdb_block_based_table_options, "{}", "json string of BlockBasedTableOptions, all keys and values are string"); DEFINE_int32(rocksdb_batch_size, 4 * 1024, "default reserved bytes for one batch operation"); /* * For these un-supported string options as below, will need to specify them * with gflag. */ // BlockBasedTable block_cache DEFINE_int64(rocksdb_block_cache, 1024, "The default block cache size used in BlockBasedTable. The unit is MB"); DEFINE_int32(rocksdb_row_cache_num, 16 * 1000 * 1000, "Total keys inside the cache"); DEFINE_int32(cache_bucket_exp, 8, "Total buckets number is 1 << cache_bucket_exp"); DEFINE_bool(enable_partitioned_index_filter, false, "True for partitioned index filters"); DEFINE_string(rocksdb_compression, "snappy", "Compression algorithm used by RocksDB, " "options: no,snappy,lz4,lz4hc,zstd,zlib,bzip2"); DEFINE_string(rocksdb_compression_per_level, "", "Specify per level compression algorithm, " "delimited by `:', ignored fields will be " "replaced by FLAGS_rocksdb_compression. " "e.g. \"no:no:lz4:lz4::zstd\" === " "\"no:no:lz4:lz4:lz4:snappy:zstd:snappy\""); DEFINE_bool(enable_rocksdb_statistics, false, "Whether or not to enable rocksdb's statistics"); DEFINE_string(rocksdb_stats_level, "kExceptHistogramOrTimers", "rocksdb statistics level"); DEFINE_int32(num_compaction_threads, 0, "Number of total compaction threads. 0 means unlimited."); DEFINE_int32(rocksdb_rate_limit, 0, "write limit in bytes per sec. The unit is MB. 0 means unlimited."); DEFINE_bool(enable_rocksdb_whole_key_filtering, false, "Whether or not to enable rocksdb's whole key bloom filter"); DEFINE_bool(enable_rocksdb_prefix_filtering, true, "Whether or not to enable rocksdb's prefix bloom filter."); DEFINE_bool(rocksdb_compact_change_level, true, "If true, compacted files will be moved to the minimum level capable " "of holding the data or given level (specified non-negative " "target_level)."); DEFINE_int32(rocksdb_compact_target_level, -1, "If change_level is true and target_level have non-negative " "value, compacted files " "will be moved to target_level. 
If change_level is true and " "target_level is -1, " "compacted files will be moved to the minimum level capable of " "holding the data."); DEFINE_string(rocksdb_table_format, "BlockBasedTable", "SST file format of rocksdb, only support BlockBasedTable and PlainTable"); DEFINE_string(rocksdb_wal_dir, "", "Rocksdb wal directory"); DEFINE_string(rocksdb_backup_dir, "", "Rocksdb backup directory, only used in PlainTable format"); DEFINE_int32(rocksdb_backup_interval_secs, 300, "Rocksdb backup directory, only used in PlainTable format"); DEFINE_bool(rocksdb_enable_kv_separation, false, "Whether or not to enable BlobDB (RocksDB key-value separation support)"); DEFINE_uint64(rocksdb_kv_separation_threshold, 0, "RocksDB key value separation threshold. Values at or above this threshold will be " "written to blob files during flush or compaction." "This value is only effective when enable_kv_separation is true."); DEFINE_string(rocksdb_blob_compression, "snappy", "Compression algorithm for blobs, " "options: no,snappy,lz4,lz4hc,zstd,zlib,bzip2"); DEFINE_bool(rocksdb_enable_blob_garbage_collection, true, "Set this to true to make BlobDB actively relocate valid blobs " "from the oldest blob files as they are encountered during compaction"); namespace nebula { namespace kvstore { static const std::unordered_map<std::string, rocksdb::CompressionType> kCompressionTypeMap = { {"no", rocksdb::kNoCompression}, {"snappy", rocksdb::kSnappyCompression}, {"lz4", rocksdb::kLZ4Compression}, {"lz4hc", rocksdb::kLZ4HCCompression}, {"zstd", rocksdb::kZSTD}, {"zlib", rocksdb::kZlibCompression}, {"bzip2", rocksdb::kBZip2Compression}}; static rocksdb::Status initRocksdbCompression(rocksdb::Options& baseOpts) { // Set the general compression algorithm { auto it = kCompressionTypeMap.find(FLAGS_rocksdb_compression); if (it == kCompressionTypeMap.end()) { LOG(ERROR) << "Unsupported compression type: " << FLAGS_rocksdb_compression; return rocksdb::Status::InvalidArgument(); } baseOpts.compression = it->second; } if (FLAGS_rocksdb_compression_per_level.empty()) { return rocksdb::Status::OK(); } // Set the per level compression algorithm, which will override the general // one. 
Given baseOpts.compression is lz4, "no:::::zstd" equals to // "no:lz4:lz4:lz4:lz4:zstd:lz4" std::vector<std::string> compressions; folly::split(":", FLAGS_rocksdb_compression_per_level, compressions, false); compressions.resize(baseOpts.num_levels); baseOpts.compression_per_level.resize(baseOpts.num_levels); for (auto i = 0u; i < compressions.size(); i++) { if (compressions[i].empty()) { compressions[i] = FLAGS_rocksdb_compression; } auto it = kCompressionTypeMap.find(compressions[i]); if (it == kCompressionTypeMap.end()) { LOG(ERROR) << "Unsupported compression type: " << compressions[i]; return rocksdb::Status::InvalidArgument(); } baseOpts.compression_per_level[i] = it->second; } LOG(INFO) << "compression per level: " << folly::join(":", compressions); return rocksdb::Status::OK(); } static rocksdb::Status initRocksdbKVSeparation(rocksdb::Options& baseOpts) { if (FLAGS_rocksdb_enable_kv_separation) { baseOpts.enable_blob_files = true; baseOpts.min_blob_size = FLAGS_rocksdb_kv_separation_threshold; // set blob compresstion algorithm auto it = kCompressionTypeMap.find(FLAGS_rocksdb_blob_compression); if (it == kCompressionTypeMap.end()) { LOG(ERROR) << "Unsupported compression type: " << FLAGS_rocksdb_blob_compression; return rocksdb::Status::InvalidArgument(); } baseOpts.blob_compression_type = it->second; // set blob gc baseOpts.enable_blob_garbage_collection = FLAGS_rocksdb_enable_blob_garbage_collection; } return rocksdb::Status::OK(); } rocksdb::Status initRocksdbOptions(rocksdb::Options& baseOpts, GraphSpaceID spaceId, int32_t vidLen) { rocksdb::Status s; rocksdb::DBOptions dbOpts; rocksdb::ColumnFamilyOptions cfOpts; rocksdb::BlockBasedTableOptions bbtOpts; // DBOptions std::unordered_map<std::string, std::string> dbOptsMap; if (!loadOptionsMap(dbOptsMap, FLAGS_rocksdb_db_options)) { return rocksdb::Status::InvalidArgument(); } s = GetDBOptionsFromMap(rocksdb::DBOptions(), dbOptsMap, &dbOpts, true); if (!s.ok()) { return s; } std::shared_ptr<rocksdb::Statistics> stats = getDBStatistics(); if (stats) { dbOpts.statistics = std::move(stats); dbOpts.stats_dump_period_sec = 0; // exposing statistics ourself } dbOpts.listeners.emplace_back(new EventListener()); // if rocksdb_wal_dir is set, specify it to rocksdb if (!FLAGS_rocksdb_wal_dir.empty()) { auto walDir = folly::stringPrintf("%s/rocksdb_wal/%d", FLAGS_rocksdb_wal_dir.c_str(), spaceId); if (fs::FileUtils::fileType(walDir.c_str()) == fs::FileType::NOTEXIST) { if (!fs::FileUtils::makeDir(walDir)) { LOG(FATAL) << "makeDir " << walDir << " failed"; } } LOG(INFO) << "set rocksdb wal of space " << spaceId << " to " << walDir; dbOpts.wal_dir = walDir; } // ColumnFamilyOptions std::unordered_map<std::string, std::string> cfOptsMap; if (!loadOptionsMap(cfOptsMap, FLAGS_rocksdb_column_family_options)) { return rocksdb::Status::InvalidArgument(); } s = GetColumnFamilyOptionsFromMap(rocksdb::ColumnFamilyOptions(), cfOptsMap, &cfOpts, true); if (!s.ok()) { return s; } baseOpts = rocksdb::Options(dbOpts, cfOpts); s = initRocksdbCompression(baseOpts); if (!s.ok()) { return s; } s = initRocksdbKVSeparation(baseOpts); if (!s.ok()) { return s; } if (FLAGS_num_compaction_threads > 0) { static std::shared_ptr<rocksdb::ConcurrentTaskLimiter> compaction_thread_limiter{ rocksdb::NewConcurrentTaskLimiter("compaction", FLAGS_num_compaction_threads)}; baseOpts.compaction_thread_limiter = compaction_thread_limiter; } if (FLAGS_rocksdb_rate_limit > 0) { static std::shared_ptr<rocksdb::RateLimiter> rate_limiter{ 
rocksdb::NewGenericRateLimiter(FLAGS_rocksdb_rate_limit * 1024 * 1024)}; baseOpts.rate_limiter = rate_limiter; } size_t prefixLength = sizeof(PartitionID) + vidLen; if (FLAGS_rocksdb_table_format == "BlockBasedTable") { // BlockBasedTableOptions std::unordered_map<std::string, std::string> bbtOptsMap; if (!loadOptionsMap(bbtOptsMap, FLAGS_rocksdb_block_based_table_options)) { return rocksdb::Status::InvalidArgument(); } s = GetBlockBasedTableOptionsFromMap( rocksdb::BlockBasedTableOptions(), bbtOptsMap, &bbtOpts, true); if (!s.ok()) { return s; } if (FLAGS_rocksdb_block_cache <= 0) { bbtOpts.no_block_cache = true; } else { static std::shared_ptr<rocksdb::Cache> blockCache = rocksdb::NewLRUCache(FLAGS_rocksdb_block_cache * 1024 * 1024, FLAGS_cache_bucket_exp); bbtOpts.block_cache = blockCache; } if (FLAGS_rocksdb_row_cache_num) { static std::shared_ptr<rocksdb::Cache> rowCache = rocksdb::NewLRUCache(FLAGS_rocksdb_row_cache_num, FLAGS_cache_bucket_exp); baseOpts.row_cache = rowCache; } bbtOpts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false)); if (FLAGS_enable_partitioned_index_filter) { bbtOpts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch; bbtOpts.partition_filters = true; bbtOpts.cache_index_and_filter_blocks = true; bbtOpts.cache_index_and_filter_blocks_with_high_priority = true; bbtOpts.pin_top_level_index_and_filter = true; bbtOpts.pin_l0_filter_and_index_blocks_in_cache = baseOpts.compaction_style == rocksdb::CompactionStyle::kCompactionStyleLevel; } if (FLAGS_enable_rocksdb_prefix_filtering) { baseOpts.prefix_extractor.reset(rocksdb::NewCappedPrefixTransform(prefixLength)); } bbtOpts.whole_key_filtering = FLAGS_enable_rocksdb_whole_key_filtering; baseOpts.table_factory.reset(NewBlockBasedTableFactory(bbtOpts)); baseOpts.create_if_missing = true; } else if (FLAGS_rocksdb_table_format == "PlainTable") { // wal_dir need to be specified by rocksdb_wal_dir. // // WAL_ttl_seconds is 0 by default in rocksdb, which will check every 10 // mins, so rocksdb_backup_interval_secs is set to half of WAL_ttl_seconds // by default. 
WAL_ttl_seconds and rocksdb_backup_interval_secs need to be // modify together if necessary FLAGS_rocksdb_disable_wal = false; if (!FLAGS_enable_rocksdb_prefix_filtering) { return rocksdb::Status::InvalidArgument("PlainTable should use prefix bloom filter"); } baseOpts.prefix_extractor.reset(rocksdb::NewCappedPrefixTransform(prefixLength)); baseOpts.table_factory.reset(rocksdb::NewPlainTableFactory()); baseOpts.create_if_missing = true; } else { return rocksdb::Status::NotSupported("Illegal table format"); } return s; } bool loadOptionsMap(std::unordered_map<std::string, std::string>& map, const std::string& gflags) { conf::Configuration conf; auto status = conf.parseFromString(gflags); if (!status.ok()) { return false; } conf.forEachItem([&map](const std::string& key, const folly::dynamic& val) { LOG(INFO) << "Emplace rocksdb option " << key << "=" << val.asString(); map.emplace(key, val.asString()); }); return true; } static std::shared_ptr<rocksdb::Statistics> createDBStatistics() { std::shared_ptr<rocksdb::Statistics> dbstats = rocksdb::CreateDBStatistics(); if (FLAGS_rocksdb_stats_level == "kExceptHistogramOrTimers") { dbstats->set_stats_level(rocksdb::StatsLevel::kExceptHistogramOrTimers); } else if (FLAGS_rocksdb_stats_level == "kExceptTimers") { dbstats->set_stats_level(rocksdb::StatsLevel::kExceptTimers); } else if (FLAGS_rocksdb_stats_level == "kExceptDetailedTimers") { dbstats->set_stats_level(rocksdb::StatsLevel::kExceptDetailedTimers); } else if (FLAGS_rocksdb_stats_level == "kExceptTimeForMutex") { dbstats->set_stats_level(rocksdb::StatsLevel::kExceptTimeForMutex); } else { dbstats->set_stats_level(rocksdb::StatsLevel::kAll); } return dbstats; } std::shared_ptr<rocksdb::Statistics> getDBStatistics() { if (FLAGS_enable_rocksdb_statistics) { static std::shared_ptr<rocksdb::Statistics> dbstats = createDBStatistics(); return dbstats; } return nullptr; } } // namespace kvstore } // namespace nebula
1
33,139
a space after comma?
vesoft-inc-nebula
cpp
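The patch above only declares the rocksdb_bottommost_compression gflag; a hedged sketch of how it might be consumed inside the compression setup follows. The helper name initRocksdbBottommostCompression is an assumption, the "disable" sentinel comes from the flag's default, and supporting the new "xpress" option would also require adding rocksdb::kXpressCompression to kCompressionTypeMap.

// Sketch only: wire the new flag into the options, keeping RocksDB's default
// (kDisableCompressionOption) when the flag is left at "disable".
static rocksdb::Status initRocksdbBottommostCompression(rocksdb::Options& baseOpts) {
  if (FLAGS_rocksdb_bottommost_compression == "disable") {
    // Fall back to the regular per-level/general compression setting.
    return rocksdb::Status::OK();
  }
  auto it = kCompressionTypeMap.find(FLAGS_rocksdb_bottommost_compression);
  if (it == kCompressionTypeMap.end()) {
    LOG(ERROR) << "Unsupported bottommost compression type: "
               << FLAGS_rocksdb_bottommost_compression;
    return rocksdb::Status::InvalidArgument();
  }
  baseOpts.bottommost_compression = it->second;
  return rocksdb::Status::OK();
}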
@@ -70,6 +70,11 @@ public final class CorrectForClockSkew { if (skew != null) { // the current span's skew may be a different endpoint than skewFromParent, adjust again. node.value(adjustTimestamps(node.value(), skew)); + } else { + if (skewFromParent != null && isLocalSpan(node.value())) { + //Propagate skewFromParent to local spans + skew = skewFromParent; + } } // propagate skew to any children for (Node<Span> child : node.children()) {
1
/** * Copyright 2015-2017 The OpenZipkin Authors * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ package zipkin.internal; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import zipkin.Annotation; import zipkin.BinaryAnnotation; import zipkin.Constants; import zipkin.Endpoint; import zipkin.Span; /** * Adjusts spans whose children happen before their parents, based on core annotation values. */ public final class CorrectForClockSkew { static class ClockSkew { final Endpoint endpoint; final long skew; public ClockSkew(Endpoint endpoint, long skew) { this.endpoint = endpoint; this.skew = skew; } } public static List<Span> apply(List<Span> spans) { for (Span s : spans) { if (s.parentId == null) { Node<Span> tree = Node.constructTree(spans); adjust(tree, null); List<Span> result = new ArrayList<>(spans.size()); for (Iterator<Node<Span>> i = tree.traverse(); i.hasNext();) { result.add(i.next().value()); } return result; } } return spans; } /** * Recursively adjust the timestamps on the span tree. Root span is the reference point, all * children's timestamps gets adjusted based on that span's timestamps. */ static void adjust(Node<Span> node, @Nullable ClockSkew skewFromParent) { // adjust skew for the endpoint brought over from the parent span if (skewFromParent != null) { node.value(adjustTimestamps(node.value(), skewFromParent)); } // Is there any skew in the current span? ClockSkew skew = getClockSkew(node.value()); if (skew != null) { // the current span's skew may be a different endpoint than skewFromParent, adjust again. node.value(adjustTimestamps(node.value(), skew)); } // propagate skew to any children for (Node<Span> child : node.children()) { adjust(child, skew); } } /** If any annotation has an IP with skew associated, adjust accordingly. 
*/ static Span adjustTimestamps(Span span, ClockSkew skew) { List<Annotation> annotations = null; Long annotationTimestamp = null; for (int i = 0, length = span.annotations.size(); i < length; i++) { Annotation a = span.annotations.get(i); if (a.endpoint == null) continue; if (ipsMatch(skew.endpoint, a.endpoint)) { if (annotations == null) annotations = new ArrayList<>(span.annotations); if (span.timestamp!= null && a.timestamp == span.timestamp) { annotationTimestamp = a.timestamp; } annotations.set(i, a.toBuilder().timestamp(a.timestamp - skew.skew).build()); } } if (annotations != null) { Span.Builder builder = span.toBuilder().annotations(annotations); if (annotationTimestamp != null) { builder.timestamp(annotationTimestamp - skew.skew); } return builder.build(); } // Search for a local span on the skewed endpoint for (int i = 0, length = span.binaryAnnotations.size(); i < length; i++) { BinaryAnnotation b = span.binaryAnnotations.get(i); if (b.endpoint == null) continue; if (b.key.equals(Constants.LOCAL_COMPONENT) && ipsMatch(skew.endpoint, b.endpoint)) { return span.toBuilder().timestamp(span.timestamp - skew.skew).build(); } } return span; } static boolean ipsMatch(Endpoint skew, Endpoint that) { return (skew.ipv6 != null && Arrays.equals(skew.ipv6, that.ipv6)) || (skew.ipv4 != 0 && skew.ipv4 == that.ipv4); } /** Use client/server annotations to determine if there's clock skew. */ @Nullable static ClockSkew getClockSkew(Span span) { Map<String, Annotation> annotations = asMap(span.annotations); Long clientSend = getTimestamp(annotations, Constants.CLIENT_SEND); Long clientRecv = getTimestamp(annotations, Constants.CLIENT_RECV); Long serverRecv = getTimestamp(annotations, Constants.SERVER_RECV); Long serverSend = getTimestamp(annotations, Constants.SERVER_SEND); if (clientSend == null || clientRecv == null || serverRecv == null || serverSend == null) { return null; } Endpoint server = annotations.get(Constants.SERVER_RECV).endpoint; server = server == null ? annotations.get(Constants.SERVER_SEND).endpoint : server; if (server == null) return null; long clientDuration = clientRecv - clientSend; long serverDuration = serverSend - serverRecv; // There is only clock skew if CS is after SR or CR is before SS boolean csAhead = clientSend < serverRecv; boolean crAhead = clientRecv > serverSend; if (serverDuration > clientDuration || (csAhead && crAhead)) { return null; } long latency = (clientDuration - serverDuration) / 2; long skew = serverRecv - latency - clientSend; if (skew != 0L) { return new ClockSkew(server, skew); } return null; } /** Get the annotations as a map with value to annotation bindings. */ static Map<String, Annotation> asMap(List<Annotation> annotations) { Map<String, Annotation> result = new LinkedHashMap<>(annotations.size()); for (Annotation a : annotations) { result.put(a.value, a); } return result; } @Nullable static Long getTimestamp(Map<String, Annotation> annotations, String value) { Annotation result = annotations.get(value); return result != null ? result.timestamp : null; } private CorrectForClockSkew() { } }
1
12,135
nit: formatting here and below
openzipkin-zipkin
java
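The reviewer's formatting nit points at the nested else-block the patch introduces. Below is a sketch of the same logic collapsed into an else-if, plus one possible isLocalSpan helper; the helper is an assumption here (it treats a span carrying a Constants.LOCAL_COMPONENT binary annotation as local) and may differ from the actual PR.

// Same change as the patch, with the nested "else { if (...) }" flattened:
ClockSkew skew = getClockSkew(node.value());
if (skew != null) {
  // the current span's skew may be a different endpoint than skewFromParent, adjust again.
  node.value(adjustTimestamps(node.value(), skew));
} else if (skewFromParent != null && isLocalSpan(node.value())) {
  // propagate the parent's skew to local spans, which have no CS/SR annotations of their own
  skew = skewFromParent;
}

// One possible isLocalSpan: a span is "local" when it carries a
// Constants.LOCAL_COMPONENT ("lc") binary annotation.
static boolean isLocalSpan(Span span) {
  for (BinaryAnnotation b : span.binaryAnnotations) {
    if (Constants.LOCAL_COMPONENT.equals(b.key)) return true;
  }
  return false;
}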
@@ -5,9 +5,9 @@ import ( "github.com/golang/mock/gomock" "github.com/spiffe/spire/pkg/common/util" - "github.com/spiffe/spire/proto/spire/api/registration" - "github.com/spiffe/spire/proto/spire/common" - mock_registration "github.com/spiffe/spire/test/mock/proto/api/registration" + "github.com/spiffe/spire/proto/spire/api/server/entry/v1" + "github.com/spiffe/spire/proto/spire/types" + mock_entry "github.com/spiffe/spire/test/mock/proto/api/entry" "github.com/stretchr/testify/suite" )
1
package entry import ( "testing" "github.com/golang/mock/gomock" "github.com/spiffe/spire/pkg/common/util" "github.com/spiffe/spire/proto/spire/api/registration" "github.com/spiffe/spire/proto/spire/common" mock_registration "github.com/spiffe/spire/test/mock/proto/api/registration" "github.com/stretchr/testify/suite" ) func TestShowTestSuite(t *testing.T) { suite.Run(t, new(ShowTestSuite)) } type ShowTestSuite struct { suite.Suite cli *ShowCLI mockClient *mock_registration.MockRegistrationClient } func (s *ShowTestSuite) SetupTest() { mockCtrl := gomock.NewController(s.T()) defer mockCtrl.Finish() s.mockClient = mock_registration.NewMockRegistrationClient(mockCtrl) cli := &ShowCLI{ Config: new(ShowConfig), Client: s.mockClient, Entries: []*common.RegistrationEntry{}, } s.cli = cli } func (s *ShowTestSuite) TestRunWithEntryID() { entryID := "123456" args := []string{ "-entryID", entryID, } req := &registration.RegistrationEntryID{Id: entryID} resp := s.registrationEntries(1)[0] s.mockClient.EXPECT().FetchEntry(gomock.Any(), req).Return(resp, nil) s.Require().Equal(0, s.cli.Run(args)) s.Assert().Equal(s.registrationEntries(1), s.cli.Entries) } func (s *ShowTestSuite) TestRunWithParentID() { entries := s.registrationEntries(2) args := []string{ "-parentID", entries[0].ParentId, } req := &registration.ParentID{Id: entries[0].ParentId} resp := &common.RegistrationEntries{Entries: entries} s.mockClient.EXPECT().ListByParentID(gomock.Any(), req).Return(resp, nil) s.Require().Equal(0, s.cli.Run(args)) util.SortRegistrationEntries(entries) s.Assert().Equal(entries, s.cli.Entries) } func (s *ShowTestSuite) TestRunWithSpiffeID() { entries := s.registrationEntries(1) entry := entries[0] args := []string{ "-spiffeID", entry.SpiffeId, } req := &registration.SpiffeID{Id: entry.SpiffeId} resp := &common.RegistrationEntries{Entries: entries} s.mockClient.EXPECT().ListBySpiffeID(gomock.Any(), req).Return(resp, nil) s.Require().Equal(0, s.cli.Run(args)) s.Assert().Equal(entries, s.cli.Entries) } func (s *ShowTestSuite) TestRunWithSelector() { entries := s.registrationEntries(2) args := []string{ "-selector", "foo:bar", } req := &common.Selectors{ Entries: []*common.Selector{ {Type: "foo", Value: "bar"}, }, } resp := &common.RegistrationEntries{Entries: entries} s.mockClient.EXPECT().ListBySelectors(gomock.Any(), req).Return(resp, nil) s.Require().Equal(0, s.cli.Run(args)) util.SortRegistrationEntries(entries) s.Assert().Equal(entries, s.cli.Entries) } func (s *ShowTestSuite) TestRunWithSelectors() { entries := s.registrationEntries(2) args := []string{ "-selector", "foo:bar", "-selector", "bar:baz", } req := &common.Selectors{ Entries: []*common.Selector{ {Type: "foo", Value: "bar"}, {Type: "bar", Value: "baz"}, }, } resp := &common.RegistrationEntries{Entries: entries} s.mockClient.EXPECT().ListBySelectors(gomock.Any(), req).Return(resp, nil) s.Require().Equal(0, s.cli.Run(args)) s.Assert().Equal(entries[1:2], s.cli.Entries) } func (s *ShowTestSuite) TestRunWithParentIDAndSelectors() { entries := s.registrationEntries(4)[2:4] args := []string{ "-parentID", entries[0].ParentId, "-selector", "bar:baz", } req1 := &registration.ParentID{Id: entries[0].ParentId} resp := &common.RegistrationEntries{Entries: entries} s.mockClient.EXPECT().ListByParentID(gomock.Any(), req1).Return(resp, nil) req2 := &common.Selectors{ Entries: []*common.Selector{ {Type: "bar", Value: "baz"}, }, } resp = &common.RegistrationEntries{Entries: entries[0:1]} s.mockClient.EXPECT().ListBySelectors(gomock.Any(), req2).Return(resp, nil) 
s.Require().Equal(0, s.cli.Run(args)) expectEntries := entries[0:1] util.SortRegistrationEntries(expectEntries) s.Assert().Equal(expectEntries, s.cli.Entries) } func (s *ShowTestSuite) TestRunWithFederatesWith() { resp := &common.RegistrationEntries{ Entries: s.registrationEntries(4), } s.mockClient.EXPECT().FetchEntries(gomock.Any(), &common.Empty{}).Return(resp, nil) args := []string{ "-federatesWith", "spiffe://domain.test", } s.Require().Equal(0, s.cli.Run(args)) expectEntries := s.registrationEntries(4)[2:3] util.SortRegistrationEntries(expectEntries) s.Assert().Equal(expectEntries, s.cli.Entries) } // registrationEntries returns `count` registration entry records. At most 4. func (ShowTestSuite) registrationEntries(count int) []*common.RegistrationEntry { selectors := []*common.Selector{ {Type: "foo", Value: "bar"}, {Type: "bar", Value: "baz"}, {Type: "baz", Value: "bat"}, } entries := []*common.RegistrationEntry{ { ParentId: "spiffe://example.org/father", SpiffeId: "spiffe://example.org/son", Selectors: []*common.Selector{selectors[0]}, EntryId: "00000000-0000-0000-0000-000000000000", }, { ParentId: "spiffe://example.org/father", SpiffeId: "spiffe://example.org/daughter", Selectors: []*common.Selector{selectors[0], selectors[1]}, EntryId: "00000000-0000-0000-0000-000000000001", }, { ParentId: "spiffe://example.org/mother", SpiffeId: "spiffe://example.org/daughter", Selectors: []*common.Selector{selectors[1], selectors[2]}, EntryId: "00000000-0000-0000-0000-000000000002", FederatesWith: []string{"spiffe://domain.test"}, }, { ParentId: "spiffe://example.org/mother", SpiffeId: "spiffe://example.org/son", Selectors: []*common.Selector{selectors[2]}, EntryId: "00000000-0000-0000-0000-000000000003", }, } e := []*common.RegistrationEntry{} for i := 0; i < count; i++ { e = append(e, entries[i]) } return e }
1
14,988
I think there is a general consensus on avoiding this kind of mock in the new tests that we write. I would suggest having tests that use fake service implementations. In this case, we can have a fake entry service. Examples of how tests have been written this way are the tests for the `spire-server agent` and `spire-server bundle` commands. It would be great if we could have that kind of testing here as well. What do you think?
spiffe-spire
go
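A hedged sketch of the fake-service pattern the reviewer suggests. Every name below (Entry, entryLister, fakeEntryService) is hypothetical and stands in for the generated entry v1 client and types; the point is only the shape: the command depends on a narrow interface, and the test injects a fake that returns canned data instead of programming gomock expectations.

package entry

import "context"

// Entry is a stand-in for the real registration entry type.
type Entry struct {
	ID       string
	SpiffeID string
}

// entryLister is the narrow slice of the entry API that the show command needs.
type entryLister interface {
	ListEntries(ctx context.Context) ([]*Entry, error)
}

// fakeEntryService satisfies entryLister with canned data, no mock controller required.
type fakeEntryService struct {
	entries []*Entry
	err     error
}

func (f *fakeEntryService) ListEntries(ctx context.Context) ([]*Entry, error) {
	if f.err != nil {
		return nil, f.err
	}
	return f.entries, nil
}

// A test then wires the fake straight into the command under test, e.g.
// constructing the ShowCLI with &fakeEntryService{entries: wantEntries}.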
@@ -38,6 +38,6 @@ func (p *Provider) Type() string { return ProviderType } -func (p *Provider) RunQuery(ctx context.Context, query string) (bool, error) { - return false, nil +func (p *Provider) RunQuery(ctx context.Context, query string) (bool, string, error) { + return false, "", nil }
1
// Copyright 2020 The PipeCD Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package stackdriver import ( "context" "time" ) const ProviderType = "StackdriverLogging" // Provider is a client for stackdriver. type Provider struct { serviceAccount []byte timeout time.Duration } func NewProvider(serviceAccount []byte) (*Provider, error) { return &Provider{ serviceAccount: serviceAccount, }, nil } func (p *Provider) Type() string { return ProviderType } func (p *Provider) RunQuery(ctx context.Context, query string) (bool, error) { return false, nil }
1
15,353
`ctx` is unused in RunQuery
pipe-cd-pipe
go
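Following up on the review note: Go does not reject unused parameters, so this is a readability and linting concern rather than a compile error. One hedged option while RunQuery remains a stub is to use blank identifiers until a real Stackdriver query is wired in; the signature below matches the three-value form introduced by the patch.

// Stub implementation: parameters are intentionally ignored for now.
// Restore the ctx and query names once the Stackdriver call is implemented.
func (p *Provider) RunQuery(_ context.Context, _ string) (bool, string, error) {
	return false, "", nil
}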
@@ -276,12 +276,14 @@ def get_report_path_hash(report): """ Returns path hash for the given bug path. This can be used to filter deduplications of multiple reports. + + report type should be codechecker_common.Report """ report_path_hash = '' events = [i for i in report.bug_path if i.get('kind') == 'event'] - for event in events: - file_name = os.path.basename(report.files[event['location']['file']]) + file_name = \ + os.path.basename(report.files.get(event['location']['file'])) line = str(event['location']['line']) if 'location' in event else 0 col = str(event['location']['col']) if 'location' in event else 0
1
# ------------------------------------------------------------------------- # # Part of the CodeChecker project, under the Apache License v2.0 with # LLVM Exceptions. See LICENSE for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # # ------------------------------------------------------------------------- """ CodeChecker hash generation algorithms. """ import hashlib import logging import os import plistlib import sys import traceback LOG = logging.getLogger('codechecker_report_hash') handler = logging.StreamHandler() formatter = logging.Formatter('[%(levelname)s] - %(message)s') handler.setFormatter(formatter) LOG.setLevel(logging.INFO) LOG.addHandler(handler) class HashType(object): """ Report hash types. """ CONTEXT_FREE = 1 PATH_SENSITIVE = 2 def __get_line(file_name, line_no, errors='ignore'): """ Return the given line from the file. If line_no is larger than the number of lines in the file then empty string returns. If the file can't be opened for read, the function also returns empty string. Try to encode every file as utf-8 to read the line content do not depend on the platform settings. By default locale.getpreferredencoding() is used which depends on the platform. Changing the encoding error handling can influence the hash content! """ try: with open(file_name, mode='r', encoding='utf-8', errors=errors) as source_file: for line in source_file: line_no -= 1 if line_no == 0: return line return '' except IOError: LOG.error("Failed to open file %s", file_name) return '' def __str_to_hash(string_to_hash, errors='ignore'): """ Encodes the given string and generates a hash from it. """ string_hash = string_to_hash.encode(encoding="utf-8", errors=errors) return hashlib.md5(string_hash).hexdigest() def _remove_whitespace(line_content, old_col): """ This function removes white spaces from the line content parameter and calculates the new line location. Returns the line content without white spaces and the new column number. E.g.: line_content = " int foo = 17; sizeof(43); " ^ |- bug_col = 18 content_begin = " int foo = 17; " content_begin_strip = "intfoo=17;" line_strip_len = 18 - 10 => 8 ''.join(line_content.split()) => "intfoo=17;sizeof(43);" ^ |- until_col - line_strip_len 18 - 8 = 10 """ content_begin = line_content[:old_col] content_begin_strip = ''.join(content_begin.split()) line_strip_len = len(content_begin) - len(content_begin_strip) return ''.join(line_content.split()), \ old_col - line_strip_len def __get_report_hash_path_sensitive(diag, source_file): """ Report hash generation from the given diagnostic. Hash generation algorithm for older plist versions where no issue hash was generated or for the plists generated from Clang Tidy where the issue hash generation feature is still missing. As the main diagnostic section the last element from the bug path is used. High level overview of the hash content: * 'file_name' from the main diag section. * 'checker name' * 'checker message' * 'line content' from the source file if can be read up * 'column numbers' from the main diag section * 'range column numbers' only from the control diag sections if column number in the range is not the same as the previous control diag section number in the bug path. If there are no control sections event section column numbers are used. """ def compare_ctrl_sections(curr, prev): """ Compare two sections and return column numbers which should be included in the path hash or None if the two compared sections ranges are identical. 
""" curr_edges = curr['edges'] curr_start_range_begin = curr_edges[0]['start'][0] curr_start_range_end = curr_edges[0]['start'][1] prev_edges = prev['edges'] prev_end_range_begin = prev_edges[0]['end'][0] prev_end_range_end = prev_edges[0]['end'][1] if curr_start_range_begin != prev_end_range_begin and \ curr_start_range_end != prev_end_range_end: return (curr_start_range_begin['col'], curr_start_range_end['col']) return None path = diag['path'] # The last diag section from the bug path used as a main # diagnostic section. try: ctrl_sections = [x for x in path if x.get('kind') == 'control'] main_section = path[-1] m_loc = main_section.get('location') source_line = m_loc.get('line') from_col = m_loc.get('col') until_col = m_loc.get('col') # WARNING!!! Changing the error handling type for encoding errors # can influence the hash content! line_content = __get_line(source_file, source_line, errors='ignore') if line_content == '' and not os.path.isfile(source_file): LOG.error("Failed to generate report hash.") LOG.error('%s does not exists!', source_file) file_name = os.path.basename(source_file) msg = main_section.get('message') hash_content = [file_name, diag.get('check_name', 'unknown'), msg, line_content, str(from_col), str(until_col)] hash_from_ctrl_section = True for i, section in enumerate(ctrl_sections): edges = section['edges'] try: start_range_begin = edges[0]['start'][0] start_range_end = edges[0]['start'][1] end_range_begin = edges[0]['end'][0] end_range_end = edges[0]['end'][1] if i > 0: prev = ctrl_sections[i-1] col_to_append = compare_ctrl_sections(section, prev) if col_to_append: begin_col, end_col = col_to_append hash_content.append(str(begin_col)) hash_content.append(str(end_col)) else: hash_content.append(str(start_range_begin['col'])) hash_content.append(str(start_range_end['col'])) hash_content.append(str(end_range_begin['col'])) hash_content.append(str(end_range_end['col'])) except IndexError: # Edges might be empty. hash_from_ctrl_section = False # Hash generation from the control sections failed for some reason # use event section positions for hash generation. if not hash_from_ctrl_section: event_sections = [x for x in path if x.get('kind') == 'event'] for i, section in enumerate(event_sections): loc = section['location'] col_num = loc['col'] hash_content.append(str(col_num)) return __str_to_hash('|||'.join(hash_content)) except Exception as ex: LOG.error("Hash generation failed") LOG.error(ex) return '' def __get_report_hash_context_free(diag, source_file): """ Generate report hash without bug path. !!! NOT Compatible with the old hash generation method High level overview of the hash content: * 'file_name' from the main diag section. * 'checker message'. * 'line content' from the source file if can be read up. All the whitespaces from the source content are removed. * 'column numbers' from the main diag sections location. """ try: m_loc = diag.get('location') source_line = m_loc.get('line') from_col = m_loc.get('col') until_col = m_loc.get('col') # WARNING!!! Changing the error handling type for encoding errors # can influence the hash content! line_content = __get_line(source_file, source_line, errors='ignore') # Remove whitespaces so the hash will be independet of the # source code indentation. line_content, new_col = _remove_whitespace(line_content, from_col) # Update the column number in sync with the # removed whitespaces. 
until_col = until_col - (from_col-new_col) from_col = new_col if line_content == '' and not os.path.isfile(source_file): LOG.error("Failed to include soruce line in the report hash.") LOG.error('%s does not exists!', source_file) file_name = os.path.basename(source_file) msg = diag.get('description') hash_content = [file_name, msg, line_content, str(from_col), str(until_col)] return __str_to_hash('|||'.join(hash_content)) except Exception as ex: LOG.error("Hash generation failed") LOG.error(ex) return '' def get_report_hash(diag, file_path, hash_type): """ Get report hash for the given diagnostic. """ if hash_type == HashType.CONTEXT_FREE: return __get_report_hash_context_free(diag, file_path) elif hash_type == HashType.PATH_SENSITIVE: return __get_report_hash_path_sensitive(diag, file_path) else: raise Exception("Invalid report hash type: " + str(hash_type)) def get_report_path_hash(report): """ Returns path hash for the given bug path. This can be used to filter deduplications of multiple reports. """ report_path_hash = '' events = [i for i in report.bug_path if i.get('kind') == 'event'] for event in events: file_name = os.path.basename(report.files[event['location']['file']]) line = str(event['location']['line']) if 'location' in event else 0 col = str(event['location']['col']) if 'location' in event else 0 report_path_hash += line + '|' + col + '|' + event['message'] + \ file_name report_path_hash += report.check_name if not report_path_hash: LOG.error('Failed to generate report path hash!') LOG.error(report.bug_path) LOG.debug(report_path_hash) return __str_to_hash(report_path_hash) def replace_report_hash(plist_file, hash_type=HashType.CONTEXT_FREE): """ Override hash in the given file by using the given version hash. """ try: with open(plist_file, 'rb+') as pfile: plist = plistlib.load(pfile) pfile.seek(0) pfile.truncate() files = plist['files'] for diag in plist['diagnostics']: file_path = files[diag['location']['file']] report_hash = get_report_hash(diag, file_path, hash_type) diag['issue_hash_content_of_line_in_context'] = report_hash plistlib.dump(plist, pfile) except (TypeError, AttributeError, plistlib.InvalidFileException) as err: LOG.warning('Failed to process plist file: %s wrong file format?', plist_file) LOG.warning(err) except IndexError as iex: LOG.warning('Indexing error during processing plist file %s', plist_file) LOG.warning(type(iex)) LOG.warning(repr(iex)) _, _, exc_traceback = sys.exc_info() traceback.print_tb(exc_traceback, limit=1, file=sys.stdout) except Exception as ex: LOG.warning('Error during processing reports from the plist file: %s', plist_file) traceback.print_exc() LOG.warning(type(ex)) LOG.warning(ex)
1
12,584
Couldn't we use type hints to express this?
Ericsson-codechecker
c
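The review comment on this record asks whether type hints could express the contract; below is a minimal sketch of what that could look like for the hash helpers above (the plain `Dict` shapes and the string-quoted `HashType` reference are assumptions for illustration, not CodeChecker's actual annotations):

```python
from typing import Dict, Optional, Tuple


def compare_ctrl_sections(curr: Dict, prev: Dict) -> Optional[Tuple[int, int]]:
    """Return (begin_col, end_col) when the ranges differ, otherwise None."""
    ...


def get_report_hash(diag: Dict, file_path: str, hash_type: 'HashType') -> str:
    """Get report hash for the given diagnostic."""
    ...
```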
@@ -57,12 +57,12 @@ define(['connectionManager', 'serverNotifications', 'events', 'globalize', 'emby function setState(button, likes, isFavorite, updateAttribute) { - var icon = button.querySelector('i'); + var icon = button.querySelector('.material-icons'); if (isFavorite) { if (icon) { - icon.innerHTML = 'favorite'; + icon.classList.add('favorite'); icon.classList.add('ratingbutton-icon-withrating'); }
1
define(['connectionManager', 'serverNotifications', 'events', 'globalize', 'emby-button'], function (connectionManager, serverNotifications, events, globalize, EmbyButtonPrototype) { 'use strict'; function addNotificationEvent(instance, name, handler) { var localHandler = handler.bind(instance); events.on(serverNotifications, name, localHandler); instance[name] = localHandler; } function removeNotificationEvent(instance, name) { var handler = instance[name]; if (handler) { events.off(serverNotifications, name, handler); instance[name] = null; } } function showPicker(button, apiClient, itemId, likes, isFavorite) { return apiClient.updateFavoriteStatus(apiClient.getCurrentUserId(), itemId, !isFavorite); } function onClick(e) { var button = this; var id = button.getAttribute('data-id'); var serverId = button.getAttribute('data-serverid'); var apiClient = connectionManager.getApiClient(serverId); var likes = this.getAttribute('data-likes'); var isFavorite = this.getAttribute('data-isfavorite') === 'true'; if (likes === 'true') { likes = true; } else if (likes === 'false') { likes = false; } else { likes = null; } showPicker(button, apiClient, id, likes, isFavorite).then(function (userData) { setState(button, userData.Likes, userData.IsFavorite); }); } function onUserDataChanged(e, apiClient, userData) { var button = this; if (userData.ItemId === button.getAttribute('data-id')) { setState(button, userData.Likes, userData.IsFavorite); } } function setState(button, likes, isFavorite, updateAttribute) { var icon = button.querySelector('i'); if (isFavorite) { if (icon) { icon.innerHTML = 'favorite'; icon.classList.add('ratingbutton-icon-withrating'); } button.classList.add('ratingbutton-withrating'); } else if (likes) { if (icon) { icon.innerHTML = 'favorite'; icon.classList.remove('ratingbutton-icon-withrating'); //icon.innerHTML = 'thumb_up'; } button.classList.remove('ratingbutton-withrating'); } else if (likes === false) { if (icon) { icon.innerHTML = 'favorite'; icon.classList.remove('ratingbutton-icon-withrating'); //icon.innerHTML = 'thumb_down'; } button.classList.remove('ratingbutton-withrating'); } else { if (icon) { icon.innerHTML = 'favorite'; icon.classList.remove('ratingbutton-icon-withrating'); //icon.innerHTML = 'thumbs_up_down'; } button.classList.remove('ratingbutton-withrating'); } if (updateAttribute !== false) { button.setAttribute('data-isfavorite', isFavorite); button.setAttribute('data-likes', (likes === null ? 
'' : likes)); } } function setTitle(button) { button.title = globalize.translate('Favorite'); var text = button.querySelector('.button-text'); if (text) { text.innerHTML = button.title; } } function clearEvents(button) { button.removeEventListener('click', onClick); removeNotificationEvent(button, 'UserDataChanged'); } function bindEvents(button) { clearEvents(button); button.addEventListener('click', onClick); addNotificationEvent(button, 'UserDataChanged', onUserDataChanged); } var EmbyRatingButtonPrototype = Object.create(EmbyButtonPrototype); EmbyRatingButtonPrototype.createdCallback = function () { // base method if (EmbyButtonPrototype.createdCallback) { EmbyButtonPrototype.createdCallback.call(this); } }; EmbyRatingButtonPrototype.attachedCallback = function () { // base method if (EmbyButtonPrototype.attachedCallback) { EmbyButtonPrototype.attachedCallback.call(this); } var itemId = this.getAttribute('data-id'); var serverId = this.getAttribute('data-serverid'); if (itemId && serverId) { var likes = this.getAttribute('data-likes'); var isFavorite = this.getAttribute('data-isfavorite') === 'true'; if (likes === 'true') { likes = true; } else if (likes === 'false') { likes = false; } else { likes = null; } setState(this, likes, isFavorite, false); bindEvents(this); } setTitle(this); }; EmbyRatingButtonPrototype.detachedCallback = function () { // base method if (EmbyButtonPrototype.detachedCallback) { EmbyButtonPrototype.detachedCallback.call(this); } clearEvents(this); }; EmbyRatingButtonPrototype.setItem = function (item) { if (item) { this.setAttribute('data-id', item.Id); this.setAttribute('data-serverid', item.ServerId); var userData = item.UserData || {}; setState(this, userData.Likes, userData.IsFavorite); bindEvents(this); } else { this.removeAttribute('data-id'); this.removeAttribute('data-serverid'); this.removeAttribute('data-likes'); this.removeAttribute('data-isfavorite'); clearEvents(this); } }; document.registerElement('emby-ratingbutton', { prototype: EmbyRatingButtonPrototype, extends: 'button' }); });
1
15,129
Why do you use `.classList.add()` here but `.replace()` in other places?
jellyfin-jellyfin-web
js
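The reviewer asks why the patch calls `.classList.add()` here while other call sites use `.replace()`; the snippet below only contrasts the two standard DOM calls (the class names are placeholders, not jellyfin-web's actual state classes):

```js
const icon = document.querySelector('.material-icons');

if (icon) {
    // add() only appends the class; whatever state class was there before stays.
    icon.classList.add('favorite');

    // replace() swaps one class for another in a single call, which is why it
    // tends to be used when two classes represent mutually exclusive states.
    icon.classList.replace('favorite_border', 'favorite');
}
```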
@@ -98,9 +98,14 @@ Then 'I see the section price is "$price"' do |price| end Then 'I see that one of the teachers is "$teacher_name"' do |teacher_name| + sleep 10 page.should have_css(".teachers", text: teacher_name) end +Then 'I should see that "$teacher_name" is teaching both sections' do |teacher_name| + page.should have_selector('h4', text: "Joe Teacher", count: 1) +end + Then %{I see "$teacher_name"'s avatar} do |teacher_name| teacher = Teacher.find_by_name!(teacher_name) page.should have_css("img[src^='https://secure.gravatar.com/avatar/#{teacher.gravatar_hash}']")
1
Then 'I see the empty section description' do page.should have_content('No courses are running at this time') end When 'I follow the external registration link' do url = find("*[@id='register-button']")[:href] url.should_not be_nil, "cannot find the external registration link" Misc.rails_app = Capybara.app Capybara.app = Sinatra::Application visit url end Then /^I should not see the external registration link$/ do page.should have_no_css("#register-button") end Then /^I should a registration link to be notified$/ do page.should have_css("#register-button[href='#new_follow_up']", text: "Get notified") end Then 'I see the section from "$start_date" to "$end_date"' do |start_date, end_date| section = section(start_date, end_date) course = section.course date_string = course_date_string(start_date, end_date) within("#course_#{course.id}") do page.should have_content(date_string) end end Then 'I see the home page section from "$start_date" to "$end_date"' do |start_date, end_date| section = section(start_date, end_date) course = section.course date_string = course_date_string(start_date, end_date) within("#section_#{section.id}") do page.should have_content(date_string) end end Then 'I do not see the home page section from "$start_date" to "$end_date"' do |start_date, end_date| section = section(start_date, end_date) course = section.course date_string = course_date_string(start_date, end_date) page.should_not have_css("#section_#{section.id}:contains('#{date_string}')") end Then 'I see that "$teacher_name" is teaching' do |teacher_name| find_field(teacher_name)['checked'].should be end Then 'I do not see that "$teacher_name" is teaching' do |teacher_name| find_field(teacher_name)['checked'].should_not be end Then "I see that the section teacher can't be blank" do page.should have_content("must specify at least one teacher") end Then '"$teacher_name" has a Gravatar for "$teacher_email"' do |teacher_name, teacher_email| gravatar_hash = Digest::MD5.hexdigest(teacher_email.strip.downcase) teacher = Teacher.find_by_email!(teacher_email) page.should have_css(%{img[src="https://secure.gravatar.com/avatar/#{gravatar_hash}?s=20"]}) end Then 'I see the user "$user_name" in the list of users' do |user_name| within('#student-list ul') do page.should have_content(user_name) end end Then 'I see the section location is "$location"' do |location| page.should have_css(".address", text: location) end Then %{I see the section location's name is "$location_name"} do |location_name| within("#location") do page.should have_content(location_name) end end Then 'I see the section date is "$section_date_range"' do |section_date_range| within("#register-date") do page.should have_content(section_date_range) end end Then 'I see the section time is "$section_time_range"' do |section_time_range| within("#register-time") do page.should have_content(section_time_range) end end Then 'I see the section price is "$price"' do |price| within("#amount") do page.should have_content(price) end end Then 'I see that one of the teachers is "$teacher_name"' do |teacher_name| page.should have_css(".teachers", text: teacher_name) end Then %{I see "$teacher_name"'s avatar} do |teacher_name| teacher = Teacher.find_by_name!(teacher_name) page.should have_css("img[src^='https://secure.gravatar.com/avatar/#{teacher.gravatar_hash}']") end When /^I should see "([^"]*)" before "([^"]*)"$/ do |section1_name, section2_name| page.body.should =~ /#{section1_name}.*#{section2_name}/m end When /^I follow the delete link to the section from "([^"]*)" to 
"([^"]*)"$/ do |start_date, end_date| section = section(start_date, end_date) inside section do click_link "Delete" end end Then /^I should not see the section from "([^"]*)" to "([^"]*)"$/ do |start_date, end_date| date_string = course_date_string(start_date, end_date) page.should have_no_css(".section", text: date_string) end When /^I follow the link to the section from "([^"]*)" to "([^"]*)"$/ do |starts_on, ends_on| section = Section.find_by_starts_on_and_ends_on!(Date.parse(starts_on), Date.parse(ends_on)) find("a:contains('#{section.date_range}')").click end Then 'I see that "$student_name" has paid' do |student_name| within('ul#paid') do page.should have_content(student_name) end end Then 'I see that "$student_name" has not paid' do |student_name| within('ul#unpaid') do page.should have_content(student_name) end end
1
6,404
What is this sleep here for?
thoughtbot-upcase
rb
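The reviewer questions the hard-coded `sleep 10` added in this patch; a hedged sketch of the usual alternative is to lean on Capybara's built-in polling and simply widen the wait window (the 10-second figure just mirrors the patch and is otherwise an assumption):

```ruby
Then 'I see that one of the teachers is "$teacher_name"' do |teacher_name|
  # have_css already retries until Capybara's wait time elapses, so widening
  # the window avoids a fixed 10-second pause when the element shows up early.
  using_wait_time(10) do
    page.should have_css('.teachers', text: teacher_name)
  end
end
```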
@@ -478,4 +478,19 @@ describe('AutoRowSize', () => { expect(rowHeight(spec().$container, -1)).toBe(75); }); + + it('should properly count height', () => { + const hot = handsontable({ + data: [['Tomek', 'Tomek\nTomek', 'Romek\nRomek']], + rowHeaders: true, + colHeaders: true, + autoRowSize: true, + }); + // const cloneLeftHider = spec().$container.find('.ht_clone_left .wtHider')[0]; + const $rowsHeaders = spec().$container.find('.ht_clone_left tbody tr th'); + const plugin = hot.getPlugin('autoRowSize'); + + expect($rowsHeaders.height()).toEqual(42); + expect(plugin.heights[0]).toEqual(43); + }); });
1
describe('AutoRowSize', () => { const id = 'testContainer'; beforeEach(function() { this.$container = $(`<div id="${id}"></div>`).appendTo('body'); }); afterEach(function() { if (this.$container) { destroy(); this.$container.remove(); } }); function arrayOfObjects() { return [ { id: 'Short' }, { id: 'Somewhat\nlong' }, { id: 'The\nvery\nvery\nvery\nlongest one' } ]; } function arrayOfObjects2() { return [ { id: 'Short', name: 'Somewhat long' }, { id: 'Somewhat long', name: 'The very very longest one' }, { id: 'The very very very longest one', name: 'Short' } ]; } it('should apply auto size by default', () => { handsontable({ data: arrayOfObjects() }); const height0 = rowHeight(spec().$container, 0); const height1 = rowHeight(spec().$container, 1); const height2 = rowHeight(spec().$container, 2); expect(height0).toBeLessThan(height1); expect(height1).toBeLessThan(height2); }); it('should draw scrollbar correctly (proper height) after calculation when autoRowSize option is set (long text in row) #4000', (done) => { const row = ['This is very long text which will break this cell text into two lines']; const data = []; const nrOfRows = 200; const columnWidth = 100; for (let i = 0; i < nrOfRows; i += 1) { data.push(row); } handsontable({ data, colWidths() { return columnWidth; }, autoRowSize: true }); const oldHeight = spec().$container[0].scrollHeight; setTimeout(() => { const newHeight = spec().$container[0].scrollHeight; expect(oldHeight).toBeLessThan(newHeight); done(); }, 200); }); describe('should draw scrollbar correctly (proper height) after calculation when autoRowSize option is set (`table td` element height set by CSS) #4000', () => { const cellHeightInPx = 100; const nrOfColumns = 200; let nrOfRows = null; let style; const SYNC_CALCULATION_LIMIT = Handsontable.plugins.AutoRowSize.SYNC_CALCULATION_LIMIT; const CALCULATION_STEP = Handsontable.plugins.AutoRowSize.CALCULATION_STEP; beforeEach(function() { if (!this.$container) { this.$container = $(`<div id="${id}"></div>`).appendTo('body'); } const css = `.handsontable table td { height: ${cellHeightInPx}px !important }`; const head = document.head; style = document.createElement('style'); style.type = 'text/css'; if (style.styleSheet) { style.styleSheet.cssText = css; } else { style.appendChild(document.createTextNode(css)); } $(head).append(style); }); afterEach(function() { if (this.$container) { destroy(); this.$container.remove(); } if (style) { $(style).remove(); } }); it('(SYNC_CALCULATION_LIMIT - 1 rows)', (done) => { nrOfRows = SYNC_CALCULATION_LIMIT - 1; handsontable({ data: Handsontable.helper.createSpreadsheetData(nrOfRows, nrOfColumns), autoRowSize: true }); setTimeout(() => { const newHeight = spec().$container[0].scrollHeight; expect(newHeight).toEqual((((cellHeightInPx + 1) * nrOfRows) + 1)); done(); }, 200); }); it('(SYNC_CALCULATION_LIMIT + 1 rows)', (done) => { nrOfRows = SYNC_CALCULATION_LIMIT + 1; handsontable({ data: Handsontable.helper.createSpreadsheetData(nrOfRows, nrOfColumns), autoRowSize: true }); setTimeout(() => { const newHeight = spec().$container[0].scrollHeight; expect(newHeight).toEqual((((cellHeightInPx + 1) * nrOfRows) + 1)); done(); }, 200); }); it('(SYNC_CALCULATION_LIMIT + CALCULATION_STEP - 1 rows)', (done) => { nrOfRows = SYNC_CALCULATION_LIMIT + CALCULATION_STEP - 1; handsontable({ data: Handsontable.helper.createSpreadsheetData(nrOfRows, nrOfColumns), autoRowSize: true }); setTimeout(() => { const newHeight = spec().$container[0].scrollHeight; expect(newHeight).toEqual((((cellHeightInPx + 1) 
* nrOfRows) + 1)); done(); }, 200); }); it('(SYNC_CALCULATION_LIMIT + CALCULATION_STEP + 1 rows)', (done) => { nrOfRows = SYNC_CALCULATION_LIMIT + CALCULATION_STEP + 1; handsontable({ data: Handsontable.helper.createSpreadsheetData(nrOfRows, nrOfColumns), autoRowSize: true }); setTimeout(() => { const newHeight = spec().$container[0].scrollHeight; expect(newHeight).toEqual((((cellHeightInPx + 1) * nrOfRows) + 1)); done(); }, 200); }); }); it('should correctly detect row height when table is hidden on init (display: none)', async() => { spec().$container.css('display', 'none'); const hot = handsontable({ data: arrayOfObjects(), rowHeaders: true, autoRowSize: true }); await sleep(200); spec().$container.css('display', 'block'); hot.render(); expect(rowHeight(spec().$container, 0)).toBe(24); expect(rowHeight(spec().$container, 1)).toBe(43); expect([106, 127]).toEqual(jasmine.arrayContaining([rowHeight(spec().$container, 2)])); }); it('should be possible to disable plugin using updateSettings', () => { const hot = handsontable({ data: arrayOfObjects() }); const height0 = rowHeight(spec().$container, 0); const height1 = rowHeight(spec().$container, 1); const height2 = rowHeight(spec().$container, 2); expect(height0).toBeLessThan(height1); expect(height1).toBeLessThan(height2); updateSettings({ autoRowSize: false }); hot.setDataAtCell(0, 0, 'A\nB\nC'); const height4 = rowHeight(spec().$container, 0); expect(height4).toBeGreaterThan(height0); }); it('should be possible to enable plugin using updateSettings', () => { handsontable({ data: arrayOfObjects(), autoRowSize: false }); let height0 = parseInt(getCell(0, 0).style.height, 10); let height1 = parseInt(getCell(1, 0).style.height, 10); let height2 = parseInt(getCell(2, 0).style.height, 10); expect(height0).toEqual(height1); expect(height0).toEqual(height2); expect(height1).toEqual(height2); updateSettings({ autoRowSize: true }); height0 = parseInt(getCell(0, 0).style.height, 10); height1 = parseInt(getCell(1, 0).style.height, 10); height2 = parseInt(getCell(2, 0).style.height, 10); expect(height0).toBeLessThan(height1); expect(height1).toBeLessThan(height2); }); it('should consider CSS style of each instance separately', () => { const $style = $('<style>.big .htCore td {font-size: 40px;line-height: 1.1}</style>').appendTo('head'); const $container1 = $('<div id="hot1"></div>').appendTo('body').handsontable({ data: arrayOfObjects(), autoRowSize: true }); const $container2 = $('<div id="hot2"></div>').appendTo('body').handsontable({ data: arrayOfObjects(), autoRowSize: true }); const hot1 = $container1.handsontable('getInstance'); const hot2 = $container2.handsontable('getInstance'); expect(parseInt(hot1.getCell(0, 0).style.height, 10)).toEqual(parseInt(hot2.getCell(0, 0).style.height, 10)); $container1.addClass('big'); hot1.render(); hot2.render(); expect(parseInt(hot1.getCell(2, 0).style.height, 10)).toBeGreaterThan(parseInt(hot2.getCell(2, 0).style.height, 10)); $container1.removeClass('big'); hot1.render(); $container2.addClass('big'); hot2.render(); expect(parseInt(hot1.getCell(2, 0).style.height, 10)).toBeLessThan(parseInt(hot2.getCell(2, 0).style.height, 10)); $style.remove(); $container1.handsontable('destroy'); $container1.remove(); $container2.handsontable('destroy'); $container2.remove(); }); it('should consider CSS class of the <table> element (e.g. 
when used with Bootstrap)', () => { const $style = $('<style>.htCore.big-table td {font-size: 32px;line-height: 1.1}</style>').appendTo('head'); const hot = handsontable({ data: arrayOfObjects(), autoRowSize: true }); const height = parseInt(hot.getCell(2, 0).style.height, 10); spec().$container.find('table').addClass('big-table'); hot.getPlugin('autoRowSize').clearCache(); render(); expect(parseInt(hot.getCell(2, 0).style.height, 10)).toBeGreaterThan(height); $style.remove(); }); it('should not trigger autoColumnSize when column width is defined (through colWidths)', () => { const hot = handsontable({ data: arrayOfObjects(), autoRowSize: true, rowHeights: [70, 70, 70], width: 500, height: 100, rowHeaders: true }); setDataAtCell(0, 0, 'LongLongLongLong'); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(69); // -1px of cell border }); // Currently columns.height is not supported xit('should not trigger autoRowSize when column height is defined (through columns.height)', () => { const hot = handsontable({ data: arrayOfObjects(), autoRowSize: true, rowHeights: 77, columns: [ { height: 70 }, { height: 70 }, { height: 70 } ], width: 500, height: 100, rowHeaders: true }); setDataAtCell(0, 0, 'LongLongLongLong'); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(69); // -1px of cell border }); it('should consider renderer that uses conditional formatting for specific row & column index', () => { const data = arrayOfObjects(); data.push({ id: '2', name: 'Rocket Man', lastName: 'In a tin can' }); const hot = handsontable({ data, columns: [ { data: 'id' }, { data: 'name' } ], autoRowSize: true, renderer(instance, td, row, col, ...args) { // taken from demo/renderers.html Handsontable.renderers.TextRenderer.apply(this, [instance, td, row, col, ...args]); if (row === 1 && col === 0) { td.style.padding = '100px'; } } }); expect(parseInt(hot.getCell(1, 0).style.height || 0, 10)).toBe(242); }); it('should destroy temporary element', () => { handsontable({ autoRowSize: true }); expect(document.querySelector('.htAutoSize')).toBe(null); }); it('should recalculate heights after column resize', function() { const hot = handsontable({ data: arrayOfObjects2(), colWidths: 250, manualColumnResize: true, autoRowSize: true, rowHeaders: true, colHeaders: true }); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(22); // -1px of cell border expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(22); // -1px of cell border expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBeInArray([22, 42]); // -1px of cell border resizeColumn.call(this, 1, 100); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(22); expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(42); expect([63, 84]).toEqual(jasmine.arrayContaining([parseInt(hot.getCell(2, -1).style.height, 10)])); resizeColumn.call(this, 1, 50); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(22); expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(42); expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBe(126); resizeColumn.call(this, 1, 200); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(22); expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(22); expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBe(42); }); it('should recalculate heights after column moved', () => { const hot = handsontable({ data: arrayOfObjects2(), colWidths: [250, 50], manualColumnMove: true, autoRowSize: true, rowHeaders: true, colHeaders: true }); const plugin = hot.getPlugin('manualColumnMove'); 
expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(42); // -1px of cell border expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(105); // -1px of cell border expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBeInArray([22, 42]); // -1px of cell border plugin.moveColumn(0, 2); hot.render(); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(22); expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(42); expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBe(126); }); it('should recalculate heights with manualRowResize when changing text to multiline', () => { const hot = handsontable({ data: arrayOfObjects2(), colWidths: 250, manualRowResize: [23, 50], autoRowSize: true, rowHeaders: true, colHeaders: true }); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(22); // -1px of cell border expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(49); // -1px of cell border expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBeInArray([22, 42]); // -1px of cell border hot.setDataAtCell(1, 0, 'A\nB\nC\nD\nE'); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(22); expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(105); expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBeInArray([22, 42]); }); it('should recalculate heights after moved row', () => { const hot = handsontable({ data: arrayOfObjects2(), colWidths: 250, manualRowResize: [23, 50], manualRowMove: true, autoRowSize: true, rowHeaders: true, colHeaders: true }); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(22); // -1px of cell border expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(49); // -1px of cell border expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBeInArray([22, 42]); // -1px of cell border const plugin = hot.getPlugin('manualRowMove'); plugin.moveRow(1, 0); hot.render(); expect(parseInt(hot.getCell(0, -1).style.height, 10)).toBe(49); expect(parseInt(hot.getCell(1, -1).style.height, 10)).toBe(22); expect(parseInt(hot.getCell(2, -1).style.height, 10)).toBeInArray([22, 42]); // -1px of cell border }); it('should resize the column headers properly, according the their content sizes', () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(30, 30), colHeaders(index) { if (index === 22) { return 'a<br>much<br>longer<br>label'; } return 'test'; }, autoRowSize: true, rowHeaders: true, width: 300, height: 300 }); expect(rowHeight(spec().$container, -1)).toBe(75); }); });
1
15,000
I think this test doesn't cover this bug correctly. When I attached the older version of Handsontable to this test, it passed, but it should have failed. Can you check that?
handsontable-handsontable
js
@@ -186,6 +186,16 @@ type tlfJournal struct { mdJournal *mdJournal disabled bool lastFlushErr error + // The cache of unflushed paths. If `unflushedReady` is not nil, + // then any callers must wait for it to be closed before accessing + // `unflushedPaths`. `unflushedReady` only transitions + // nil->non-nil->nil at most one time during the lifetime of the + // journal. If `unflushedPaths` is non-nil, then `chainsPopulator` + // must also be non-nil. These three fields are protected by + // `journalLock`. + unflushedPaths map[MetadataRevision]map[string]bool + unflushedReady chan struct{} + chainsPopulator chainsPathPopulator bwDelegate tlfJournalBWDelegate }
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "encoding/json" "errors" "fmt" "io/ioutil" "os" "path/filepath" "sync" "time" "github.com/keybase/backoff" "github.com/keybase/client/go/logger" "github.com/keybase/client/go/protocol/keybase1" "github.com/keybase/kbfs/kbfscodec" "github.com/keybase/kbfs/kbfscrypto" "github.com/keybase/kbfs/kbfssync" "golang.org/x/net/context" ) // tlfJournalConfig is the subset of the Config interface needed by // tlfJournal (for ease of testing). type tlfJournalConfig interface { BlockSplitter() BlockSplitter Codec() kbfscodec.Codec Crypto() Crypto BlockCache() BlockCache BlockOps() BlockOps MDCache() MDCache Reporter() Reporter encryptionKeyGetter() encryptionKeyGetter mdDecryptionKeyGetter() mdDecryptionKeyGetter MDServer() MDServer usernameGetter() normalizedUsernameGetter MakeLogger(module string) logger.Logger } // tlfJournalConfigWrapper is an adapter for Config objects to the // tlfJournalConfig interface. type tlfJournalConfigAdapter struct { Config } func (ca tlfJournalConfigAdapter) encryptionKeyGetter() encryptionKeyGetter { return ca.Config.KeyManager() } func (ca tlfJournalConfigAdapter) mdDecryptionKeyGetter() mdDecryptionKeyGetter { return ca.Config.KeyManager() } func (ca tlfJournalConfigAdapter) usernameGetter() normalizedUsernameGetter { return ca.Config.KBPKI() } const ( // Maximum number of blocks that can be flushed in a single batch // by the journal. TODO: make this configurable, so that users // can choose how much bandwidth is used by the journal. maxJournalBlockFlushBatchSize = 25 // This will be the final entry for unflushed paths if there are // too many revisions to process at once. incompleteUnflushedPathsMarker = "..." ) // TLFJournalStatus represents the status of a TLF's journal for // display in diagnostics. It is suitable for encoding directly as // JSON. type TLFJournalStatus struct { Dir string RevisionStart MetadataRevision RevisionEnd MetadataRevision BranchID string BlockOpCount uint64 UnflushedBytes int64 // (signed because os.FileInfo.Size() is signed) UnflushedPaths []string LastFlushErr string `json:",omitempty"` } // TLFJournalBackgroundWorkStatus indicates whether a journal should // be doing background work or not. type TLFJournalBackgroundWorkStatus int const ( // TLFJournalBackgroundWorkPaused indicates that the journal // should not currently be doing background work. TLFJournalBackgroundWorkPaused TLFJournalBackgroundWorkStatus = iota // TLFJournalBackgroundWorkEnabled indicates that the journal // should be doing background work. TLFJournalBackgroundWorkEnabled ) func (bws TLFJournalBackgroundWorkStatus) String() string { switch bws { case TLFJournalBackgroundWorkEnabled: return "Background work enabled" case TLFJournalBackgroundWorkPaused: return "Background work paused" default: return fmt.Sprintf("TLFJournalBackgroundWorkStatus(%d)", bws) } } // bwState indicates the state of the background work goroutine. type bwState int const ( bwBusy bwState = iota bwIdle bwPaused ) func (bws bwState) String() string { switch bws { case bwBusy: return "bwBusy" case bwIdle: return "bwIdle" case bwPaused: return "bwPaused" default: return fmt.Sprintf("bwState(%d)", bws) } } // tlfJournalBWDelegate is used by tests to know what the background // goroutine is doing, and also to enforce a timeout (via the // context). 
type tlfJournalBWDelegate interface { GetBackgroundContext() context.Context OnNewState(ctx context.Context, bws bwState) OnShutdown(ctx context.Context) } // A tlfJournal contains all the journals for a (TLF, user, device) // tuple and controls the synchronization between the objects that are // adding to those journals (via journalBlockServer or journalMDOps) // and a background goroutine that flushes journal entries to the // servers. // // The maximum number of characters added to the root dir by a TLF // journal is 59, which just the max of the block journal and MD // journal numbers. type tlfJournal struct { uid keybase1.UID key kbfscrypto.VerifyingKey tlfID TlfID dir string config tlfJournalConfig delegateBlockServer BlockServer log logger.Logger deferLog logger.Logger onBranchChange branchChangeListener onMDFlush mdFlushListener // All the channels below are used as simple on/off // signals. They're buffered for one object, and all sends are // asynchronous, so multiple sends get collapsed into one // signal. hasWorkCh chan struct{} needPauseCh chan struct{} needResumeCh chan struct{} needShutdownCh chan struct{} // This channel is closed when background work shuts down. backgroundShutdownCh chan struct{} // Serializes all flushes. flushLock sync.Mutex // Tracks background work. wg kbfssync.RepeatedWaitGroup // Protects all operations on blockJournal and mdJournal. // // TODO: Consider using https://github.com/pkg/singlefile // instead. journalLock sync.RWMutex // both of these are nil after shutdown() is called. blockJournal *blockJournal mdJournal *mdJournal disabled bool lastFlushErr error bwDelegate tlfJournalBWDelegate } func getTLFJournalInfoFilePath(dir string) string { return filepath.Join(dir, "info.json") } type tlfJournalInfo struct { UID keybase1.UID VerifyingKey kbfscrypto.VerifyingKey TlfID TlfID } func readTLFJournalInfoFile(dir string) ( keybase1.UID, kbfscrypto.VerifyingKey, TlfID, error) { infoJSON, err := ioutil.ReadFile(getTLFJournalInfoFilePath(dir)) if err != nil { return keybase1.UID(""), kbfscrypto.VerifyingKey{}, TlfID{}, err } var info tlfJournalInfo err = json.Unmarshal(infoJSON, &info) if err != nil { return keybase1.UID(""), kbfscrypto.VerifyingKey{}, TlfID{}, err } return info.UID, info.VerifyingKey, info.TlfID, nil } func writeTLFJournalInfoFile(dir string, uid keybase1.UID, key kbfscrypto.VerifyingKey, tlfID TlfID) error { info := tlfJournalInfo{uid, key, tlfID} infoJSON, err := json.Marshal(info) if err != nil { return err } err = os.MkdirAll(dir, 0700) if err != nil { return err } return ioutil.WriteFile(getTLFJournalInfoFilePath(dir), infoJSON, 0600) } func makeTLFJournal( ctx context.Context, uid keybase1.UID, key kbfscrypto.VerifyingKey, dir string, tlfID TlfID, config tlfJournalConfig, delegateBlockServer BlockServer, bws TLFJournalBackgroundWorkStatus, bwDelegate tlfJournalBWDelegate, onBranchChange branchChangeListener, onMDFlush mdFlushListener) (*tlfJournal, error) { if uid == keybase1.UID("") { return nil, errors.New("Empty user") } if key == (kbfscrypto.VerifyingKey{}) { return nil, errors.New("Empty verifying key") } if tlfID == (TlfID{}) { return nil, errors.New("Empty TlfID") } readUID, readKey, readTlfID, err := readTLFJournalInfoFile(dir) switch { case os.IsNotExist(err): // Info file doesn't exist, so write it. err := writeTLFJournalInfoFile(dir, uid, key, tlfID) if err != nil { return nil, err } case err != nil: return nil, err default: // Info file exists, so it should match passed-in // parameters. 
if uid != readUID { return nil, fmt.Errorf( "Expected UID %s, got %s", uid, readUID) } if key != readKey { return nil, fmt.Errorf( "Expected verifying key %s, got %s", key, readKey) } if tlfID != readTlfID { return nil, fmt.Errorf( "Expected TLF ID %s, got %s", tlfID, readTlfID) } } log := config.MakeLogger("TLFJ") blockJournal, err := makeBlockJournal( ctx, config.Codec(), config.Crypto(), dir, log) if err != nil { return nil, err } mdJournal, err := makeMDJournal( uid, key, config.Codec(), config.Crypto(), dir, log) if err != nil { return nil, err } j := &tlfJournal{ uid: uid, key: key, tlfID: tlfID, dir: dir, config: config, delegateBlockServer: delegateBlockServer, log: log, deferLog: log.CloneWithAddedDepth(1), onBranchChange: onBranchChange, onMDFlush: onMDFlush, hasWorkCh: make(chan struct{}, 1), needPauseCh: make(chan struct{}, 1), needResumeCh: make(chan struct{}, 1), needShutdownCh: make(chan struct{}, 1), backgroundShutdownCh: make(chan struct{}), blockJournal: blockJournal, mdJournal: mdJournal, bwDelegate: bwDelegate, } go j.doBackgroundWorkLoop(bws, backoff.NewExponentialBackOff()) // Signal work to pick up any existing journal entries. j.signalWork() j.log.CDebugf(ctx, "Enabled journal for %s with path %s", tlfID, dir) return j, nil } func (j *tlfJournal) signalWork() { j.wg.Add(1) select { case j.hasWorkCh <- struct{}{}: default: j.wg.Done() } } // CtxJournalTagKey is the type used for unique context tags within // background journal work. type CtxJournalTagKey int const ( // CtxJournalIDKey is the type of the tag for unique operation IDs // within background journal work. CtxJournalIDKey CtxJournalTagKey = iota ) // CtxJournalOpID is the display name for the unique operation // enqueued journal ID tag. const CtxJournalOpID = "JID" // doBackgroundWorkLoop is the main function for the background // goroutine. It spawns off a worker goroutine to call // doBackgroundWork whenever there is work, and can be paused and // resumed. func (j *tlfJournal) doBackgroundWorkLoop( bws TLFJournalBackgroundWorkStatus, retry backoff.BackOff) { ctx := context.Background() if j.bwDelegate != nil { ctx = j.bwDelegate.GetBackgroundContext() } // Non-nil when a retry has been scheduled for the future. var retryTimer *time.Timer defer func() { close(j.backgroundShutdownCh) if j.bwDelegate != nil { j.bwDelegate.OnShutdown(ctx) } if retryTimer != nil { retryTimer.Stop() } }() // Below we have a state machine with three states: // // 1) Idle, where we wait for new work or to be paused; // 2) Busy, where we wait for the worker goroutine to // finish, or to be paused; // 3) Paused, where we wait to be resumed. // // We run this state machine until we are shutdown. Also, if // we exit the busy state for any reason other than the worker // goroutine finished, we stop the worker goroutine (via // bwCancel below). // errCh and bwCancel are non-nil only when we're in the busy // state. errCh is the channel on which we receive the error // from the worker goroutine, and bwCancel is the CancelFunc // corresponding to the context passed to the worker // goroutine. var errCh <-chan error var bwCancel context.CancelFunc // Handle the case where we panic while in the busy state. defer func() { if bwCancel != nil { bwCancel() } }() for { ctx := ctxWithRandomIDReplayable(ctx, CtxJournalIDKey, CtxJournalOpID, j.log) switch { case bws == TLFJournalBackgroundWorkEnabled && errCh == nil: // 1) Idle. 
if j.bwDelegate != nil { j.bwDelegate.OnNewState(ctx, bwIdle) } j.log.CDebugf( ctx, "Waiting for the work signal for %s", j.tlfID) select { case <-j.hasWorkCh: j.log.CDebugf(ctx, "Got work signal for %s", j.tlfID) if retryTimer != nil { retryTimer.Stop() retryTimer = nil } bwCtx, cancel := context.WithCancel(ctx) errCh = j.doBackgroundWork(bwCtx) bwCancel = cancel case <-j.needPauseCh: j.log.CDebugf(ctx, "Got pause signal for %s", j.tlfID) bws = TLFJournalBackgroundWorkPaused case <-j.needShutdownCh: j.log.CDebugf(ctx, "Got shutdown signal for %s", j.tlfID) return } case bws == TLFJournalBackgroundWorkEnabled && errCh != nil: // 2) Busy. if j.bwDelegate != nil { j.bwDelegate.OnNewState(ctx, bwBusy) } j.log.CDebugf(ctx, "Waiting for background work to be done for %s", j.tlfID) needShutdown := false select { case err := <-errCh: if retryTimer != nil { panic("Retry timer should be nil after work is done") } if err != nil { j.log.CWarningf(ctx, "Background work error for %s: %v", j.tlfID, err) bTime := retry.NextBackOff() if bTime != backoff.Stop { j.log.CWarningf(ctx, "Retrying in %s", bTime) retryTimer = time.AfterFunc(bTime, j.signalWork) } } else { retry.Reset() } case <-j.needPauseCh: j.log.CDebugf(ctx, "Got pause signal for %s", j.tlfID) bws = TLFJournalBackgroundWorkPaused case <-j.needShutdownCh: j.log.CDebugf(ctx, "Got shutdown signal for %s", j.tlfID) needShutdown = true } errCh = nil // Cancel the worker goroutine as we exit this // state. bwCancel() bwCancel = nil if needShutdown { return } case bws == TLFJournalBackgroundWorkPaused: // 3) Paused j.wg.Pause() if j.bwDelegate != nil { j.bwDelegate.OnNewState(ctx, bwPaused) } j.log.CDebugf( ctx, "Waiting to resume background work for %s", j.tlfID) select { case <-j.needResumeCh: j.log.CDebugf(ctx, "Got resume signal for %s", j.tlfID) bws = TLFJournalBackgroundWorkEnabled case <-j.needShutdownCh: j.log.CDebugf(ctx, "Got shutdown signal for %s", j.tlfID) return } default: j.log.CErrorf( ctx, "Unknown TLFJournalBackgroundStatus %s", bws) return } } } // doBackgroundWork currently only does auto-flushing. It assumes that // ctx is canceled when the background processing should stop. // // TODO: Handle garbage collection too. func (j *tlfJournal) doBackgroundWork(ctx context.Context) <-chan error { errCh := make(chan error, 1) // TODO: Handle panics. go func() { defer j.wg.Done() errCh <- j.flush(ctx) }() return errCh } // We don't guarantee that pause/resume requests will be processed in // strict FIFO order. In particular, multiple pause requests are // collapsed into one (also multiple resume requests), so it's // possible that a pause-resume-pause sequence will be processed as // pause-resume. But that's okay, since these are just for infrequent // ad-hoc testing. func (j *tlfJournal) pauseBackgroundWork() { select { case j.needPauseCh <- struct{}{}: default: } } func (j *tlfJournal) resumeBackgroundWork() { select { case j.needResumeCh <- struct{}{}: // Resume the wait group right away, so future callers will block // even before the background goroutine picks up this signal. 
j.wg.Resume() default: } } func (j *tlfJournal) getJournalEnds(ctx context.Context) ( blockEnd journalOrdinal, mdEnd MetadataRevision, err error) { j.journalLock.RLock() defer j.journalLock.RUnlock() blockEnd, err = j.blockJournal.end() if err != nil { return 0, 0, err } mdEnd, err = j.mdJournal.end() if err != nil { return 0, 0, err } return blockEnd, mdEnd, nil } func (j *tlfJournal) flush(ctx context.Context) (err error) { j.flushLock.Lock() defer j.flushLock.Unlock() flushedBlockEntries := 0 flushedMDEntries := 0 defer func() { if err != nil { j.deferLog.CDebugf(ctx, "Flushed %d block entries and %d MD entries "+ "for %s, but got error %v", flushedBlockEntries, flushedMDEntries, j.tlfID, err) } j.journalLock.Lock() j.lastFlushErr = err j.journalLock.Unlock() }() // TODO: Avoid starving flushing MD ops if there are many // block ops. See KBFS-1502. for { blockEnd, mdEnd, err := j.getJournalEnds(ctx) if err != nil { return err } if blockEnd == 0 && mdEnd == MetadataRevisionUninitialized { j.log.CDebugf(ctx, "Nothing else to flush") break } j.log.CDebugf(ctx, "Flushing up to blockEnd=%d and mdEnd=%d", blockEnd, mdEnd) // Flush the block journal ops in parallel. numFlushed, maxMDRevToFlush, err := j.flushBlockEntries(ctx, blockEnd) if err != nil { return err } flushedBlockEntries += numFlushed if numFlushed == 0 { // There were no blocks to flush, so we can flush all of // the remaining MDs. maxMDRevToFlush = mdEnd } // TODO: Flush MDs in batch. for { flushed, err := j.flushOneMDOp(ctx, mdEnd, maxMDRevToFlush) if err != nil { return err } if !flushed { break } flushedMDEntries++ } } j.log.CDebugf(ctx, "Flushed %d block entries and %d MD entries for %s", flushedBlockEntries, flushedMDEntries, j.tlfID) return nil } var errTLFJournalShutdown = errors.New("tlfJournal is shutdown") var errTLFJournalDisabled = errors.New("tlfJournal is disabled") var errTLFJournalNotEmpty = errors.New("tlfJournal is not empty") func (j *tlfJournal) checkEnabledLocked() error { if j.blockJournal == nil || j.mdJournal == nil { return errTLFJournalShutdown } if j.disabled { return errTLFJournalDisabled } return nil } func (j *tlfJournal) getNextBlockEntriesToFlush( ctx context.Context, end journalOrdinal) ( entries blockEntriesToFlush, maxMDRevToFlush MetadataRevision, err error) { j.journalLock.RLock() defer j.journalLock.RUnlock() if err := j.checkEnabledLocked(); err != nil { return blockEntriesToFlush{}, MetadataRevisionUninitialized, err } return j.blockJournal.getNextEntriesToFlush(ctx, end, maxJournalBlockFlushBatchSize) } func (j *tlfJournal) removeFlushedBlockEntries(ctx context.Context, entries blockEntriesToFlush) error { j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return err } return j.blockJournal.removeFlushedEntries(ctx, entries, j.tlfID, j.config.Reporter()) } func (j *tlfJournal) flushBlockEntries( ctx context.Context, end journalOrdinal) (int, MetadataRevision, error) { entries, maxMDRevToFlush, err := j.getNextBlockEntriesToFlush(ctx, end) if err != nil { return 0, MetadataRevisionUninitialized, err } if entries.length() == 0 { return 0, maxMDRevToFlush, nil } // TODO: fill this in for logging/error purposes. 
var tlfName CanonicalTlfName err = flushBlockEntries(ctx, j.log, j.delegateBlockServer, j.config.BlockCache(), j.config.Reporter(), j.tlfID, tlfName, entries) if err != nil { return 0, MetadataRevisionUninitialized, err } err = j.removeFlushedBlockEntries(ctx, entries) if err != nil { return 0, MetadataRevisionUninitialized, err } return entries.length(), maxMDRevToFlush, nil } func (j *tlfJournal) getNextMDEntryToFlush(ctx context.Context, end MetadataRevision) (MdID, *RootMetadataSigned, error) { j.journalLock.RLock() defer j.journalLock.RUnlock() if err := j.checkEnabledLocked(); err != nil { return MdID{}, nil, err } return j.mdJournal.getNextEntryToFlush(ctx, end, j.config.Crypto()) } func (j *tlfJournal) convertMDsToBranchAndGetNextEntry( ctx context.Context, nextEntryEnd MetadataRevision) ( MdID, *RootMetadataSigned, error) { j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return MdID{}, nil, err } bid, err := j.mdJournal.convertToBranch( ctx, j.config.Crypto(), j.config.Codec(), j.tlfID, j.config.MDCache()) if err != nil { return MdID{}, nil, err } if j.onBranchChange != nil { j.onBranchChange.onTLFBranchChange(j.tlfID, bid) } return j.mdJournal.getNextEntryToFlush( ctx, nextEntryEnd, j.config.Crypto()) } func (j *tlfJournal) removeFlushedMDEntry(ctx context.Context, mdID MdID, rmds *RootMetadataSigned) error { j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return err } return j.mdJournal.removeFlushedEntry(ctx, mdID, rmds) } func (j *tlfJournal) flushOneMDOp( ctx context.Context, end MetadataRevision, maxMDRevToFlush MetadataRevision) (flushed bool, err error) { j.log.CDebugf(ctx, "Flushing one MD to server") defer func() { if err != nil { j.deferLog.CDebugf(ctx, "Flush failed with %v", err) } }() mdServer := j.config.MDServer() mdID, rmds, err := j.getNextMDEntryToFlush(ctx, end) if err != nil { return false, err } if mdID == (MdID{}) { return false, nil } // Only flush MDs for which the blocks have been fully flushed. if rmds.MD.RevisionNumber() > maxMDRevToFlush { j.log.CDebugf(ctx, "Haven't flushed all the blocks for TLF=%s "+ "with id=%s, rev=%s, bid=%s yet (maxMDRevToFlush=%d)", rmds.MD.TlfID(), mdID, rmds.MD.RevisionNumber(), rmds.MD.BID(), maxMDRevToFlush) return false, nil } j.log.CDebugf(ctx, "Flushing MD for TLF=%s with id=%s, rev=%s, bid=%s", rmds.MD.TlfID(), mdID, rmds.MD.RevisionNumber(), rmds.MD.BID()) // MDv3 TODO: pass actual key bundles pushErr := mdServer.Put(ctx, rmds, nil) if isRevisionConflict(pushErr) { headMdID, err := getMdID(ctx, mdServer, j.mdJournal.crypto, rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.MergedStatus(), rmds.MD.RevisionNumber()) if err != nil { j.log.CWarningf(ctx, "getMdID failed for TLF %s, BID %s, and revision %d: %v", rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.RevisionNumber(), err) } else if headMdID == mdID { if headMdID == (MdID{}) { panic("nil earliestID and revision conflict error returned by pushEarliestToServer") } // We must have already flushed this MD, so continue. pushErr = nil } else if rmds.MD.MergedStatus() == Merged { j.log.CDebugf(ctx, "Conflict detected %v", pushErr) // Convert MDs to a branch and retry the put. 
mdID, rmds, err = j.convertMDsToBranchAndGetNextEntry( ctx, end) if err != nil { return false, err } if mdID == (MdID{}) { return false, errors.New("Unexpected nil MdID") } j.log.CDebugf(ctx, "Flushing newly-unmerged MD for TLF=%s with id=%s, rev=%s, bid=%s", rmds.MD.TlfID(), mdID, rmds.MD.RevisionNumber(), rmds.MD.BID()) // MDv3 TODO: pass actual key bundles pushErr = mdServer.Put(ctx, rmds, nil) } } if pushErr != nil { return false, pushErr } if j.onMDFlush != nil { j.onMDFlush.onMDFlush(rmds.MD.TlfID(), rmds.MD.BID(), rmds.MD.RevisionNumber()) } err = j.removeFlushedMDEntry(ctx, mdID, rmds) if err != nil { return false, err } return true, nil } func (j *tlfJournal) getJournalEntryCounts() ( blockEntryCount, mdEntryCount uint64, err error) { j.journalLock.RLock() defer j.journalLock.RUnlock() if err := j.checkEnabledLocked(); err != nil { return 0, 0, err } blockEntryCount, err = j.blockJournal.length() if err != nil { return 0, 0, err } mdEntryCount, err = j.mdJournal.length() if err != nil { return 0, 0, err } return blockEntryCount, mdEntryCount, nil } func (j *tlfJournal) getJournalStatusLocked() (TLFJournalStatus, error) { if err := j.checkEnabledLocked(); err != nil { return TLFJournalStatus{}, err } earliestRevision, err := j.mdJournal.readEarliestRevision() if err != nil { return TLFJournalStatus{}, err } latestRevision, err := j.mdJournal.readLatestRevision() if err != nil { return TLFJournalStatus{}, err } blockEntryCount, err := j.blockJournal.length() if err != nil { return TLFJournalStatus{}, err } lastFlushErr := "" if j.lastFlushErr != nil { lastFlushErr = j.lastFlushErr.Error() } return TLFJournalStatus{ Dir: j.dir, BranchID: j.mdJournal.getBranchID().String(), RevisionStart: earliestRevision, RevisionEnd: latestRevision, BlockOpCount: blockEntryCount, UnflushedBytes: j.blockJournal.unflushedBytes, LastFlushErr: lastFlushErr, }, nil } func (j *tlfJournal) getJournalStatus() (TLFJournalStatus, error) { j.journalLock.RLock() defer j.journalLock.RUnlock() return j.getJournalStatusLocked() } func (j *tlfJournal) getJournalStatusWithRange() ( TLFJournalStatus, []ImmutableBareRootMetadata, bool, error) { j.journalLock.RLock() defer j.journalLock.RUnlock() jStatus, err := j.getJournalStatusLocked() if err != nil { return TLFJournalStatus{}, nil, false, err } if jStatus.RevisionEnd == MetadataRevisionUninitialized { return jStatus, nil, true, nil } stop := jStatus.RevisionEnd complete := true if stop > jStatus.RevisionStart+1000 { stop = jStatus.RevisionStart + 1000 complete = false } ibrmds, err := j.mdJournal.getRange( jStatus.RevisionStart, stop) if err != nil { return TLFJournalStatus{}, nil, false, err } return jStatus, ibrmds, complete, nil } func (j *tlfJournal) getJournalStatusWithPaths(ctx context.Context, cpp chainsPathPopulator) (TLFJournalStatus, error) { jStatus, ibrmds, complete, err := j.getJournalStatusWithRange() if err != nil { return TLFJournalStatus{}, err } if len(ibrmds) == 0 { return jStatus, nil } ibrmdBareHandle, err := ibrmds[0].MakeBareTlfHandle(nil) if err != nil { return TLFJournalStatus{}, err } irmds := make([]ImmutableRootMetadata, 0, len(ibrmds)) handle, err := MakeTlfHandle( ctx, ibrmdBareHandle, j.config.usernameGetter()) if err != nil { return TLFJournalStatus{}, err } for _, ibrmd := range ibrmds { irmd, err := j.convertImmutableBareRMDToIRMD(ctx, ibrmd, handle) if err != nil { return TLFJournalStatus{}, err } irmds = append(irmds, irmd) } // Make chains over the entire range to get the unflushed files. 
// TODO: cache this chains object and update it when new revisions // appear and when revisions are flushed, instead of rebuilding // every time. chains := newCRChainsEmpty() for _, irmd := range irmds { winfo := writerInfo{ uid: j.uid, kid: j.key.KID(), revision: irmd.Revision(), // There won't be any conflicts, so no need for the // username/devicename. } err := chains.addOps(j.config.Codec(), irmd.data, winfo, irmd.localTimestamp) if err != nil { return TLFJournalStatus{}, nil } } chains.mostRecentMD = irmds[len(irmds)-1] err = cpp.populateChainPaths(ctx, j.log, chains, true) if err != nil { return TLFJournalStatus{}, err } for _, chain := range chains.byOriginal { if len(chain.ops) > 0 { jStatus.UnflushedPaths = append(jStatus.UnflushedPaths, chain.ops[0].getFinalPath().String()) } } if !complete { jStatus.UnflushedPaths = append(jStatus.UnflushedPaths, incompleteUnflushedPathsMarker) } return jStatus, nil } func (j *tlfJournal) getUnflushedBytes() int64 { j.journalLock.RLock() defer j.journalLock.RUnlock() return j.blockJournal.unflushedBytes } func (j *tlfJournal) shutdown() { select { case j.needShutdownCh <- struct{}{}: default: } <-j.backgroundShutdownCh // This may happen before the background goroutine finishes, // but that's ok. j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { // Already shutdown. return } blockJournal := j.blockJournal // Make further accesses error out. j.blockJournal = nil j.mdJournal = nil ctx := context.Background() err := blockJournal.checkInSync(ctx) if err != nil { panic(err) } } // disable prevents new operations from hitting the journal. Will // fail unless the journal is completely empty. func (j *tlfJournal) disable() (wasEnabled bool, err error) { j.journalLock.Lock() defer j.journalLock.Unlock() err = j.checkEnabledLocked() if err != nil { if err == errTLFJournalDisabled { // Already disabled. return false, nil } // Already shutdown. return false, err } blockEntryCount, err := j.blockJournal.length() if err != nil { return false, err } mdEntryCount, err := j.mdJournal.length() if err != nil { return false, err } // You can only disable an empty journal. if blockEntryCount > 0 || mdEntryCount > 0 { return false, errTLFJournalNotEmpty } j.disabled = true return true, nil } func (j *tlfJournal) enable() error { j.journalLock.Lock() defer j.journalLock.Unlock() err := j.checkEnabledLocked() if err == nil { // Already enabled. return nil } else if err != errTLFJournalDisabled { return err } j.disabled = false return nil } // All the functions below just do the equivalent blockJournal or // mdJournal function under j.journalLock. 
func (j *tlfJournal) getBlockDataWithContext( id BlockID, context BlockContext) ( []byte, kbfscrypto.BlockCryptKeyServerHalf, error) { j.journalLock.RLock() defer j.journalLock.RUnlock() if err := j.checkEnabledLocked(); err != nil { return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err } return j.blockJournal.getDataWithContext(id, context) } func (j *tlfJournal) putBlockData( ctx context.Context, id BlockID, context BlockContext, buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf) error { j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return err } err := j.blockJournal.putData(ctx, id, context, buf, serverHalf) if err != nil { return err } j.config.Reporter().NotifySyncStatus(ctx, &keybase1.FSPathSyncStatus{ PublicTopLevelFolder: j.tlfID.IsPublic(), // Path: TODO, // TODO: should this be the complete total for the file/directory, // rather than the diff? SyncingBytes: int64(len(buf)), // SyncingOps: TODO, }) j.signalWork() return nil } func (j *tlfJournal) addBlockReference( ctx context.Context, id BlockID, context BlockContext) error { j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return err } err := j.blockJournal.addReference(ctx, id, context) if err != nil { return err } j.signalWork() return nil } func (j *tlfJournal) removeBlockReferences( ctx context.Context, contexts map[BlockID][]BlockContext) ( liveCounts map[BlockID]int, err error) { j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return nil, err } // Don't remove the block data if we remove the last // reference; we still need it to flush the initial put // operation. // // TODO: It would be nice if we could detect that case and // avoid having to flush the put. liveCounts, err = j.blockJournal.removeReferences(ctx, contexts) if err != nil { return nil, err } j.signalWork() return liveCounts, nil } func (j *tlfJournal) archiveBlockReferences( ctx context.Context, contexts map[BlockID][]BlockContext) error { j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return err } err := j.blockJournal.archiveReferences(ctx, contexts) if err != nil { return err } j.signalWork() return nil } // convertImmutableBareRMDToIRMD decrypts the MD in the given bare root // MD. The caller must NOT hold `j.journalLock`, because blocks // from the journal may need to be read as part of the decryption. 
func (j *tlfJournal) convertImmutableBareRMDToIRMD(ctx context.Context, ibrmd ImmutableBareRootMetadata, handle *TlfHandle) ( ImmutableRootMetadata, error) { brmd, ok := ibrmd.BareRootMetadata.(MutableBareRootMetadata) if !ok { return ImmutableRootMetadata{}, MutableBareRootMetadataNoImplError{} } rmd := RootMetadata{ bareMd: brmd, tlfHandle: handle, } err := decryptMDPrivateData( ctx, j.config.Codec(), j.config.Crypto(), j.config.BlockCache(), j.config.BlockOps(), j.config.mdDecryptionKeyGetter(), j.uid, &rmd, rmd.ReadOnly()) if err != nil { return ImmutableRootMetadata{}, err } irmd := MakeImmutableRootMetadata(&rmd, ibrmd.mdID, ibrmd.localTimestamp) return irmd, nil } func (j *tlfJournal) getMDHead( ctx context.Context) (ImmutableBareRootMetadata, error) { j.journalLock.RLock() defer j.journalLock.RUnlock() if err := j.checkEnabledLocked(); err != nil { return ImmutableBareRootMetadata{}, err } return j.mdJournal.getHead() } func (j *tlfJournal) getMDRange( ctx context.Context, start, stop MetadataRevision) ( []ImmutableBareRootMetadata, error) { j.journalLock.RLock() defer j.journalLock.RUnlock() if err := j.checkEnabledLocked(); err != nil { return nil, err } return j.mdJournal.getRange(start, stop) } func (j *tlfJournal) putMD(ctx context.Context, rmd *RootMetadata) ( MdID, error) { j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return MdID{}, err } mdID, err := j.mdJournal.put(ctx, j.config.Crypto(), j.config.encryptionKeyGetter(), j.config.BlockSplitter(), rmd) if err != nil { return MdID{}, err } err = j.blockJournal.markMDRevision(ctx, rmd.Revision()) if err != nil { return MdID{}, err } j.signalWork() return mdID, nil } func (j *tlfJournal) clearMDs(ctx context.Context, bid BranchID) error { if j.onBranchChange != nil { j.onBranchChange.onTLFBranchChange(j.tlfID, NullBranchID) } j.journalLock.Lock() defer j.journalLock.Unlock() if err := j.checkEnabledLocked(); err != nil { return err } // No need to signal work in this case. return j.mdJournal.clear(ctx, bid) } func (j *tlfJournal) wait(ctx context.Context) error { workLeft, err := j.wg.WaitUnlessPaused(ctx) if err != nil { return err } if workLeft { j.log.CDebugf(ctx, "Wait completed with work left, "+ "due to paused journal") } return nil }
1
13,832
Maybe this should just be passed in at construction time
keybase-kbfs
go
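The reviewer suggests handing the populator to the journal at construction time rather than installing it later behind `unflushedReady`; below is a self-contained toy sketch of that shape (the interface, method, and struct here are invented for illustration and are not the kbfs `chainsPathPopulator` API):

```go
package main

import "fmt"

// pathPopulator stands in for kbfs's chainsPathPopulator in this sketch.
type pathPopulator interface {
	populate(rev int) []string
}

type journal struct {
	populator      pathPopulator    // injected once at construction, never nil
	unflushedPaths map[int][]string // cache keyed by MD revision
}

// newJournal takes the populator up front, so no "ready" channel is needed
// to guard access to the unflushed-paths cache.
func newJournal(p pathPopulator) *journal {
	return &journal{populator: p, unflushedPaths: make(map[int][]string)}
}

type fixedPopulator struct{}

func (fixedPopulator) populate(rev int) []string {
	return []string{fmt.Sprintf("/rev/%d/file", rev)}
}

func main() {
	j := newJournal(fixedPopulator{})
	j.unflushedPaths[1] = j.populator.populate(1)
	fmt.Println(j.unflushedPaths)
}
```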
@@ -367,6 +367,11 @@ struct wlr_backend *wlr_backend_autocreate(struct wl_display *display) { struct wlr_backend *primary_drm = attempt_drm_backend(display, backend, multi->session); + + // discard the returned pointer - the monitor should destroy itself + // when the multi-backend is destroyed + wlr_drm_backend_monitor_create(backend, primary_drm, multi->session); + if (!primary_drm) { wlr_log(WLR_ERROR, "Failed to open any DRM device"); wlr_backend_destroy(libinput);
1
#define _POSIX_C_SOURCE 200809L #include <assert.h> #include <errno.h> #include <libinput.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <wayland-server-core.h> #include <wlr/backend/drm.h> #include <wlr/backend/headless.h> #include <wlr/backend/interface.h> #include <wlr/backend/libinput.h> #include <wlr/backend/multi.h> #include <wlr/backend/noop.h> #include <wlr/backend/session.h> #include <wlr/backend/wayland.h> #include <wlr/config.h> #include <wlr/render/wlr_renderer.h> #include <wlr/util/log.h> #include "backend/backend.h" #include "backend/multi.h" #include "render/allocator.h" #include "util/signal.h" #if WLR_HAS_X11_BACKEND #include <wlr/backend/x11.h> #endif void wlr_backend_init(struct wlr_backend *backend, const struct wlr_backend_impl *impl) { assert(backend); backend->impl = impl; wl_signal_init(&backend->events.destroy); wl_signal_init(&backend->events.new_input); wl_signal_init(&backend->events.new_output); } void wlr_backend_finish(struct wlr_backend *backend) { wlr_signal_emit_safe(&backend->events.destroy, backend); wlr_allocator_destroy(backend->allocator); wlr_renderer_destroy(backend->renderer); } bool wlr_backend_start(struct wlr_backend *backend) { if (backend->impl->start) { return backend->impl->start(backend); } return true; } void wlr_backend_destroy(struct wlr_backend *backend) { if (!backend) { return; } if (backend->impl && backend->impl->destroy) { backend->impl->destroy(backend); } else { free(backend); } } static bool backend_create_renderer(struct wlr_backend *backend) { if (backend->renderer != NULL) { return true; } backend->renderer = wlr_renderer_autocreate(backend); if (backend->renderer == NULL) { return false; } return true; } struct wlr_renderer *wlr_backend_get_renderer(struct wlr_backend *backend) { if (backend->impl->get_renderer) { return backend->impl->get_renderer(backend); } if (backend_get_buffer_caps(backend) != 0) { // If the backend is capable of presenting buffers, automatically create // the renderer if necessary. 
if (!backend_create_renderer(backend)) { wlr_log(WLR_ERROR, "Failed to create backend renderer"); return NULL; } return backend->renderer; } return NULL; } struct wlr_session *wlr_backend_get_session(struct wlr_backend *backend) { if (backend->impl->get_session) { return backend->impl->get_session(backend); } return NULL; } clockid_t wlr_backend_get_presentation_clock(struct wlr_backend *backend) { if (backend->impl->get_presentation_clock) { return backend->impl->get_presentation_clock(backend); } return CLOCK_MONOTONIC; } int wlr_backend_get_drm_fd(struct wlr_backend *backend) { if (!backend->impl->get_drm_fd) { return -1; } return backend->impl->get_drm_fd(backend); } uint32_t backend_get_buffer_caps(struct wlr_backend *backend) { if (!backend->impl->get_buffer_caps) { return 0; } return backend->impl->get_buffer_caps(backend); } struct wlr_allocator *backend_get_allocator(struct wlr_backend *backend) { if (backend->allocator != NULL) { return backend->allocator; } struct wlr_renderer *renderer = wlr_backend_get_renderer(backend); if (renderer == NULL) { return NULL; } backend->allocator = wlr_allocator_autocreate(backend, renderer); if (backend->allocator == NULL) { wlr_log(WLR_ERROR, "Failed to create backend allocator"); } return backend->allocator; } static size_t parse_outputs_env(const char *name) { const char *outputs_str = getenv(name); if (outputs_str == NULL) { return 1; } char *end; int outputs = (int)strtol(outputs_str, &end, 10); if (*end || outputs < 0) { wlr_log(WLR_ERROR, "%s specified with invalid integer, ignoring", name); return 1; } return outputs; } static struct wlr_backend *attempt_wl_backend(struct wl_display *display) { struct wlr_backend *backend = wlr_wl_backend_create(display, NULL); if (backend == NULL) { return NULL; } size_t outputs = parse_outputs_env("WLR_WL_OUTPUTS"); for (size_t i = 0; i < outputs; ++i) { wlr_wl_output_create(backend); } return backend; } #if WLR_HAS_X11_BACKEND static struct wlr_backend *attempt_x11_backend(struct wl_display *display, const char *x11_display) { struct wlr_backend *backend = wlr_x11_backend_create(display, x11_display); if (backend == NULL) { return NULL; } size_t outputs = parse_outputs_env("WLR_X11_OUTPUTS"); for (size_t i = 0; i < outputs; ++i) { wlr_x11_output_create(backend); } return backend; } #endif static struct wlr_backend *attempt_headless_backend( struct wl_display *display) { struct wlr_backend *backend = wlr_headless_backend_create(display); if (backend == NULL) { return NULL; } size_t outputs = parse_outputs_env("WLR_HEADLESS_OUTPUTS"); for (size_t i = 0; i < outputs; ++i) { wlr_headless_add_output(backend, 1280, 720); } return backend; } static struct wlr_backend *attempt_noop_backend(struct wl_display *display) { struct wlr_backend *backend = wlr_noop_backend_create(display); if (backend == NULL) { return NULL; } size_t outputs = parse_outputs_env("WLR_NOOP_OUTPUTS"); for (size_t i = 0; i < outputs; ++i) { wlr_noop_add_output(backend); } return backend; } static struct wlr_backend *attempt_drm_backend(struct wl_display *display, struct wlr_backend *backend, struct wlr_session *session) { struct wlr_device *gpus[8]; ssize_t num_gpus = wlr_session_find_gpus(session, 8, gpus); if (num_gpus < 0) { wlr_log(WLR_ERROR, "Failed to find GPUs"); return NULL; } wlr_log(WLR_INFO, "Found %zu GPUs", num_gpus); struct wlr_backend *primary_drm = NULL; for (size_t i = 0; i < (size_t)num_gpus; ++i) { struct wlr_backend *drm = wlr_drm_backend_create(display, session, gpus[i], primary_drm); if (!drm) { wlr_log(WLR_ERROR, 
"Failed to create DRM backend"); continue; } if (!primary_drm) { primary_drm = drm; } wlr_multi_backend_add(backend, drm); } return primary_drm; } static struct wlr_backend *attempt_backend_by_name(struct wl_display *display, struct wlr_backend *backend, struct wlr_session **session, const char *name) { if (strcmp(name, "wayland") == 0) { return attempt_wl_backend(display); #if WLR_HAS_X11_BACKEND } else if (strcmp(name, "x11") == 0) { return attempt_x11_backend(display, NULL); #endif } else if (strcmp(name, "headless") == 0) { return attempt_headless_backend(display); } else if (strcmp(name, "noop") == 0) { return attempt_noop_backend(display); } else if (strcmp(name, "drm") == 0 || strcmp(name, "libinput") == 0) { // DRM and libinput need a session if (!*session) { *session = wlr_session_create(display); if (!*session) { wlr_log(WLR_ERROR, "failed to start a session"); return NULL; } } if (strcmp(name, "libinput") == 0) { return wlr_libinput_backend_create(display, *session); } else { return attempt_drm_backend(display, backend, *session); } } wlr_log(WLR_ERROR, "unrecognized backend '%s'", name); return NULL; } struct wlr_backend *wlr_backend_autocreate(struct wl_display *display) { struct wlr_backend *backend = wlr_multi_backend_create(display); struct wlr_multi_backend *multi = (struct wlr_multi_backend *)backend; if (!backend) { wlr_log(WLR_ERROR, "could not allocate multibackend"); return NULL; } char *names = getenv("WLR_BACKENDS"); if (names) { wlr_log(WLR_INFO, "Loading user-specified backends due to WLR_BACKENDS: %s", names); names = strdup(names); if (names == NULL) { wlr_log(WLR_ERROR, "allocation failed"); wlr_backend_destroy(backend); return NULL; } char *saveptr; char *name = strtok_r(names, ",", &saveptr); while (name != NULL) { struct wlr_backend *subbackend = attempt_backend_by_name(display, backend, &multi->session, name); if (subbackend == NULL) { wlr_log(WLR_ERROR, "failed to start backend '%s'", name); wlr_session_destroy(multi->session); wlr_backend_destroy(backend); free(names); return NULL; } if (!wlr_multi_backend_add(backend, subbackend)) { wlr_log(WLR_ERROR, "failed to add backend '%s'", name); wlr_session_destroy(multi->session); wlr_backend_destroy(backend); free(names); return NULL; } name = strtok_r(NULL, ",", &saveptr); } free(names); return backend; } if (getenv("WAYLAND_DISPLAY") || getenv("WAYLAND_SOCKET")) { struct wlr_backend *wl_backend = attempt_wl_backend(display); if (!wl_backend) { goto error; } wlr_multi_backend_add(backend, wl_backend); return backend; } #if WLR_HAS_X11_BACKEND const char *x11_display = getenv("DISPLAY"); if (x11_display) { struct wlr_backend *x11_backend = attempt_x11_backend(display, x11_display); if (!x11_backend) { goto error; } wlr_multi_backend_add(backend, x11_backend); return backend; } #endif // Attempt DRM+libinput multi->session = wlr_session_create(display); if (!multi->session) { wlr_log(WLR_ERROR, "Failed to start a DRM session"); wlr_backend_destroy(backend); return NULL; } struct wlr_backend *libinput = wlr_libinput_backend_create(display, multi->session); if (!libinput) { wlr_log(WLR_ERROR, "Failed to start libinput backend"); wlr_session_destroy(multi->session); wlr_backend_destroy(backend); return NULL; } wlr_multi_backend_add(backend, libinput); struct wlr_backend *primary_drm = attempt_drm_backend(display, backend, multi->session); if (!primary_drm) { wlr_log(WLR_ERROR, "Failed to open any DRM device"); wlr_backend_destroy(libinput); wlr_session_destroy(multi->session); wlr_backend_destroy(backend); return 
NULL; } return backend; error: wlr_backend_destroy(backend); return NULL; }
1
16,652
We should create the monitor after the `if (!primary_drm)` check.
swaywm-wlroots
c
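A minimal sketch of the ordering this review comment asks for, slotted into the tail of wlr_backend_autocreate() from the file above. The wlr_drm_backend_monitor_create() call and its arguments are taken from the row's patch; the error path mirrors the existing code, and the snippet is an illustration under those assumptions, not the project's final code.

	struct wlr_backend *primary_drm =
		attempt_drm_backend(display, backend, multi->session);
	if (!primary_drm) {
		wlr_log(WLR_ERROR, "Failed to open any DRM device");
		wlr_backend_destroy(libinput);
		wlr_session_destroy(multi->session);
		wlr_backend_destroy(backend);
		return NULL;
	}

	// Only now is primary_drm known to be valid. Discard the returned
	// pointer - the monitor destroys itself with the multi-backend.
	wlr_drm_backend_monitor_create(backend, primary_drm, multi->session);

	return backend;

Creating the monitor only after the NULL check avoids handing a failed DRM backend to the monitor at all, which is the point of the reviewer's suggestion.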
@@ -85,6 +85,7 @@ def whitelist_generator(): yield 'qutebrowser.utils.log.QtWarningFilter.filter' yield 'logging.LogRecord.log_color' yield 'qutebrowser.browser.pdfjs.is_available' + yield 'qutebrowser.browser.tab.TabData._initializing' # vulture doesn't notice the hasattr() and thus thinks netrc_used is unused # in NetworkManager.on_authentication_required yield 'PyQt5.QtNetwork.QNetworkReply.netrc_used'
1
#!/usr/bin/env python # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2015-2016 Florian Bruhin (The Compiler) <[email protected]> # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Run vulture on the source files and filter out false-positives.""" import sys import os import re import tempfile import inspect import argparse import vulture import qutebrowser.app # pylint: disable=unused-import from qutebrowser.commands import cmdutils from qutebrowser.utils import utils from qutebrowser.browser.webkit import rfc6266 from qutebrowser.browser.webkit.network import qutescheme def whitelist_generator(): """Generator which yields lines to add to a vulture whitelist.""" # qutebrowser commands for cmd in cmdutils.cmd_dict.values(): yield utils.qualname(cmd.handler) # pyPEG2 classes for name, member in inspect.getmembers(rfc6266, inspect.isclass): for attr in ('grammar', 'regex'): if hasattr(member, attr): yield 'qutebrowser.browser.webkit.rfc6266.{}.{}'.format(name, attr) # PyQt properties for attr in ('prompt_active', 'command_active', 'insert_active', 'caret_mode'): yield 'qutebrowser.mainwindow.statusbar.bar.StatusBar.' + attr yield 'qutebrowser.mainwindow.statusbar.url.UrlText.urltype' # Not used yet, but soon (or when debugging) yield 'qutebrowser.config.configtypes.Regex' yield 'qutebrowser.utils.debug.log_events' yield 'qutebrowser.utils.debug.log_signals' yield 'qutebrowser.utils.debug.qflags_key' yield 'qutebrowser.utils.qtutils.QtOSError.qt_errno' yield 'qutebrowser.utils.usertypes.NeighborList.firstitem' yield 'scripts.utils.bg_colors' yield 'scripts.utils.print_subtitle' # Qt attributes yield 'PyQt5.QtWebKit.QWebPage.ErrorPageExtensionReturn().baseUrl' yield 'PyQt5.QtWebKit.QWebPage.ErrorPageExtensionReturn().content' yield 'PyQt5.QtWebKit.QWebPage.ErrorPageExtensionReturn().encoding' yield 'PyQt5.QtWebKit.QWebPage.ErrorPageExtensionReturn().fileNames' yield 'PyQt5.QtGui.QAbstractTextDocumentLayout.PaintContext().clip' yield 'PyQt5.QtWidgets.QStyleOptionViewItem.backgroundColor' # qute:... handlers for func in qutescheme.HANDLERS.values(): yield 'qutebrowser.browser.webkit.network.qutescheme.' + func.__name__ # Other false-positives yield ('qutebrowser.completion.models.sortfilter.CompletionFilterModel().' 'lessThan') yield 'qutebrowser.utils.jinja.Loader.get_source' yield 'qutebrowser.utils.log.VDEBUG' yield 'qutebrowser.utils.log.QtWarningFilter.filter' yield 'logging.LogRecord.log_color' yield 'qutebrowser.browser.pdfjs.is_available' # vulture doesn't notice the hasattr() and thus thinks netrc_used is unused # in NetworkManager.on_authentication_required yield 'PyQt5.QtNetwork.QNetworkReply.netrc_used' for attr in ('fileno', 'truncate', 'closed', 'readable'): yield 'qutebrowser.utils.qtutils.PyQIODevice.' + attr for attr in ('priority', 'visit_call'): yield 'scripts.dev.pylint_checkers.config.' 
+ attr yield 'scripts.dev.pylint_checkers.modeline.process_module' for attr in ('_get_default_metavar_for_optional', '_get_default_metavar_for_positional', '_metavar_formatter'): yield 'scripts.dev.src2asciidoc.UsageFormatter.' + attr def filter_func(item): """Check if a missing function should be filtered or not. Return: True if the missing function should be filtered/ignored, False otherwise. """ return bool(re.match(r'[a-z]+[A-Z][a-zA-Z]+', str(item))) def report(items): """Generate a report based on the given vulture.Item's. Based on vulture.Vulture.report, but we can't use that as we can't set the properties which get used for the items. """ output = [] for item in sorted(items, key=lambda e: (e.filename.lower(), e.lineno)): relpath = os.path.relpath(item.filename) path = relpath if not relpath.startswith('..') else item.filename output.append("{}:{}: Unused {} '{}'".format(path, item.lineno, item.typ, item)) return output def run(files): """Run vulture over the given files.""" with tempfile.NamedTemporaryFile(mode='w', delete=False) as whitelist_file: for line in whitelist_generator(): whitelist_file.write(line + '\n') whitelist_file.close() vult = vulture.Vulture(exclude=[], verbose=False) vult.scavenge(files + [whitelist_file.name]) os.remove(whitelist_file.name) filters = { 'unused_funcs': filter_func, 'unused_props': lambda item: False, 'unused_vars': lambda item: False, 'unused_attrs': lambda item: False, } items = [] for attr, func in filters.items(): sub_items = getattr(vult, attr) for item in sub_items: filtered = func(item) if not filtered: items.append(item) return report(items) def main(): parser = argparse.ArgumentParser() parser.add_argument('files', nargs='*', default=['qutebrowser', 'scripts']) args = parser.parse_args() out = run(args.files) for line in out: print(line) sys.exit(bool(out)) if __name__ == '__main__': main()
1
15,152
Can be removed now that slots are used
qutebrowser-qutebrowser
py
@@ -216,7 +216,7 @@ describe 'AccountsController' do describe 'edit' do it 'must respond with not_found when account does not exist' do get :edit, id: :anything - must_respond_with :not_found + must_respond_with :unauthorized end it 'must respond with success' do
1
require 'test_helper' describe 'AccountsController' do let(:user) { accounts(:user) } let(:start_date) do (Date.today - 6.years).beginning_of_month end let(:cbp) do [{ month: Time.parse('2010-04-30 20:00:00 -0400'), commits: 1, position_id: 3 }, { month: Time.parse('2010-04-30 20:00:00 -0400'), commits: 6, position_id: 1 }, { month: Time.parse('2011-01-01 00:00:00'), commits: 1, position_id: 3 }, { month: Time.parse('2012-11-01 00:00:00'), commits: 1, position_id: 1 }] end def start_date_str(month = 0) (Time.now - 6.years + month.months).beginning_of_month.strftime('%Y-%m-01 00:00:00') end let(:user) do account = accounts(:user) account.best_vita.vita_fact.destroy create(:vita_fact, vita_id: account.best_vita_id) account end let(:admin) { accounts(:admin) } describe 'index' do it 'should return claimed persons with their cbp_map and positions_map' do user.best_vita.vita_fact.reload.commits_by_project get :index must_respond_with :ok assigns(:positions_map).length.must_equal 2 assigns(:people).length.must_equal 9 assigns(:cbp_map).length.must_equal 9 end end describe 'show' do it 'should set the account and logos' do get :show, id: admin.login must_respond_with :ok assigns(:account).must_equal admin assigns(:logos).must_be_empty end it 'should redirect if account is disabled' do Account::Access.any_instance.stubs(:disabled?).returns(true) get :show, id: admin.login must_redirect_to disabled_account_url(admin) end it 'should redirect if account is labeled a spammer' do account = create(:account) account_access = Account::Access.new(account) account_access.spam! account_access.spam?.must_equal true account.level.must_equal Account::Access::SPAM get :show, id: account.id must_redirect_to disabled_account_url(account) end end describe 'unsubscribe_emails' do it 'a valid key for a account should unsubscribe the user' do key = Ohloh::Cipher.encrypt(create(:account).id.to_s) get :unsubscribe_emails, key: CGI.unescape(key) must_respond_with :ok assigns(:account).email_master.must_equal false end end describe 'new' do it 'must respond with success' do get :new must_respond_with :success end it 'must redirect to maintenance during read only mode' do ApplicationController.any_instance.stubs(:read_only_mode?).returns(true) get :new must_redirect_to maintenance_path end end describe 'new' do it 'must respond with success' do get :disabled, id: create(:spammer).to_param must_respond_with :success end end describe 'create' do let(:account_attributes) do FactoryGirl.attributes_for(:account).select do |k, _v| %w(login email email_confirmation password password_confirmation).include?(k.to_s) end end let(:valid_params) do valid_honeypot_and_captcha_params = { token: :valid_token, honeypot: '', recaptcha_challenge_field: :challenge, recaptcha_response_field: :response } { account: account_attributes }.merge(valid_honeypot_and_captcha_params) end before do HoneyPotField.create!(field_name: :honeypot, token: :valid_token) AccountsController.any_instance.stubs(:verify_recaptcha) end it 'must render the new template when validations fail' do post :create, valid_params.merge(account: { email: '' }) assigns(:account).wont_be :valid? must_render_template :new end it 'must render the new template for invalid captcha' do assert_no_difference 'Account.count' do stub_verify_recaptcha_to_add_captcha_error post :create, valid_params.merge(recaptcha_response_field: '') assigns(:account).errors.messages[:captcha].must_be :present? 
must_render_template :new end end it 'must redirect for valid captcha' do assert_difference 'Account.count', 1 do post :create, valid_params must_respond_with :redirect end end it 'must redirect to home page if honeypot field is filled' do post :create, valid_params.merge(honeypot: :filled_by_bot) must_redirect_to root_path end it 'must redirect to home page if token field value is invalid' do post :create, valid_params.merge(token: :invalid_token) must_redirect_to root_path end it 'must redirect to home page if token field is expired' do HoneyPotField.last.update!(expired: true) post :create, valid_params must_redirect_to root_path end it 'must redirect to maintenance during read only mode' do ApplicationController.any_instance.stubs(:read_only_mode?).returns(true) assert_no_difference 'Account.count' do post :create, valid_params must_redirect_to maintenance_path end end it 'must require login' do assert_no_difference 'Account.count' do post :create, valid_params.merge(account: { login: '' }) assigns(:account).errors.messages[:login].must_be :present? end end it 'must require password' do assert_no_difference 'Account.count' do post :create, valid_params.merge(account: { password: '' }) assigns(:account).errors.messages[:password].must_be :present? end end it 'must require email and email_confirmation' do assert_no_difference 'Account.count' do post :create, valid_params.merge(account: { email_confirmation: '', email: '' }) assigns(:account).errors.messages[:email_confirmation].must_be :present? assigns(:account).errors.messages[:email_confirmation].must_be :present? end end it 'must render the new account page for a blacklisted email domain' do bad_domain = 'really_bad_domain.com' DomainBlacklist.create(domain: bad_domain) assert_no_difference 'Account.count' do email = "bad_guy@#{ bad_domain }" post :create, valid_params.merge(account: { email: email, email_confirmation: email }) must_render_template :new Account.find_by(email: email).wont_be :present? end end it 'must create an action record when relevant params are passed' do person = create(:person) assert_difference 'Action.count', 1 do post :create, valid_params.merge(_action: "claim_#{ person.id }") end action = Action.last action.status.must_equal 'after_activation' action.claim_person_id.must_equal person.id action.account_id.must_equal Account.last.id end end describe 'edit' do it 'must respond with not_found when account does not exist' do get :edit, id: :anything must_respond_with :not_found end it 'must respond with success' do account = create(:account) login_as account get :edit, id: account.to_param must_render_template 'edit' must_respond_with :success end it 'must render error if not logged in' do get :edit, id: create(:account).id must_render_template 'error.html' end it 'must redirect to new_session if account is not owned' do account = create(:account) login_as account get :edit, id: create(:account).id must_redirect_to new_session_path end it 'must logout spammer trying to edit or update' do skip 'FIXME: Integrate alongwith handle_spammer_account' account = create(:account) login_as account Account::Access.new(account).spam! 
get :edit, id: account.to_param session[:account_id].must_be_nil account.reload.remember_token.must_be_nil cookies[:auth_token].must_be_nil flash[:notice].wont_be_nil must_respond_with :redirect end end describe 'update' do let(:account) { create(:account) } before { login_as account } it 'must fail for invalid data' do url = :not_an_url post :update, id: account, account: { url: url } must_render_template 'edit' account.reload.url.wont_equal url end it 'must display description after a validation error' do text = 'about raw content' post :update, id: account.to_param, account: { email: '', about_raw: text } must_select 'textarea.edit-description', text: text end it 'must not allow description beyond 500 characters' do post :update, id: account.to_param, account: { about_raw: 'a' * 501 } assigns(:account).wont_be_nil assigns(:account).errors.wont_be_nil assigns(:account).errors.messages[:'markup.raw'].must_be :present? must_select "p.error[rel='markup.raw']", text: 'is too long (maximum is 500 characters)' end it 'must accept description within 500 characters' do post :update, id: account.to_param, account: { about_raw: 'a' * 99 + "\n" + 'a' * 99 + "\r" + 'a' * 300 } must_redirect_to account end it 'must be successful' do location = 'Washington' post :update, id: account.to_param, account: { location: location } flash[:notice].must_equal 'Save successful!' account.reload.location.must_equal location end it 'must not allow updating other user\'s account' do post :update, id: create(:account).id, account: { location: :Wherever } must_redirect_to new_session_path flash.now[:error].must_match(/You can't edit another's account/) end end describe 'destroy' do it 'must allow deletion' do AnonymousAccount.create! account = create(:account) login_as account assert_difference 'Account.count', -1 do post :destroy, id: account.to_param must_redirect_to edit_deleted_account_path(account.login) end end it 'must not allow deletion by other accounts' do account = create(:account) login_as create(:account) assert_no_difference 'Account.count' do post :destroy, id: account.to_param flash.now[:error].must_match(/You can't edit another's account/) end end it 'while deleting an account, edits.account_id and edits.undone_by should be marked with Anonymous Coward ID' do skip 'Fix edits logic' project = create(:project) account = create(:account) login_as account anonymous_account_id = Account.find_or_create_anonymous_account.id Edit.delete_all manage = project.manages.create!(account: account) manage.update!(approved_by: account.id) project.update!(best_analysis_id: nil, editor_account: account) project.edits.first.account_id.must_equal account.id project.edits.first.undone_by.must_equal nil post :destroy, id: account.to_param project.edits.first.account_id.must_equal anonymous_account_id end it 'when deleting an account set the approved_by and deleted_by fields to Anonymous Coward ID' do skip 'Fix edits logic' project = create(:project) account = create(:account) login_as account Edit.delete_all manage = project.manages.create!(account: account) manage.update!(approved_by: account.id) project.update!(best_analysis_id: nil, editor_account: account) project.manages.wont_be :empty? post :destroy, id: account.to_param project.manages.must_be :empty? 
end end describe 'settings' do it 'should render settings' do get :settings, id: user.id end end private def stub_verify_recaptcha_to_add_captcha_error AccountsController.any_instance.unstub(:verify_recaptcha) ApplicationController.class_eval do def verify_recaptcha(options) options[:model].errors.add(:captcha, 'some error') end end end end
1
7,530
The description should match the test
blackducksoftware-ohloh-ui
rb
@@ -165,12 +165,9 @@ static void xwayland_finish_display(struct wlr_xwayland *wlr_xwayland) { unlink_display_sockets(wlr_xwayland->display); wlr_xwayland->display = -1; - unsetenv("DISPLAY"); + wlr_xwayland->display_name[0] = '\0'; } -static bool xwayland_start_display(struct wlr_xwayland *wlr_xwayland, - struct wl_display *wl_display); - static bool xwayland_start_server(struct wlr_xwayland *wlr_xwayland); static bool xwayland_start_server_lazy(struct wlr_xwayland *wlr_xwayland);
1
#define _POSIX_C_SOURCE 200112L #include <errno.h> #include <fcntl.h> #include <signal.h> #include <stdio.h> #include <stdlib.h> #include <stdlib.h> #include <sys/socket.h> #include <sys/types.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include <wayland-server.h> #include <wlr/util/log.h> #include <wlr/xwayland.h> #include "sockets.h" #include "util/signal.h" #include "xwayland/xwm.h" struct wlr_xwayland_cursor { uint8_t *pixels; uint32_t stride; uint32_t width; uint32_t height; int32_t hotspot_x; int32_t hotspot_y; }; static void safe_close(int fd) { if (fd >= 0) { close(fd); } } static int fill_arg(char ***argv, const char *fmt, ...) { int len; char **cur_arg = *argv; va_list args; va_start(args, fmt); len = vsnprintf(NULL, 0, fmt, args) + 1; va_end(args); while (*cur_arg) { cur_arg++; } *cur_arg = malloc(len); if (!*cur_arg) { return -1; } *argv = cur_arg; va_start(args, fmt); len = vsnprintf(*cur_arg, len, fmt, args); va_end(args); return len; } _Noreturn static void exec_xwayland(struct wlr_xwayland *wlr_xwayland) { if (!set_cloexec(wlr_xwayland->x_fd[0], false) || !set_cloexec(wlr_xwayland->x_fd[1], false) || !set_cloexec(wlr_xwayland->wm_fd[1], false) || !set_cloexec(wlr_xwayland->wl_fd[1], false)) { _exit(EXIT_FAILURE); } /* Make Xwayland signal us when it's ready */ signal(SIGUSR1, SIG_IGN); char *argv[] = { "Xwayland", NULL /* display, e.g. :1 */, "-rootless", "-terminate", "-listen", NULL /* x_fd[0] */, "-listen", NULL /* x_fd[1] */, "-wm", NULL /* wm_fd[1] */, NULL, }; char **cur_arg = argv; if (fill_arg(&cur_arg, ":%d", wlr_xwayland->display) < 0 || fill_arg(&cur_arg, "%d", wlr_xwayland->x_fd[0]) < 0 || fill_arg(&cur_arg, "%d", wlr_xwayland->x_fd[1]) < 0 || fill_arg(&cur_arg, "%d", wlr_xwayland->wm_fd[1]) < 0) { wlr_log_errno(WLR_ERROR, "alloc/print failure"); _exit(EXIT_FAILURE); } char wayland_socket_str[16]; snprintf(wayland_socket_str, sizeof(wayland_socket_str), "%d", wlr_xwayland->wl_fd[1]); setenv("WAYLAND_SOCKET", wayland_socket_str, true); wlr_log(WLR_INFO, "WAYLAND_SOCKET=%d Xwayland :%d -rootless -terminate -listen %d -listen %d -wm %d", wlr_xwayland->wl_fd[1], wlr_xwayland->display, wlr_xwayland->x_fd[0], wlr_xwayland->x_fd[1], wlr_xwayland->wm_fd[1]); // Closes stdout/stderr depending on log verbosity enum wlr_log_importance verbosity = wlr_log_get_verbosity(); int devnull = open("/dev/null", O_WRONLY | O_CREAT, 0666); if (devnull < 0) { wlr_log_errno(WLR_ERROR, "XWayland: failed to open /dev/null"); _exit(EXIT_FAILURE); } if (verbosity < WLR_INFO) { dup2(devnull, STDOUT_FILENO); } if (verbosity < WLR_ERROR) { dup2(devnull, STDERR_FILENO); } // This returns if and only if the call fails execvp("Xwayland", argv); wlr_log_errno(WLR_ERROR, "failed to exec Xwayland"); close(devnull); _exit(EXIT_FAILURE); } static void xwayland_finish_server(struct wlr_xwayland *wlr_xwayland) { if (!wlr_xwayland || wlr_xwayland->display == -1) { return; } if (wlr_xwayland->x_fd_read_event[0]) { wl_event_source_remove(wlr_xwayland->x_fd_read_event[0]); wl_event_source_remove(wlr_xwayland->x_fd_read_event[1]); wlr_xwayland->x_fd_read_event[0] = wlr_xwayland->x_fd_read_event[1] = NULL; } if (wlr_xwayland->cursor != NULL) { free(wlr_xwayland->cursor); } xwm_destroy(wlr_xwayland->xwm); if (wlr_xwayland->client) { wl_list_remove(&wlr_xwayland->client_destroy.link); wl_client_destroy(wlr_xwayland->client); } if (wlr_xwayland->sigusr1_source) { wl_event_source_remove(wlr_xwayland->sigusr1_source); } safe_close(wlr_xwayland->wl_fd[0]); safe_close(wlr_xwayland->wl_fd[1]); 
safe_close(wlr_xwayland->wm_fd[0]); safe_close(wlr_xwayland->wm_fd[1]); memset(wlr_xwayland, 0, offsetof(struct wlr_xwayland, display)); wlr_xwayland->wl_fd[0] = wlr_xwayland->wl_fd[1] = -1; wlr_xwayland->wm_fd[0] = wlr_xwayland->wm_fd[1] = -1; /* We do not kill the Xwayland process, it dies to broken pipe * after we close our side of the wm/wl fds. This is more reliable * than trying to kill something that might no longer be Xwayland. */ } static void xwayland_finish_display(struct wlr_xwayland *wlr_xwayland) { if (!wlr_xwayland || wlr_xwayland->display == -1) { return; } safe_close(wlr_xwayland->x_fd[0]); safe_close(wlr_xwayland->x_fd[1]); wlr_xwayland->x_fd[0] = wlr_xwayland->x_fd[1] = -1; wl_list_remove(&wlr_xwayland->display_destroy.link); unlink_display_sockets(wlr_xwayland->display); wlr_xwayland->display = -1; unsetenv("DISPLAY"); } static bool xwayland_start_display(struct wlr_xwayland *wlr_xwayland, struct wl_display *wl_display); static bool xwayland_start_server(struct wlr_xwayland *wlr_xwayland); static bool xwayland_start_server_lazy(struct wlr_xwayland *wlr_xwayland); static void handle_client_destroy(struct wl_listener *listener, void *data) { struct wlr_xwayland *wlr_xwayland = wl_container_of(listener, wlr_xwayland, client_destroy); if (wlr_xwayland->sigusr1_source) { // Xwayland failed to start, let the sigusr1 handler deal with it return; } // Don't call client destroy: it's being destroyed already wlr_xwayland->client = NULL; wl_list_remove(&wlr_xwayland->client_destroy.link); xwayland_finish_server(wlr_xwayland); if (time(NULL) - wlr_xwayland->server_start > 5) { if (wlr_xwayland->lazy) { wlr_log(WLR_INFO, "Restarting Xwayland (lazy)"); xwayland_start_server_lazy(wlr_xwayland); } else { wlr_log(WLR_INFO, "Restarting Xwayland"); xwayland_start_server(wlr_xwayland); } } } static void handle_display_destroy(struct wl_listener *listener, void *data) { struct wlr_xwayland *wlr_xwayland = wl_container_of(listener, wlr_xwayland, display_destroy); // Don't call client destroy: the display is being destroyed, it's too late if (wlr_xwayland->client) { wlr_xwayland->client = NULL; wl_list_remove(&wlr_xwayland->client_destroy.link); } wlr_xwayland_destroy(wlr_xwayland); } static int xserver_handle_ready(int signal_number, void *data) { struct wlr_xwayland *wlr_xwayland = data; int stat_val = -1; while (waitpid(wlr_xwayland->pid, &stat_val, 0) < 0) { if (errno == EINTR) { continue; } wlr_log_errno(WLR_ERROR, "waitpid for Xwayland fork failed"); return 1; } if (stat_val) { wlr_log(WLR_ERROR, "Xwayland startup failed, not setting up xwm"); return 1; } wlr_log(WLR_DEBUG, "Xserver is ready"); wlr_xwayland->xwm = xwm_create(wlr_xwayland); if (!wlr_xwayland->xwm) { xwayland_finish_server(wlr_xwayland); return 1; } if (wlr_xwayland->seat) { xwm_set_seat(wlr_xwayland->xwm, wlr_xwayland->seat); } wl_event_source_remove(wlr_xwayland->sigusr1_source); wlr_xwayland->sigusr1_source = NULL; if (wlr_xwayland->cursor != NULL) { struct wlr_xwayland_cursor *cur = wlr_xwayland->cursor; xwm_set_cursor(wlr_xwayland->xwm, cur->pixels, cur->stride, cur->width, cur->height, cur->hotspot_x, cur->hotspot_y); free(cur); wlr_xwayland->cursor = NULL; } wlr_signal_emit_safe(&wlr_xwayland->events.ready, wlr_xwayland); /* ready is a one-shot signal, fire and forget */ wl_signal_init(&wlr_xwayland->events.ready); return 1; /* wayland event loop dispatcher's count */ } static int xwayland_socket_connected(int fd, uint32_t mask, void* data){ struct wlr_xwayland *wlr_xwayland = data; 
wl_event_source_remove(wlr_xwayland->x_fd_read_event[0]); wl_event_source_remove(wlr_xwayland->x_fd_read_event[1]); wlr_xwayland->x_fd_read_event[0] = wlr_xwayland->x_fd_read_event[1] = NULL; xwayland_start_server(wlr_xwayland); return 0; } static bool xwayland_start_display(struct wlr_xwayland *wlr_xwayland, struct wl_display *wl_display) { wlr_xwayland->display_destroy.notify = handle_display_destroy; wl_display_add_destroy_listener(wl_display, &wlr_xwayland->display_destroy); wlr_xwayland->display = open_display_sockets(wlr_xwayland->x_fd); if (wlr_xwayland->display < 0) { xwayland_finish_display(wlr_xwayland); return false; } char display_name[16]; snprintf(display_name, sizeof(display_name), ":%d", wlr_xwayland->display); setenv("DISPLAY", display_name, true); return true; } static bool xwayland_start_server(struct wlr_xwayland *wlr_xwayland) { if (socketpair(AF_UNIX, SOCK_STREAM, 0, wlr_xwayland->wl_fd) != 0 || socketpair(AF_UNIX, SOCK_STREAM, 0, wlr_xwayland->wm_fd) != 0) { wlr_log_errno(WLR_ERROR, "socketpair failed"); xwayland_finish_server(wlr_xwayland); return false; } if (!set_cloexec(wlr_xwayland->wl_fd[0], true) || !set_cloexec(wlr_xwayland->wl_fd[1], true) || !set_cloexec(wlr_xwayland->wm_fd[0], true) || !set_cloexec(wlr_xwayland->wm_fd[1], true)) { xwayland_finish_server(wlr_xwayland); return false; } wlr_xwayland->server_start = time(NULL); wlr_xwayland->client = wl_client_create(wlr_xwayland->wl_display, wlr_xwayland->wl_fd[0]); if (!wlr_xwayland->client) { wlr_log_errno(WLR_ERROR, "wl_client_create failed"); xwayland_finish_server(wlr_xwayland); return false; } wlr_xwayland->wl_fd[0] = -1; /* not ours anymore */ wlr_xwayland->client_destroy.notify = handle_client_destroy; wl_client_add_destroy_listener(wlr_xwayland->client, &wlr_xwayland->client_destroy); struct wl_event_loop *loop = wl_display_get_event_loop(wlr_xwayland->wl_display); wlr_xwayland->sigusr1_source = wl_event_loop_add_signal(loop, SIGUSR1, xserver_handle_ready, wlr_xwayland); wlr_xwayland->pid = fork(); if (wlr_xwayland->pid < 0) { wlr_log_errno(WLR_ERROR, "fork failed"); xwayland_finish_server(wlr_xwayland); return false; } else if (wlr_xwayland->pid == 0) { /* Double-fork, but we need to forward SIGUSR1 once Xserver(1) * is ready, or error if there was one. 
*/ pid_t ppid = getppid(); sigset_t sigset; sigemptyset(&sigset); sigaddset(&sigset, SIGUSR1); sigaddset(&sigset, SIGCHLD); sigprocmask(SIG_BLOCK, &sigset, NULL); pid_t pid = fork(); if (pid < 0) { wlr_log_errno(WLR_ERROR, "second fork failed"); _exit(EXIT_FAILURE); } else if (pid == 0) { exec_xwayland(wlr_xwayland); } int sig; sigwait(&sigset, &sig); kill(ppid, SIGUSR1); wlr_log(WLR_DEBUG, "sent SIGUSR1 to process %d", ppid); if (sig == SIGCHLD) { waitpid(pid, NULL, 0); _exit(EXIT_FAILURE); } _exit(EXIT_SUCCESS); } /* close child fds */ /* remain managing x sockets for lazy start */ close(wlr_xwayland->wl_fd[1]); close(wlr_xwayland->wm_fd[1]); wlr_xwayland->wl_fd[1] = wlr_xwayland->wm_fd[1] = -1; return true; } static bool xwayland_start_server_lazy(struct wlr_xwayland *wlr_xwayland) { struct wl_event_loop *loop = wl_display_get_event_loop(wlr_xwayland->wl_display); wlr_xwayland->x_fd_read_event[0] = wl_event_loop_add_fd(loop, wlr_xwayland->x_fd[0], WL_EVENT_READABLE, xwayland_socket_connected, wlr_xwayland); wlr_xwayland->x_fd_read_event[1] = wl_event_loop_add_fd(loop, wlr_xwayland->x_fd[1], WL_EVENT_READABLE, xwayland_socket_connected, wlr_xwayland); return true; } void wlr_xwayland_destroy(struct wlr_xwayland *wlr_xwayland) { if (!wlr_xwayland) { return; } wlr_xwayland_set_seat(wlr_xwayland, NULL); xwayland_finish_server(wlr_xwayland); xwayland_finish_display(wlr_xwayland); free(wlr_xwayland); } struct wlr_xwayland *wlr_xwayland_create(struct wl_display *wl_display, struct wlr_compositor *compositor, bool lazy) { struct wlr_xwayland *wlr_xwayland = calloc(1, sizeof(struct wlr_xwayland)); if (!wlr_xwayland) { return NULL; } wlr_xwayland->wl_display = wl_display; wlr_xwayland->compositor = compositor; wlr_xwayland->lazy = lazy; wlr_xwayland->x_fd[0] = wlr_xwayland->x_fd[1] = -1; wlr_xwayland->wl_fd[0] = wlr_xwayland->wl_fd[1] = -1; wlr_xwayland->wm_fd[0] = wlr_xwayland->wm_fd[1] = -1; wl_signal_init(&wlr_xwayland->events.new_surface); wl_signal_init(&wlr_xwayland->events.ready); if (!xwayland_start_display(wlr_xwayland, wl_display)) { goto error_alloc; } if (wlr_xwayland->lazy) { if (!xwayland_start_server_lazy(wlr_xwayland)) { goto error_display; } } else { if (!xwayland_start_server(wlr_xwayland)) { goto error_display; } } return wlr_xwayland; error_display: xwayland_finish_display(wlr_xwayland); error_alloc: free(wlr_xwayland); return NULL; } void wlr_xwayland_set_cursor(struct wlr_xwayland *wlr_xwayland, uint8_t *pixels, uint32_t stride, uint32_t width, uint32_t height, int32_t hotspot_x, int32_t hotspot_y) { if (wlr_xwayland->xwm != NULL) { xwm_set_cursor(wlr_xwayland->xwm, pixels, stride, width, height, hotspot_x, hotspot_y); return; } free(wlr_xwayland->cursor); wlr_xwayland->cursor = calloc(1, sizeof(struct wlr_xwayland_cursor)); if (wlr_xwayland->cursor == NULL) { return; } wlr_xwayland->cursor->pixels = pixels; wlr_xwayland->cursor->stride = stride; wlr_xwayland->cursor->width = width; wlr_xwayland->cursor->height = height; wlr_xwayland->cursor->hotspot_x = hotspot_x; wlr_xwayland->cursor->hotspot_y = hotspot_y; } static void xwayland_handle_seat_destroy(struct wl_listener *listener, void *data) { struct wlr_xwayland *xwayland = wl_container_of(listener, xwayland, seat_destroy); wlr_xwayland_set_seat(xwayland, NULL); } void wlr_xwayland_set_seat(struct wlr_xwayland *xwayland, struct wlr_seat *seat) { if (xwayland->seat) { wl_list_remove(&xwayland->seat_destroy.link); } xwayland->seat = seat; if (xwayland->xwm) { xwm_set_seat(xwayland->xwm, seat); } if (seat == NULL) { return; } 
xwayland->seat_destroy.notify = xwayland_handle_seat_destroy; wl_signal_add(&seat->events.destroy, &xwayland->seat_destroy); }
1
13,599
rootston needs to unset this now
swaywm-wlroots
c
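Since the patch drops the unsetenv("DISPLAY") call from xwayland_finish_display(), the comment implies the compositor that exported DISPLAY has to clear it itself. A hedged sketch of what that compositor-side cleanup could look like (the helper name is invented for illustration; only wlr_xwayland_destroy() and unsetenv() are taken from the sources above):

	#define _POSIX_C_SOURCE 200112L
	#include <stdlib.h>
	#include <wlr/xwayland.h>

	/* Hypothetical compositor-side helper (e.g. in rootston): once wlroots
	 * no longer unsets DISPLAY when the Xwayland display is torn down, the
	 * process that set the variable clears it after destroying Xwayland. */
	static void compositor_shutdown_xwayland(struct wlr_xwayland *xwayland) {
		wlr_xwayland_destroy(xwayland); /* closes wm/wl fds and X sockets */
		unsetenv("DISPLAY");            /* previously done inside wlroots */
	}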
@@ -35,7 +35,7 @@ func newBlockBuffer(bufferSize, intervalSize uint64) *blockBuffer { } } -func (b *blockBuffer) Delete(height uint64) []*peerBlock { +func (b *blockBuffer) Pop(height uint64) []*peerBlock { b.mu.Lock() defer b.mu.Unlock() queue, ok := b.blockQueues[height]
1
// Copyright (c) 2019 IoTeX Foundation // This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no // warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent // permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache // License 2.0 that can be found in the LICENSE file. package blocksync import ( "sync" "go.uber.org/zap" "github.com/iotexproject/iotex-core/pkg/log" ) // blockBuffer is used to keep in-coming block in order. type blockBuffer struct { mu sync.RWMutex blockQueues map[uint64]*uniQueue bufferSize uint64 intervalSize uint64 } type syncBlocksInterval struct { Start uint64 End uint64 } func newBlockBuffer(bufferSize, intervalSize uint64) *blockBuffer { return &blockBuffer{ blockQueues: map[uint64]*uniQueue{}, bufferSize: bufferSize, intervalSize: intervalSize, } } func (b *blockBuffer) Delete(height uint64) []*peerBlock { b.mu.Lock() defer b.mu.Unlock() queue, ok := b.blockQueues[height] if !ok { return nil } blks := queue.dequeAll() delete(b.blockQueues, height) return blks } func (b *blockBuffer) Cleanup(height uint64) { b.mu.Lock() defer b.mu.Unlock() size := len(b.blockQueues) if size > int(b.bufferSize)*2 { log.L().Warn("blockBuffer is leaking memory.", zap.Int("bufferSize", size)) newQueues := map[uint64]*uniQueue{} for h := range b.blockQueues { if h > height { newQueues[h] = b.blockQueues[h] } } b.blockQueues = newQueues } } // AddBlock tries to put given block into buffer and flush buffer into blockchain. func (b *blockBuffer) AddBlock(tipHeight uint64, blk *peerBlock) (bool, uint64) { b.mu.Lock() defer b.mu.Unlock() blkHeight := blk.block.Height() if blkHeight <= tipHeight { return false, 0 } if blkHeight > tipHeight+b.bufferSize { return false, tipHeight + b.bufferSize } if _, ok := b.blockQueues[blkHeight]; !ok { b.blockQueues[blkHeight] = newUniQueue() } b.blockQueues[blkHeight].enque(blk) return true, blkHeight } // GetBlocksIntervalsToSync returns groups of syncBlocksInterval are missing upto targetHeight. func (b *blockBuffer) GetBlocksIntervalsToSync(confirmedHeight uint64, targetHeight uint64) []syncBlocksInterval { b.mu.RLock() defer b.mu.RUnlock() var ( start uint64 startSet bool bi []syncBlocksInterval ) // The sync range shouldn't go beyond tip height + buffer size to avoid being too aggressive if targetHeight > confirmedHeight+b.bufferSize { targetHeight = confirmedHeight + b.bufferSize } // The sync range should at least contain one interval to speculatively fetch missing blocks if targetHeight < confirmedHeight+b.intervalSize { targetHeight = confirmedHeight + b.intervalSize } var iLen uint64 for h := confirmedHeight + 1; h <= targetHeight; h++ { if _, ok := b.blockQueues[h]; !ok { iLen++ if !startSet { start = h startSet = true } if iLen >= b.intervalSize { bi = append(bi, syncBlocksInterval{Start: start, End: h}) startSet = false iLen = 0 } continue } if startSet { bi = append(bi, syncBlocksInterval{Start: start, End: h - 1}) startSet = false iLen = 0 } } // handle last interval if startSet { bi = append(bi, syncBlocksInterval{Start: start, End: targetHeight}) } return bi }
1
24,709
Why did you rename this function?
iotexproject-iotex-core
go
@@ -30,12 +30,10 @@ module MailerHelper end def step_status_icon(step) - if step.status == "actionable" - "emails/numbers/icon-number-" + (step.position - 1).to_s + ".png" - elsif step.status == "pending" - "emails/numbers/icon-number-" + (step.position - 1).to_s + "-pending.png" - elsif step.status == "completed" + if step.status == "completed" "emails/numbers/icon-completed.png" + else + "emails/numbers/icon-number-" + (step.position - 1).to_s + "-pending.png" end end
1
module MailerHelper def property_display_value(field) if field.present? property_to_s(field) else "-" end end def time_and_date(date) "#{date.strftime('%m/%d/%Y')} at #{date.strftime('%I:%M %P')}" end def generate_approve_url(approval) proposal = approval.proposal opts = { version: proposal.version, cch: approval.api_token.access_token } complete_proposal_url(proposal, opts) end def add_author(text, user) if user text << " by #{user.full_name}" end end def add_reason(text, reason) if reason.present? text << t("mailer.reason", reason: reason) end end def step_status_icon(step) if step.status == "actionable" "emails/numbers/icon-number-" + (step.position - 1).to_s + ".png" elsif step.status == "pending" "emails/numbers/icon-number-" + (step.position - 1).to_s + "-pending.png" elsif step.status == "completed" "emails/numbers/icon-completed.png" end end def step_user_title(step) step.decorate.role_name end end
1
17,016
Since we are no longer using these numbered icons, can we remove them from source control?
18F-C2
rb
@@ -182,6 +182,8 @@ public class VertxRestDispatcher extends AbstractVertxHttpDispatcher { RestProducerInvocation restProducerInvocation = new RestProducerInvocation(); context.put(RestConst.REST_PRODUCER_INVOCATION, restProducerInvocation); + restProducerInvocation.setAfterCreateInvocationHandler( + invocation -> context.put(RestConst.REST_INVOCATION_CONTEXT, invocation)); restProducerInvocation.invoke(transport, requestEx, responseEx, httpServerFilters); } }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.servicecomb.transport.rest.vertx; import javax.ws.rs.core.HttpHeaders; import javax.ws.rs.core.MediaType; import javax.ws.rs.core.Response.Status; import javax.ws.rs.core.Response.Status.Family; import org.apache.servicecomb.common.rest.AbstractRestInvocation; import org.apache.servicecomb.common.rest.RestConst; import org.apache.servicecomb.common.rest.RestProducerInvocation; import org.apache.servicecomb.core.Const; import org.apache.servicecomb.core.CseContext; import org.apache.servicecomb.core.Transport; import org.apache.servicecomb.foundation.vertx.http.HttpServletRequestEx; import org.apache.servicecomb.foundation.vertx.http.HttpServletResponseEx; import org.apache.servicecomb.foundation.vertx.http.VertxServerRequestToHttpServletRequest; import org.apache.servicecomb.foundation.vertx.http.VertxServerResponseToHttpServletResponse; import org.apache.servicecomb.swagger.invocation.exception.InvocationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDecoderException; import io.vertx.core.json.JsonObject; import io.vertx.ext.web.Router; import io.vertx.ext.web.RoutingContext; import io.vertx.ext.web.handler.CookieHandler; public class VertxRestDispatcher extends AbstractVertxHttpDispatcher { private static final Logger LOGGER = LoggerFactory.getLogger(VertxRestDispatcher.class); private Transport transport; @Override public int getOrder() { return Integer.MAX_VALUE; } @Override public void init(Router router) { router.route().handler(CookieHandler.create()); router.route().handler(createBodyHandler()); router.route().failureHandler(this::failureHandler).handler(this::onRequest); } private void failureHandler(RoutingContext context) { LOGGER.error("http server failed.", context.failure()); AbstractRestInvocation restProducerInvocation = context.get(RestConst.REST_PRODUCER_INVOCATION); Throwable e = context.failure(); if (ErrorDataDecoderException.class.isInstance(e)) { Throwable cause = e.getCause(); if (InvocationException.class.isInstance(cause)) { e = cause; } } // only when unexpected exception happens, it will run into here. // the connection should be closed. handleFailureAndClose(context, restProducerInvocation, e); } /** * Try to find out the failure information and send it in response. */ private void handleFailureAndClose(RoutingContext context, AbstractRestInvocation restProducerInvocation, Throwable e) { if (null != restProducerInvocation) { // if there is restProducerInvocation, let it send exception in response. The exception is allowed to be null. 
sendFailResponseByInvocation(context, restProducerInvocation, e); return; } if (null != e) { // if there exists exception, try to send this exception by RoutingContext sendExceptionByRoutingContext(context, e); return; } // if there is no exception, the response is determined by status code. sendFailureRespDeterminedByStatus(context); } /** * Try to determine response by status code, and send response. */ private void sendFailureRespDeterminedByStatus(RoutingContext context) { Family statusFamily = Family.familyOf(context.statusCode()); if (Family.CLIENT_ERROR.equals(statusFamily) || Family.SERVER_ERROR.equals(statusFamily) || Family.OTHER .equals(statusFamily)) { context.response().putHeader(HttpHeaders.CONTENT_TYPE, MediaType.WILDCARD) .setStatusCode(context.statusCode()).end(); } else { // it seems the status code is not set properly context.response().putHeader(HttpHeaders.CONTENT_TYPE, MediaType.WILDCARD) .setStatusCode(Status.INTERNAL_SERVER_ERROR.getStatusCode()) .setStatusMessage(Status.INTERNAL_SERVER_ERROR.getReasonPhrase()) .end(wrapResponseBody(Status.INTERNAL_SERVER_ERROR.getReasonPhrase())); } context.response().close(); } /** * Use routingContext to send failure information in throwable. */ private void sendExceptionByRoutingContext(RoutingContext context, Throwable e) { if (InvocationException.class.isInstance(e)) { InvocationException invocationException = (InvocationException) e; context.response().putHeader(HttpHeaders.CONTENT_TYPE, MediaType.WILDCARD) .setStatusCode(invocationException.getStatusCode()).setStatusMessage(invocationException.getReasonPhrase()) .end(wrapResponseBody(invocationException.getReasonPhrase())); } else { context.response().putHeader(HttpHeaders.CONTENT_TYPE, MediaType.WILDCARD) .setStatusCode(Status.INTERNAL_SERVER_ERROR.getStatusCode()).end(wrapResponseBody(e.getMessage())); } context.response().close(); } /** * Consumer will treat the response body as json by default, so it's necessary to wrap response body as Json string * to avoid deserialization error. * * @param message response body * @return response body wrapped as Json string */ String wrapResponseBody(String message) { if (isValidJson(message)) { return message; } JsonObject jsonObject = new JsonObject(); jsonObject.put("message", message); return jsonObject.toString(); } /** * Check if the message is a valid Json string. * @param message the message to be checked. * @return true if message is a valid Json string, otherwise false. */ private boolean isValidJson(String message) { try { new JsonObject(message); } catch (Exception ignored) { return false; } return true; } /** * Use restProducerInvocation to send failure message. The throwable is allowed to be null. */ private void sendFailResponseByInvocation(RoutingContext context, AbstractRestInvocation restProducerInvocation, Throwable e) { restProducerInvocation.sendFailResponse(e); context.response().close(); } private void onRequest(RoutingContext context) { if (transport == null) { transport = CseContext.getInstance().getTransportManager().findTransport(Const.RESTFUL); } HttpServletRequestEx requestEx = new VertxServerRequestToHttpServletRequest(context); HttpServletResponseEx responseEx = new VertxServerResponseToHttpServletResponse(context.response()); RestProducerInvocation restProducerInvocation = new RestProducerInvocation(); context.put(RestConst.REST_PRODUCER_INVOCATION, restProducerInvocation); restProducerInvocation.invoke(transport, requestEx, responseEx, httpServerFilters); } }
1
9,644
Both edge and normal need to set this, so should we make it the default action?
apache-servicecomb-java-chassis
java
@@ -391,9 +391,13 @@ namespace Microsoft.DotNet.Build.Tasks private static void WriteProject(JObject projectRoot, string projectJsonPath) { - string projectJson = JsonConvert.SerializeObject(projectRoot, Formatting.Indented); - Directory.CreateDirectory(Path.GetDirectoryName(projectJsonPath)); - File.WriteAllText(projectJsonPath, projectJson + Environment.NewLine); + string projectJson = JsonConvert.SerializeObject(projectRoot, Formatting.Indented) + Environment.NewLine; + + if (!File.Exists(projectJsonPath) || !projectJson.Equals(File.ReadAllText(projectJsonPath))) + { + Directory.CreateDirectory(Path.GetDirectoryName(projectJsonPath)); + File.WriteAllText(projectJsonPath, projectJson); + } } /* JProperties are encapsulated with "['" and "']" to assist with matching Paths which
1
using Microsoft.Build.Utilities; using Microsoft.Build.Framework; using Newtonsoft.Json; using Newtonsoft.Json.Linq; using System; using System.Collections.Generic; using System.IO; using System.Linq; using System.Text.RegularExpressions; using NuGet.Packaging; using NuGet.Packaging.Core; using NuGet.Versioning; namespace Microsoft.DotNet.Build.Tasks { /// <summary> /// Parse a project.json, and add additional dependencies, then write a out new project.json. /// Use-case scenarios /// 1. Provide a list of package drops, this becomes the source of package versions /// 2. Provide a versions files, this becomes the source of package versions /// If both a package drop and a version file are provided, then the package drop takes precedent over the version file. /// </summary> public class AddDependenciesToProjectJson : Task { // Additional Dependencies to add to the project.json. May Optionally contain a version. // Will Override dependencies present in the project if there is a conflict. // AdditionalDependencies required metadata: Name, Version [Required] public ITaskItem[] AdditionalDependencies { get; set; } // Framework section which the additional dependencies apply to. Empty is the default dependencies section. public string[] Frameworks { get; set; } public string[] PackagesDrops { get; set; } [Required] public string PackageNameRegex { get; set; } public string[] VersionsFiles { get; set; } /// <summary> /// If there are multiple package items from different sources (ie, package items found in one or more package drops, /// package items found in one or more versions files) with the same package name, allow the conflict, but choose /// the newest package version. /// </summary> public bool UseNewestAvailablePackages { get; set; } /// <summary> /// Original package version which is used to seed the output project.json /// </summary> [Required] public string ProjectJson { get; set; } /// <summary> /// External package dependency versions. /// </summary> public ITaskItem[] ExternalPackages { get; set; } /// <summary> /// Optional list of RIDs to exclude from the generated project.json. 
/// </summary> public string[] ExcludedRuntimes { get; set; } // The directory to put the generated project.json in [Required] public string OutputProjectJson { get; set; } private Regex _packageNameRegex; private VersionComparer comparer = new VersionComparer(VersionComparison.VersionRelease); public override bool Execute() { if (!File.Exists(ProjectJson)) { Log.LogError("Cannot find specified project.json - '{0}'", ProjectJson); return false; } Dictionary<string, PackageItem> packageInformation = new Dictionary<string, PackageItem>(); _packageNameRegex = new Regex(PackageNameRegex); // Retrieve package information from a package drop location if (PackagesDrops != null && PackagesDrops.Length > 0) { AddPackageItemsToDictionary(ref packageInformation, GatherPackageInformationFromDrops(PackagesDrops)); } // Retrieve package information from a versions file if (VersionsFiles != null) { foreach (var versionsFile in VersionsFiles) { if (!File.Exists(versionsFile)) { Log.LogError("Version file {0} does not exist.", versionsFile); } AddPackageItemsToDictionary(ref packageInformation, GatherPackageInformationFromVersionsFile(versionsFile, comparer)); } } JObject projectRoot = ReadProject(ProjectJson); var invalidFramework = AreValidFrameworkPaths(projectRoot); if(invalidFramework != string.Empty) { OutputProjectJson = ProjectJson; Log.LogError("Unable to find framework section '{0}' in '{1}'", invalidFramework, ProjectJson); return false; } // No Frameworks were specified, apply AdditionalDependencies to all framework groups in the project.json if (Frameworks == null || Frameworks.Length == 0) { Frameworks = projectRoot.SelectTokens("frameworks").SelectMany(f => f.Children().Select(c => ((JProperty)c).Name)).ToArray(); } // Update default dependencies section JObject dependencies = GenerateDependencies(projectRoot, ExternalPackages, packageInformation); projectRoot = UpdateDependenciesProperty(projectRoot, dependencies); if (ExcludedRuntimes != null) { var excludedRIDs = new HashSet<string>(ExcludedRuntimes, StringComparer.OrdinalIgnoreCase); projectRoot = FilterRuntimes(projectRoot, excludedRIDs); } // Update framework dependencies sections for (int i = 0; i < Frameworks.Length; i++) { dependencies = GenerateDependencies(projectRoot, ExternalPackages, packageInformation, Frameworks[i]); projectRoot = UpdateDependenciesProperty(projectRoot, dependencies, Frameworks[i]); } WriteProject(projectRoot, OutputProjectJson); return true; } /// <summary> /// Given a package name regex pattern, and an array of drop locations, create an array of objects /// containing package information (name, version,, prerelease version) /// </summary> /// <param name="packagesDrops"></param> /// <returns></returns> private Dictionary<string, PackageItem> GatherPackageInformationFromDrops(string [] packagesDrops) { Dictionary<string, PackageItem> packageItems = new Dictionary<string, PackageItem>(); foreach (string packageDrop in packagesDrops) { if (!Directory.Exists(packageDrop)) { Log.LogWarning("PackageDrop does not exist - '{0}'", packageDrop); continue; } IEnumerable<string> packages = Directory.GetFiles(packageDrop); foreach (var package in packages) { PackageItem packageItem = CreatePackageItem(package); AddPackageItemToDictionary(packageItems, packageItem); } } return packageItems; } private void AddPackageItemToDictionary(Dictionary<string, PackageItem> packageItems, PackageItem packageItem) { if (packageItems.ContainsKey(packageItem.Name)) { if (comparer == null) { comparer = new 
VersionComparer(VersionComparison.VersionRelease); } if (comparer.Compare(packageItems[packageItem.Name].Version, packageItem.Version) != 0 && UseNewestAvailablePackages != true) { Log.LogError("Package named {0} already exists. Cannot have multiple packages with the same name.\n", packageItem.Name); Log.LogError("To permit package name clashes and take latest, specify 'UseNewestAvailablePackages=true'.\n"); Log.LogError("Package {0} version {1} clashes with {2}", packageItem.Name, packageItems[packageItem.Name].Version.ToFullString(), packageItem.Version.ToFullString()); } else if (UseNewestAvailablePackages == true) { PackageItem item = (comparer.Compare(packageItems[packageItem.Name].Version, packageItem.Version) < 0) ? packageItem : packageItems[packageItem.Name]; packageItems[packageItem.Name] = item; } } else { packageItems.Add(packageItem.Name, packageItem); } } private void AddPackageItemsToDictionary(ref Dictionary<string, PackageItem> packageItems, Dictionary<string, PackageItem> addPackageItems) { foreach(var packageItem in addPackageItems.Values) { AddPackageItemToDictionary(packageItems, packageItem); } } // A versions file is of the form https://github.com/dotnet/versions/blob/master/build-info/dotnet/corefx/master/Latest_Packages.txt private Dictionary<string, PackageItem> GatherPackageInformationFromVersionsFile(string versionsFile, VersionComparer comparer = null) { Dictionary<string, PackageItem> packageItems = new Dictionary<string, PackageItem>(); if (!File.Exists(versionsFile)) { Log.LogError("Specified versions file ({0}) does not exist.", versionsFile); } var lines = File.ReadAllLines(versionsFile); foreach(string line in lines) { if(!string.IsNullOrWhiteSpace(line)) { string [] packageVersionTokens = line.Split(' '); PackageItem packageItem = CreatePackageItem(packageVersionTokens[0], packageVersionTokens[1]); AddPackageItemToDictionary(packageItems, packageItem); } } return packageItems; } /// <summary> /// Create a package item object from a nupkg file /// </summary> /// <param name="package">path to a nupkg</param> /// <returns></returns> private PackageItem CreatePackageItem(string package) { using (PackageArchiveReader archiveReader = new PackageArchiveReader(package)) { PackageIdentity identity = archiveReader.GetIdentity(); return new PackageItem(identity.Id, identity.Version); } } /// <summary> /// Create a package item object from a package name (id) and version /// </summary> /// <param name="id"></param> /// <param name="version"></param> /// <returns></returns> private PackageItem CreatePackageItem(string id, string version) { NuGetVersion nuGetVersion = new NuGetVersion(version); return new PackageItem(id, nuGetVersion); } private string AreValidFrameworkPaths(JObject projectRoot) { if(Frameworks == null || Frameworks.Length == 0) { return string.Empty; } // Check for a valid path, if invalid, exit for (int i = 0; i < Frameworks.Length; i++) { var _frameworkPath = "frameworks." 
+ NewtonsoftEscapeJProperty(Frameworks[i]); var validFramework = projectRoot.SelectToken(_frameworkPath); if (validFramework == null) { return _frameworkPath; } } return string.Empty; } private static JObject ReadProject(string projectJsonPath) { using (TextReader projectFileReader = File.OpenText(projectJsonPath)) { var projectJsonReader = new JsonTextReader(projectFileReader); var serializer = new JsonSerializer(); return serializer.Deserialize<JObject>(projectJsonReader); } } private JToken GetFrameworkDependenciesSection(JObject projectJsonRoot, string framework = null) { if(string.IsNullOrWhiteSpace(framework)) { return projectJsonRoot["dependencies"]; } return projectJsonRoot.SelectToken("frameworks." + NewtonsoftEscapeJProperty(framework) + ".dependencies"); } // Generate the combines dependencies from the projectjson jObject and from AdditionalDependencies private JObject GenerateDependencies(JObject projectJsonRoot, ITaskItem[] externalPackageVersions, Dictionary<string, PackageItem> packageInformation, string framework = null) { var originalDependenciesList = new List<JToken>(); var returnDependenciesList = new Dictionary<string, JToken>(); var frameworkDependencies = GetFrameworkDependenciesSection(projectJsonRoot, framework); if (frameworkDependencies != null) { originalDependenciesList = frameworkDependencies.Children().ToList(); // Update versions in dependencies foreach (JProperty property in originalDependenciesList.Select(od => od)) { PackageItem packageItem = null; if (packageInformation.ContainsKey(property.Name)) { packageItem = packageInformation[property.Name]; NuGetVersion nuGetVersion = packageItem.Version; // Only add the original dependency if it wasn't passed as an AdditionalDependency, ie. AdditionalDependencies may override dependencies in project.json if (!AdditionalDependencies.Any(d => d.ItemSpec.Equals(property.Name, StringComparison.OrdinalIgnoreCase))) { JProperty addProperty; if (nuGetVersion != null) { addProperty = new JProperty(property.Name, nuGetVersion.ToString()); } else { addProperty = property; } returnDependenciesList.Add(property.Name, addProperty); } } else { returnDependenciesList.Add(property.Name, property); } } } foreach (var dependency in AdditionalDependencies) { string name = dependency.GetMetadata("Name"); // Don't add a new dependency if one already exists. if (!returnDependenciesList.ContainsKey(name)) { NuGetVersion nuGetVersion = NuGetVersion.Parse(dependency.GetMetadata("Version")); PackageItem packageItem = new PackageItem(name, nuGetVersion); string version = packageItem.GetVersionString(); // a package version was provided, use its version information. if (packageInformation.ContainsKey(name)) { version = packageInformation[name].Version.ToString(); } JProperty property = new JProperty(name, version); returnDependenciesList.Add(name, property); } else { Log.LogMessage("Ignoring AdditionalDependency '{0}', dependency is already present in {1}", name, ProjectJson); } } return new JObject(returnDependenciesList.Values.ToArray()); } /* Given a project.json as a JObject, replace it's dependencies property with a new dependencies property. */ private JObject UpdateDependenciesProperty(JObject projectJsonRoot, JObject updatedProperties, string framework = null) { var frameworkPath = string.Empty; if(!string.IsNullOrWhiteSpace(framework)) { frameworkPath = "frameworks." 
+ NewtonsoftEscapeJProperty(framework); } var frameworkPathObject = projectJsonRoot.SelectToken(frameworkPath); frameworkPathObject["dependencies"] = updatedProperties; return projectJsonRoot; } private JObject FilterRuntimes(JObject projectRoot, HashSet<string> excludedRIDs) { var runtimes = projectRoot["runtimes"]; if (runtimes != null) { var toRemove = new List<JToken>(); foreach (JProperty runtime in runtimes) { if (excludedRIDs.Contains(runtime.Name)) toRemove.Add(runtime); } foreach (var token in toRemove) { Log.LogMessage("Removing RID '{0}' from the list of applicable runtimes.", ((JProperty)token).Name); token.Remove(); } } return projectRoot; } private static void WriteProject(JObject projectRoot, string projectJsonPath) { string projectJson = JsonConvert.SerializeObject(projectRoot, Formatting.Indented); Directory.CreateDirectory(Path.GetDirectoryName(projectJsonPath)); File.WriteAllText(projectJsonPath, projectJson + Environment.NewLine); } /* JProperties are encapsulated with "['" and "']" to assist with matching Paths which contain properties with a '.'. ie. frameworks.netcoreapp1.0 becomes frameworks.['netcoreapp1.0']. A match for a property without a '.' and unencapsulated still works. ie, we can still select frameworks.['netcoreapp1.0'] even if internally its path is frameworks.netcoreapp1.0. */ private static string NewtonsoftEscapeJProperty(string property) { if (string.IsNullOrWhiteSpace(property)) { return property; } if (!property.StartsWith("['") && !property.EndsWith("']")) { property = "['" + property + "']"; } return property; } } internal class PackageItem { public string Name { get; set; } public NuGetVersion Version { set { _version = value; } get { return _version; } } NuGetVersion _version; public PackageItem() { } public PackageItem(string name) { Name = name; } public PackageItem(string name, NuGetVersion version) { Name = name; Version = version; } public string GetVersionString() { return string.Join(".", _version.Major, _version.Minor, _version.Patch); } public TaskItem ToTaskItem() { TaskItem taskItem = new TaskItem(Name); taskItem.SetMetadata("Name", Name); taskItem.SetMetadata("Version", string.Join(".", Version.Major, Version.Minor, Version.Patch)); taskItem.SetMetadata("Prerelease", Version.Release); return taskItem; } } }
1
11,007
Is a straight string comparison of the files the right way to determine this? If you expect to be the only one writing this file it could work, but it seems like it could be a large string comparison, and if we do this hundreds of times it might cause memory issues.
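Illustration only, not part of the review thread or the dotnet-buildtools code: a minimal C# sketch of one way to avoid repeatedly comparing two large strings, by comparing fixed-size SHA-256 digests instead; the on-disk side is hashed from a stream so the existing project.json never has to be materialized as a string. The class and method names here are hypothetical.

using System.IO;
using System.Linq;
using System.Security.Cryptography;
using System.Text;

static class ProjectJsonComparison
{
    // Returns true when the file at 'path' already matches 'newContent'.
    // Only two 32-byte digests are compared, rather than two full strings.
    public static bool ContentsMatch(string path, string newContent)
    {
        if (!File.Exists(path))
        {
            return false;
        }

        using (SHA256 sha = SHA256.Create())
        {
            byte[] newHash = sha.ComputeHash(Encoding.UTF8.GetBytes(newContent));

            byte[] existingHash;
            using (FileStream stream = File.OpenRead(path))
            {
                // Hash the existing file incrementally from the stream.
                existingHash = sha.ComputeHash(stream);
            }

            return existingHash.SequenceEqual(newHash);
        }
    }
}

Hashing is only one option; a byte-by-byte stream comparison or a length-plus-timestamp check would be alternative ways to address the same concern.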
dotnet-buildtools
.cs
@@ -431,7 +431,7 @@ static void set_client_priority() { #ifdef __linux__ char buf[1024]; snprintf(buf, sizeof(buf), "ionice -c 3 -p %d", getpid()); - system(buf); + if (!system(buf)) {} #endif }
1
// This file is part of BOINC. // http://boinc.berkeley.edu // Copyright (C) 2018 University of California // // BOINC is free software; you can redistribute it and/or modify it // under the terms of the GNU Lesser General Public License // as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any later version. // // BOINC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. // See the GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with BOINC. If not, see <http://www.gnu.org/licenses/>. #ifdef __APPLE__ #include <Carbon/Carbon.h> #endif #ifdef _WIN32 #include "boinc_win.h" #else #include "config.h" #include <unistd.h> #include <csignal> #include <cstdio> #include <cstdlib> #include <ctime> #include <cstdarg> #include <cstring> #include <cmath> #if HAVE_SYS_SOCKET_H #include <sys/socket.h> #endif #endif #ifdef _MSC_VER #define snprintf _snprintf #endif #ifdef __EMX__ #define INCL_DOS #include <os2.h> #endif #include "cpp.h" #include "error_numbers.h" #include "filesys.h" #include "parse.h" #include "str_replace.h" #include "str_util.h" #include "util.h" #ifdef _WIN32 #include "run_app_windows.h" #endif #include "app_config.h" #include "async_file.h" #include "client_msgs.h" #include "cs_notice.h" #include "cs_proxy.h" #include "cs_trickle.h" #include "file_names.h" #include "hostinfo.h" #include "http_curl.h" #include "network.h" #include "project.h" #include "result.h" #include "sandbox.h" #include "shmem.h" #include "client_state.h" using std::max; CLIENT_STATE gstate; COPROCS coprocs; #ifndef SIM #ifdef NEW_CPU_THROTTLE THREAD_LOCK client_mutex; THREAD throttle_thread; #endif #endif CLIENT_STATE::CLIENT_STATE() : lookup_website_op(&gui_http), get_current_version_op(&gui_http), get_project_list_op(&gui_http), acct_mgr_op(&gui_http), lookup_login_token_op(&gui_http) { http_ops = new HTTP_OP_SET(); file_xfers = new FILE_XFER_SET(http_ops); pers_file_xfers = new PERS_FILE_XFER_SET(file_xfers); #ifndef SIM scheduler_op = new SCHEDULER_OP(http_ops); #endif time_stats.init(); client_state_dirty = false; old_major_version = 0; old_minor_version = 0; old_release = 0; clock_change = false; check_all_logins = false; user_active = false; cmdline_gui_rpc_port = 0; run_cpu_benchmarks = false; file_xfer_giveup_period = PERS_GIVEUP; had_or_requested_work = false; tasks_suspended = false; tasks_throttled = false; network_suspended = false; file_xfers_suspended = false; suspend_reason = 0; network_suspend_reason = 0; core_client_version.major = BOINC_MAJOR_VERSION; core_client_version.minor = BOINC_MINOR_VERSION; core_client_version.release = BOINC_RELEASE; #ifdef BOINC_PRERELEASE core_client_version.prerelease = true; #else core_client_version.prerelease = false; #endif safe_strcpy(language, ""); safe_strcpy(client_brand, ""); exit_after_app_start_secs = 0; app_started = 0; cmdline_dir = false; exit_before_upload = false; run_test_app = false; #ifndef _WIN32 boinc_project_gid = 0; #endif show_projects = false; safe_strcpy(detach_project_url, ""); safe_strcpy(reset_project_url, ""); safe_strcpy(update_prefs_url, ""); safe_strcpy(main_host_venue, ""); safe_strcpy(attach_project_url, ""); safe_strcpy(attach_project_auth, ""); cpu_run_mode.set(RUN_MODE_AUTO, 0); gpu_run_mode.set(RUN_MODE_AUTO, 0); network_run_mode.set(RUN_MODE_AUTO, 0); 
started_by_screensaver = false; requested_exit = false; os_requested_suspend = false; os_requested_suspend_time = 0; cleanup_completed = false; in_abort_sequence = false; master_fetch_period = MASTER_FETCH_PERIOD; retry_cap = RETRY_CAP; master_fetch_retry_cap = MASTER_FETCH_RETRY_CAP; master_fetch_interval = MASTER_FETCH_INTERVAL; sched_retry_delay_min = SCHED_RETRY_DELAY_MIN; sched_retry_delay_max = SCHED_RETRY_DELAY_MAX; pers_retry_delay_min = PERS_RETRY_DELAY_MIN; pers_retry_delay_max = PERS_RETRY_DELAY_MAX; pers_giveup = PERS_GIVEUP; executing_as_daemon = false; redirect_io = false; disable_graphics = false; cant_write_state_file = false; ncpus = 1; benchmarks_running = false; client_disk_usage = 0.0; total_disk_usage = 0.0; device_status_time = 0; rec_interval_start = 0; total_cpu_time_this_rec_interval = 0.0; must_enforce_cpu_schedule = false; must_schedule_cpus = true; must_check_work_fetch = true; retry_shmem_time = 0; no_gui_rpc = false; autologin_in_progress = false; autologin_fetching_project_list = false; gui_rpc_unix_domain = false; new_version_check_time = 0; all_projects_list_check_time = 0; client_version_check_url = DEFAULT_VERSION_CHECK_URL; detach_console = false; #ifdef SANDBOX g_use_sandbox = true; // User can override with -insecure command-line arg #endif launched_by_manager = false; run_by_updater = false; now = 0.0; initialized = false; last_wakeup_time = dtime(); device_status_time = 0; #ifdef _WIN32 have_sysmon_msg = false; #endif } void CLIENT_STATE::show_host_info() { char buf[256], buf2[256]; msg_printf(NULL, MSG_INFO, "Host name: %s", host_info.domain_name ); nbytes_to_string(host_info.m_cache, 0, buf, sizeof(buf)); msg_printf(NULL, MSG_INFO, "Processor: %d %s %s", host_info.p_ncpus, host_info.p_vendor, host_info.p_model ); if (ncpus != host_info.p_ncpus) { msg_printf(NULL, MSG_INFO, "Using %d CPUs", ncpus); } #if 0 if (host_info.m_cache > 0) { msg_printf(NULL, MSG_INFO, "Processor: %s cache", buf ); } #endif msg_printf(NULL, MSG_INFO, "Processor features: %s", host_info.p_features ); #ifdef __APPLE__ buf[0] = '\0'; FILE *f = popen("sw_vers -productVersion", "r"); fgets(buf, sizeof(buf), f); strip_whitespace(buf); pclose(f); msg_printf(NULL, MSG_INFO, "OS: Mac OS X %s (%s %s)", buf, host_info.os_name, host_info.os_version ); #else msg_printf(NULL, MSG_INFO, "OS: %s: %s", host_info.os_name, host_info.os_version ); #endif nbytes_to_string(host_info.m_nbytes, 0, buf, sizeof(buf)); nbytes_to_string(host_info.m_swap, 0, buf2, sizeof(buf2)); msg_printf(NULL, MSG_INFO, "Memory: %s physical, %s virtual", buf, buf2 ); nbytes_to_string(host_info.d_total, 0, buf, sizeof(buf)); nbytes_to_string(host_info.d_free, 0, buf2, sizeof(buf2)); msg_printf(NULL, MSG_INFO, "Disk: %s total, %s free", buf, buf2); int tz = host_info.timezone/3600; msg_printf(0, MSG_INFO, "Local time is UTC %s%d hours", tz<0?"":"+", tz ); #ifdef _WIN64 if (host_info.wsl_available) { msg_printf(NULL, MSG_INFO, "WSL detected:"); for (size_t i = 0; i < host_info.wsls.wsls.size(); ++i) { const WSL& wsl = host_info.wsls.wsls[i]; if (wsl.is_default) { msg_printf(NULL, MSG_INFO, " [%s] (default): %s (%s)", wsl.distro_name.c_str(), wsl.name.c_str(), wsl.version.c_str() ); } else { msg_printf(NULL, MSG_INFO, " [%s]: %s (%s)", wsl.distro_name.c_str(), wsl.name.c_str(), wsl.version.c_str() ); } } } else { msg_printf(NULL, MSG_INFO, "No WSL found."); } #endif if (strlen(host_info.virtualbox_version)) { msg_printf(NULL, MSG_INFO, "VirtualBox version: %s", host_info.virtualbox_version ); } else { #if defined 
(_WIN32) && !defined(_WIN64) if (!strcmp(get_primary_platform(), "windows_x86_64")) { msg_printf(NULL, MSG_USER_ALERT, "Can't detect VirtualBox because this is a 32-bit version of BOINC; to fix, please install a 64-bit version." ); } #endif } } int rsc_index(const char* name) { const char* nm = strcmp(name, "CUDA")?name:GPU_TYPE_NVIDIA; // handle old state files for (int i=0; i<coprocs.n_rsc; i++) { if (!strcmp(nm, coprocs.coprocs[i].type)) { return i; } } return -1; } // used in XML and COPROC::type // const char* rsc_name(int i) { return coprocs.coprocs[i].type; } // user-friendly version // const char* rsc_name_long(int i) { int num = coproc_type_name_to_num(coprocs.coprocs[i].type); if (num >= 0) return proc_type_name(num); // CPU, NVIDIA GPU, AMD GPU or Intel GPU return coprocs.coprocs[i].type; // Some other type } #ifndef SIM // alert user if any jobs need more RAM than available // (based on RAM estimate, not measured size) // static void check_too_large_jobs() { unsigned int i, j; double m = gstate.max_available_ram(); for (i=0; i<gstate.projects.size(); i++) { PROJECT* p = gstate.projects[i]; bool found = false; for (j=0; j<gstate.results.size(); j++) { RESULT* rp = gstate.results[j]; if (rp->project == p && rp->wup->rsc_memory_bound > m) { found = true; break; } } if (found) { msg_printf(p, MSG_USER_ALERT, _("Some tasks need more memory than allowed by your preferences. Please check the preferences.") ); } } } #endif // Something has failed N times. // Calculate an exponential backoff between MIN and MAX // double calculate_exponential_backoff(int n, double MIN, double MAX) { double x = pow(2, (double)n); x *= MIN; if (x > MAX) x = MAX; x *= (.5 + .5*drand()); return x; } #ifndef SIM void CLIENT_STATE::set_now() { double x = dtime(); // if time went backward significantly, clear delays // clock_change = false; if (x < (now-60)) { clock_change = true; msg_printf(NULL, MSG_INFO, "New system time (%.0f) < old system time (%.0f); clearing timeouts", x, now ); clear_absolute_times(); } #ifdef _WIN32 // On Win, check for evidence that we're awake after a suspension // (in case we missed the event announcing this) // if (os_requested_suspend) { if (x > now+10) { msg_printf(0, MSG_INFO, "Resuming after OS suspension"); os_requested_suspend = false; } else if (x > os_requested_suspend_time + 300) { msg_printf(0, MSG_INFO, "Resuming after OS suspension"); os_requested_suspend = false; } } #endif now = x; } // Check if version or platform has changed; // if so we're running a different client than before. 
// bool CLIENT_STATE::is_new_client() { bool new_client = false; if ((core_client_version.major != old_major_version) || (core_client_version.minor != old_minor_version) || (core_client_version.release != old_release) ) { msg_printf(NULL, MSG_INFO, "Version change (%d.%d.%d -> %d.%d.%d)", old_major_version, old_minor_version, old_release, core_client_version.major, core_client_version.minor, core_client_version.release ); new_client = true; } if (statefile_platform_name.size() && strcmp(get_primary_platform(), statefile_platform_name.c_str())) { msg_printf(NULL, MSG_INFO, "Platform changed from %s to %s", statefile_platform_name.c_str(), get_primary_platform() ); new_client = true; } return new_client; } #ifdef _WIN32 typedef DWORD (WINAPI *STP)(HANDLE, DWORD); #endif static void set_client_priority() { #ifdef _WIN32 STP stp = (STP) GetProcAddress(GetModuleHandle(_T("kernel32.dll")), "SetThreadPriority"); if (!stp) return; if (stp(GetCurrentThread(), THREAD_MODE_BACKGROUND_BEGIN)) { msg_printf(NULL, MSG_INFO, "Running at background priority"); } else { msg_printf(NULL, MSG_INFO, "Failed to set background priority"); } #endif #ifdef __linux__ char buf[1024]; snprintf(buf, sizeof(buf), "ionice -c 3 -p %d", getpid()); system(buf); #endif } int CLIENT_STATE::init() { int retval; unsigned int i; char buf[256]; PROJECT* p; srand((unsigned int)time(0)); now = dtime(); #ifdef ANDROID device_status_time = dtime(); #endif scheduler_op->url_random = drand(); notices.init(); daily_xfer_history.init(); time_stats.init(); detect_platforms(); time_stats.start(); msg_printf( NULL, MSG_INFO, "Starting BOINC client version %d.%d.%d for %s%s", core_client_version.major, core_client_version.minor, core_client_version.release, HOSTTYPE, #ifdef _DEBUG " (DEBUG)" #else "" #endif ); if (core_client_version.prerelease) { msg_printf(NULL, MSG_INFO, "This a development version of BOINC and may not function properly" ); } log_flags.show(); msg_printf(NULL, MSG_INFO, "Libraries: %s", curl_version()); if (cc_config.lower_client_priority) { set_client_priority(); } if (executing_as_daemon) { #ifdef _WIN32 msg_printf(NULL, MSG_INFO, "Running as a daemon (GPU computing disabled)"); #else msg_printf(NULL, MSG_INFO, "Running as a daemon"); #endif } relative_to_absolute("", buf); msg_printf(NULL, MSG_INFO, "Data directory: %s", buf); #ifdef _WIN32 DWORD buf_size = sizeof(buf); LPTSTR pbuf = buf; GetUserName(pbuf, &buf_size); msg_printf(NULL, MSG_INFO, "Running under account %s", pbuf); #endif FILE* f = fopen(CLIENT_BRAND_FILENAME, "r"); if (f) { fgets(client_brand, sizeof(client_brand), f); strip_whitespace(client_brand); msg_printf(NULL, MSG_INFO, "Client brand: %s", client_brand); fclose(f); } // parse keyword file if present // f = fopen(KEYWORD_FILENAME, "r"); if (f) { MIOFILE mf; mf.init_file(f); XML_PARSER xp(&mf); retval = keywords.parse(xp); if (!retval) keywords.present = true; fclose(f); #if 0 std::map<int, KEYWORD>::iterator it; for (it = keywords.keywords.begin(); it != keywords.keywords.end(); it++) { int id = it->first; KEYWORD& kw = it->second; printf("keyword %d: %s\n", id, kw.name.c_str()); } #endif } parse_account_files(); parse_statistics_files(); // check for GPUs. 
// coprocs.bound_counts(); // show GPUs described in cc_config.xml if (!cc_config.no_gpus #ifdef _WIN32 && !executing_as_daemon #endif ) { vector<string> descs; vector<string> warnings; coprocs.get( cc_config.use_all_gpus, descs, warnings, cc_config.ignore_gpu_instance ); for (i=0; i<descs.size(); i++) { msg_printf(NULL, MSG_INFO, "%s", descs[i].c_str()); } if (log_flags.coproc_debug) { for (i=0; i<warnings.size(); i++) { msg_printf(NULL, MSG_INFO, "[coproc] %s", warnings[i].c_str()); } } #if 0 msg_printf(NULL, MSG_INFO, "Faking an NVIDIA GPU"); coprocs.nvidia.fake(18000, 512*MEGA, 490*MEGA, 2); #endif #if 0 msg_printf(NULL, MSG_INFO, "Faking an ATI GPU"); coprocs.ati.fake(512*MEGA, 256*MEGA, 2); #endif #if 0 msg_printf(NULL, MSG_INFO, "Faking an Intel GPU"); coprocs.intel_gpu.fake(512*MEGA, 256*MEGA, 2); #endif #if 0 fake_opencl_gpu("Mali-T628"); #endif } if (coprocs.have_nvidia()) { if (rsc_index(GPU_TYPE_NVIDIA)>0) { msg_printf(NULL, MSG_INFO, "NVIDIA GPU info taken from cc_config.xml"); } else { coprocs.add(coprocs.nvidia); } } if (coprocs.have_ati()) { if (rsc_index(GPU_TYPE_ATI)>0) { msg_printf(NULL, MSG_INFO, "ATI GPU info taken from cc_config.xml"); } else { coprocs.add(coprocs.ati); } } if (coprocs.have_intel_gpu()) { if (rsc_index(GPU_TYPE_INTEL)>0) { msg_printf(NULL, MSG_INFO, "INTEL GPU info taken from cc_config.xml"); } else { coprocs.add(coprocs.intel_gpu); } } coprocs.add_other_coproc_types(); host_info.coprocs = coprocs; if (coprocs.none() ) { msg_printf(NULL, MSG_INFO, "No usable GPUs found"); } set_no_rsc_config(); // check for app_info.xml file in project dirs. // If find, read app info from there, set project.anonymous_platform // - this must follow coproc.get() (need to know if GPUs are present) // - this is being done before CPU speed has been read from state file, // so we'll need to patch up avp->flops later; // check_anonymous(); // first time, set p_fpops nonzero to avoid div by zero // cpu_benchmarks_set_defaults(); // Parse the client state file, // ignoring any <project> tags (and associated stuff) // for projects with no account file // parse_state_file(); bool new_client = is_new_client(); // this follows parse_state_file() since we need to have read // domain_name for Android // host_info.get_host_info(true); // clear the VM extensions disabled flag. // It's possible that the user enabled them since the last VM failure, // or that the last failure was specious. 
// host_info.p_vm_extensions_disabled = false; set_ncpus(); show_host_info(); // this follows parse_state_file() because that's where we read project names // sort_projects_by_name(); // check for app_config.xml files in project dirs // check_app_config(); show_app_config(); // this needs to go after parse_state_file() because // GPU exclusions refer to projects // cc_config.show(); // inform the user if there's a newer version of client // NOTE: this must be called AFTER // read_vc_config_file() // newer_version_startup_check(); // parse account files again, // now that we know the host's venue on each project // parse_account_files_venue(); // fill in p->no_X_apps for anon platform projects, // and check no_rsc_apps for others // for (i=0; i<projects.size(); i++) { p = projects[i]; if (p->anonymous_platform) { p->check_no_apps(); } else { p->check_no_rsc_apps(); } } // fill in avp->flops for anonymous platform projects // for (i=0; i<app_versions.size(); i++) { APP_VERSION* avp = app_versions[i]; if (!avp->flops) { if (!avp->avg_ncpus) { avp->avg_ncpus = 1; } avp->flops = avp->avg_ncpus * host_info.p_fpops; // for GPU apps, use conservative estimate: // assume GPU runs at 10X peak CPU speed // if (avp->gpu_usage.rsc_type) { avp->flops += avp->gpu_usage.usage * 10 * host_info.p_fpops; } } } process_gpu_exclusions(); check_clock_reset(); // Check to see if we can write the state file. // retval = write_state_file(); if (retval) { msg_printf_notice(NULL, false, "https://boinc.berkeley.edu/manager_links.php?target=notice&controlid=statefile", _("Couldn't write state file; check directory permissions") ); cant_write_state_file = true; } // scan user prefs; create file records // parse_preferences_for_user_files(); if (log_flags.state_debug) { print_summary(); } do_cmdline_actions(); // if new version of client, // - run CPU benchmarks // - get new project list // - contact reference site (or some project) to trigger firewall alert // if (new_client) { run_cpu_benchmarks = true; all_projects_list_check_time = 0; if (cc_config.dont_contact_ref_site) { if (projects.size() > 0) { projects[0]->master_url_fetch_pending = true; } } else { net_status.need_to_contact_reference_site = true; } } check_if_need_benchmarks(); read_global_prefs(); // do CPU scheduler and work fetch // request_schedule_cpus("Startup"); request_work_fetch("Startup"); work_fetch.init(); rec_interval_start = now; // set up the project and slot directories // msg_printf(NULL, MSG_INFO, "Setting up project and slot directories"); delete_old_slot_dirs(); retval = make_project_dirs(); if (retval) return retval; msg_printf(NULL, MSG_INFO, "Checking active tasks"); active_tasks.init(); active_tasks.report_overdue(); active_tasks.handle_upload_files(); had_or_requested_work = (active_tasks.active_tasks.size() > 0); // Just to be on the safe side; something may have been modified // set_client_state_dirty("init"); // check for initialization files // process_autologin(true); acct_mgr_info.init(); project_init.init(); log_show_projects(); // this must follow acct_mgr_info.init() // set up for handling GUI RPCs // if (!no_gui_rpc) { msg_printf(NULL, MSG_INFO, "Setting up GUI RPC socket"); if (gui_rpc_unix_domain) { retval = gui_rpcs.init_unix_domain(); } else { // When we're running at boot time, // it may be a few seconds before we can socket/bind/listen. // So retry a few times. 
// for (i=0; i<30; i++) { bool last_time = (i==29); retval = gui_rpcs.init_tcp(last_time); if (!retval) break; boinc_sleep(1.0); } } if (retval) return retval; } if (g_use_sandbox) get_project_gid(); #ifdef _WIN32 get_sandbox_account_service_token(); if (sandbox_account_service_token != NULL) { g_use_sandbox = true; } #endif msg_printf(NULL, MSG_INFO, "Checking presence of %d project files", (int)file_infos.size() ); check_file_existence(); if (!boinc_file_exists(ALL_PROJECTS_LIST_FILENAME)) { all_projects_list_check_time = 0; } #ifdef ENABLE_AUTO_UPDATE auto_update.init(); #endif http_ops->cleanup_temp_files(); // must parse env vars after parsing state file // otherwise items will get overwritten with state file info // parse_env_vars(); // do this after parsing env vars // proxy_info_startup(); if (!autologin_in_progress) { if (gstate.projects.size() == 0) { msg_printf(NULL, MSG_INFO, "This computer is not attached to any projects" ); } } // get list of BOINC projects occasionally, // and initialize notice RSS feeds // if (!cc_config.no_info_fetch) { all_projects_list_check(); notices.init_rss(); } // check for jobs with finish files // (i.e. they finished just as client was exiting) // active_tasks.check_for_finished_jobs(); // warn user if some jobs need more memory than available // check_too_large_jobs(); // initialize project priorities (for the GUI, in case we're suspended) // project_priority_init(false); #ifdef NEW_CPU_THROTTLE client_mutex.lock(); throttle_thread.run(throttler, NULL); #endif initialized = true; return 0; } static void double_to_timeval(double x, timeval& t) { t.tv_sec = (int)x; t.tv_usec = (int)(1000000*(x - (int)x)); } FDSET_GROUP curl_fds; FDSET_GROUP gui_rpc_fds; FDSET_GROUP all_fds; // Spend x seconds either doing I/O (if possible) or sleeping. // void CLIENT_STATE::do_io_or_sleep(double max_time) { int n; struct timeval tv; set_now(); double end_time = now + max_time; double time_remaining = max_time; while (1) { curl_fds.zero(); gui_rpc_fds.zero(); http_ops->get_fdset(curl_fds); all_fds = curl_fds; if (!autologin_in_progress) { gui_rpcs.get_fdset(gui_rpc_fds, all_fds); } bool have_async = have_async_file_op(); // prioritize network (including GUI RPC) over async file ops. // if there's a pending asynch file op, do the select with zero timeout; // otherwise do it for the remaining amount of time. double_to_timeval(have_async?0:time_remaining, tv); #ifdef NEW_CPU_THROTTLE client_mutex.unlock(); #endif if (all_fds.max_fd == -1) { boinc_sleep(time_remaining); n = 0; } else { n = select( all_fds.max_fd+1, &all_fds.read_fds, &all_fds.write_fds, &all_fds.exc_fds, &tv ); } //printf("select in %d out %d\n", all_fds.max_fd, n); #ifdef NEW_CPU_THROTTLE client_mutex.lock(); #endif // Note: curl apparently likes to have curl_multi_perform() // (called from net_xfers->got_select()) // called pretty often, even if no descriptors are enabled. // So do the "if (n==0) break" AFTER the got_selects(). http_ops->got_select(all_fds, time_remaining); gui_rpcs.got_select(all_fds); if (have_async) { // do the async file op only if no network activity // if (n == 0) { do_async_file_op(); } } else { if (n == 0) { break; } } set_now(); if (now > end_time) break; time_remaining = end_time - now; } } #define POLL_ACTION(name, func) \ do { if (func()) { \ ++actions; \ if (log_flags.poll_debug) { \ msg_printf(0, MSG_INFO, "[poll] CLIENT_STATE::poll_slow_events(): " #name "\n"); \ } \ } } while(0) // Poll the client's finite-state machines // possibly triggering state transitions. 
// Returns true if something happened // (in which case should call this again immediately) // bool CLIENT_STATE::poll_slow_events() { int actions = 0, retval; static int last_suspend_reason=0; static bool tasks_restarted = false; static bool first=true; double old_now = now; set_now(); if (cant_write_state_file) { return false; } if (now - old_now > POLL_INTERVAL*10) { if (log_flags.network_status_debug) { msg_printf(0, MSG_INFO, "[network_status] woke up after %f seconds", now - old_now ); } last_wakeup_time = now; } if (run_cpu_benchmarks && can_run_cpu_benchmarks()) { run_cpu_benchmarks = false; start_cpu_benchmarks(); } #ifdef _WIN32 if (have_sysmon_msg) { msg_printf(NULL, MSG_INFO, sysmon_msg); have_sysmon_msg = false; } #endif bool old_user_active = user_active; #ifdef ANDROID user_active = device_status.user_active; #else long idle_time = host_info.user_idle_time(check_all_logins); user_active = idle_time < global_prefs.idle_time_to_run * 60; #endif if (user_active != old_user_active) { request_schedule_cpus(user_active?"Not idle":"Idle"); } #if 0 // NVIDIA provides an interface for finding if a GPU is // running a graphics app. ATI doesn't as far as I know // if (host_info.have_nvidia() && user_active && !global_prefs.run_gpu_if_user_active) { if (host_info.coprocs.nvidia.check_running_graphics_app()) { request_schedule_cpus("GPU state change"); } } #endif #ifdef __APPLE__ // Mac screensaver launches client if not already running. // OS X quits screensaver when energy saver puts display to sleep, // but we want to keep crunching. // Also, user can start Mac screensaver by putting cursor in "hot corner" // so idletime may be very small initially. // If screensaver started client, this code tells client // to exit when user becomes active, accounting for all these factors. // if (started_by_screensaver && (idle_time < 30) && (getppid() == 1)) { // pid is 1 if parent has exited requested_exit = true; } // Exit if we were launched by Manager and it crashed. // if (launched_by_manager && (getppid() == 1)) { gstate.requested_exit = true; } #endif // active_tasks.get_memory_usage() sets variables needed by // check_suspend_processing(), so it must be called first. 
// active_tasks.get_memory_usage(); suspend_reason = check_suspend_processing(); // suspend or resume activities (but only if already did startup) // if (tasks_restarted) { if (suspend_reason) { if (!tasks_suspended) { show_suspend_tasks_message(suspend_reason); active_tasks.suspend_all(suspend_reason); } last_suspend_reason = suspend_reason; } else { if (tasks_suspended && !tasks_throttled) { resume_tasks(last_suspend_reason); } } } else if (first) { // if suspended, show message the first time around // first = false; if (suspend_reason) { show_suspend_tasks_message(suspend_reason); } } tasks_suspended = (suspend_reason != 0); if (benchmarks_running) { cpu_benchmarks_poll(); } int old_network_suspend_reason = network_suspend_reason; bool old_network_suspended = network_suspended; check_suspend_network(); if (network_suspend_reason) { if (!old_network_suspend_reason) { char buf[256]; if (network_suspended) { snprintf(buf, sizeof(buf), "Suspending network activity - %s", suspend_reason_string(network_suspend_reason) ); request_schedule_cpus("network suspended"); // in case any "needs_network" jobs are running } else { snprintf(buf, sizeof(buf), "Suspending file transfers - %s", suspend_reason_string(network_suspend_reason) ); } msg_printf(NULL, MSG_INFO, "%s", buf); pers_file_xfers->suspend(); } } else { if (old_network_suspend_reason) { if (old_network_suspended) { msg_printf(NULL, MSG_INFO, "Resuming network activity"); } else { msg_printf(NULL, MSG_INFO, "Resuming file transfers"); } request_schedule_cpus("network resumed"); } // if we're emerging from a bandwidth quota suspension, // add a random delay to avoid DDOS effect // if ( old_network_suspend_reason == SUSPEND_REASON_NETWORK_QUOTA_EXCEEDED && network_run_mode.get_current() == RUN_MODE_AUTO ) { pers_file_xfers->add_random_delay(3600); } } // NOTE: // The order of calls in the following lists generally doesn't matter, // except for the following: // must have: // active_tasks_poll // handle_finished_apps // schedule_cpus // in that order (active_tasks_poll() sets must_schedule_cpus, // and handle_finished_apps() must be done before schedule_cpus() check_project_timeout(); #ifdef ENABLE_AUTO_UPDATE auto_update.poll(); #endif POLL_ACTION(active_tasks , active_tasks.poll ); POLL_ACTION(garbage_collect , garbage_collect ); // remove PERS_FILE_XFERs (and associated FILE_XFERs and HTTP_OPs) // for unreferenced files POLL_ACTION(gui_http , gui_http.poll ); POLL_ACTION(gui_rpc_http , gui_rpcs.poll ); POLL_ACTION(trickle_up_ops, trickle_up_poll); // scan FILE_INFOS and create PERS_FILE_XFERs // for PERS_FILE_XFERS that are done, delete them if (!network_suspended && suspend_reason != SUSPEND_REASON_BENCHMARKS) { // don't initiate network activity if we're doing CPU benchmarks net_status.poll(); daily_xfer_history.poll(); POLL_ACTION(acct_mgr , acct_mgr_info.poll ); POLL_ACTION(file_xfers , file_xfers->poll ); // check for file xfer completion; don't delete anything POLL_ACTION(pers_file_xfers , pers_file_xfers->poll ); // poll PERS_FILE_XFERS // if we need to start xfer, creat FILE_XFER and init // if FILE_XFER is complete // handle transient and permanent failures // delete the FILE_XFER if (!cc_config.no_info_fetch) { POLL_ACTION(rss_feed_op , rss_feed_op.poll ); } } POLL_ACTION(create_and_delete_pers_file_xfers , create_and_delete_pers_file_xfers ); POLL_ACTION(handle_finished_apps , handle_finished_apps ); POLL_ACTION(update_results , update_results ); if (!tasks_suspended) { POLL_ACTION(schedule_cpus, schedule_cpus ); tasks_restarted 
= true; } if (!network_suspended) { POLL_ACTION(scheduler_rpc , scheduler_rpc_poll ); } retval = write_state_file_if_needed(); if (retval) { msg_printf(NULL, MSG_INTERNAL_ERROR, "Couldn't write state file: %s; giving up", boincerror(retval) ); exit(EXIT_STATEFILE_WRITE); } if (log_flags.poll_debug) { msg_printf(0, MSG_INFO, "[poll] CLIENT_STATE::do_something(): End poll: %d tasks active\n", actions ); } if (actions > 0) { return true; } else { time_stats.update(suspend_reason, gpu_suspend_reason); // on some systems, DNS resolution only starts working // a few minutes after system boot. // If it didn't work before, try it again. // if (!strlen(host_info.domain_name)) { host_info.get_local_network_info(); } return false; } } #endif // ifndef SIM // See if the project specified by master_url already exists // in the client state record. Ignore any trailing "/" characters // PROJECT* CLIENT_STATE::lookup_project(const char* master_url) { int len1, len2; char *mu; len1 = (int)strlen(master_url); if (master_url[strlen(master_url)-1] == '/') len1--; for (unsigned int i=0; i<projects.size(); i++) { mu = projects[i]->master_url; len2 = (int)strlen(mu); if (mu[strlen(mu)-1] == '/') len2--; if (!strncmp(master_url, projects[i]->master_url, max(len1,len2))) { return projects[i]; } } return 0; } APP* CLIENT_STATE::lookup_app(PROJECT* p, const char* name) { for (unsigned int i=0; i<apps.size(); i++) { APP* app = apps[i]; if (app->project == p && !strcmp(name, app->name)) return app; } return 0; } RESULT* CLIENT_STATE::lookup_result(PROJECT* p, const char* name) { for (unsigned int i=0; i<results.size(); i++) { RESULT* rp = results[i]; if (rp->project == p && !strcmp(name, rp->name)) return rp; } return 0; } WORKUNIT* CLIENT_STATE::lookup_workunit(PROJECT* p, const char* name) { for (unsigned int i=0; i<workunits.size(); i++) { WORKUNIT* wup = workunits[i]; if (wup->project == p && !strcmp(name, wup->name)) return wup; } return 0; } APP_VERSION* CLIENT_STATE::lookup_app_version( APP* app, char* platform, int version_num, char* plan_class ) { for (unsigned int i=0; i<app_versions.size(); i++) { APP_VERSION* avp = app_versions[i]; if (avp->app != app) continue; if (version_num != avp->version_num) continue; if (strcmp(avp->platform, platform)) continue; if (strcmp(avp->plan_class, plan_class)) continue; return avp; } return 0; } FILE_INFO* CLIENT_STATE::lookup_file_info(PROJECT* p, const char* name) { for (unsigned int i=0; i<file_infos.size(); i++) { FILE_INFO* fip = file_infos[i]; if (fip->project == p && !strcmp(fip->name, name)) { return fip; } } return 0; } // functions to create links between state objects // (which, in their XML form, reference one another by name) // Return nonzero if already in client state. 
// int CLIENT_STATE::link_app(PROJECT* p, APP* app) { if (lookup_app(p, app->name)) return ERR_NOT_UNIQUE; app->project = p; return 0; } int CLIENT_STATE::link_file_info(PROJECT* p, FILE_INFO* fip) { if (lookup_file_info(p, fip->name)) return ERR_NOT_UNIQUE; fip->project = p; return 0; } int CLIENT_STATE::link_app_version(PROJECT* p, APP_VERSION* avp) { APP* app; avp->project = p; app = lookup_app(p, avp->app_name); if (!app) { msg_printf(p, MSG_INTERNAL_ERROR, "State file error: bad application name %s", avp->app_name ); return ERR_NOT_FOUND; } avp->app = app; if (lookup_app_version(app, avp->platform, avp->version_num, avp->plan_class)) { #ifndef SIM msg_printf(p, MSG_INTERNAL_ERROR, "State file error: duplicate app version: %s %s %d %s", avp->app_name, avp->platform, avp->version_num, avp->plan_class ); #endif return ERR_NOT_UNIQUE; } #ifndef SIM safe_strcpy(avp->graphics_exec_path, ""); safe_strcpy(avp->graphics_exec_file, ""); for (unsigned int i=0; i<avp->app_files.size(); i++) { FILE_REF& file_ref = avp->app_files[i]; FILE_INFO* fip = lookup_file_info(p, file_ref.file_name); if (!fip) { msg_printf(p, MSG_INTERNAL_ERROR, "State file error: missing application file %s", file_ref.file_name ); return ERR_NOT_FOUND; } if (!strcmp(file_ref.open_name, GRAPHICS_APP_FILENAME)) { char relpath[MAXPATHLEN], path[MAXPATHLEN]; get_pathname(fip, relpath, sizeof(relpath)); relative_to_absolute(relpath, path); safe_strcpy(avp->graphics_exec_path, path); safe_strcpy(avp->graphics_exec_file, fip->name); } // any file associated with an app version must be signed // if (!cc_config.unsigned_apps_ok) { fip->signature_required = true; } file_ref.file_info = fip; } #endif return 0; } int CLIENT_STATE::link_file_ref(PROJECT* p, FILE_REF* file_refp) { FILE_INFO* fip; fip = lookup_file_info(p, file_refp->file_name); if (!fip) { msg_printf(p, MSG_INTERNAL_ERROR, "State file error: missing file %s", file_refp->file_name ); return ERR_NOT_FOUND; } file_refp->file_info = fip; return 0; } int CLIENT_STATE::link_workunit(PROJECT* p, WORKUNIT* wup) { APP* app; unsigned int i; int retval; app = lookup_app(p, wup->app_name); if (!app) { msg_printf(p, MSG_INTERNAL_ERROR, "State file error: missing application %s", wup->app_name ); return ERR_NOT_FOUND; } wup->project = p; wup->app = app; for (i=0; i<wup->input_files.size(); i++) { retval = link_file_ref(p, &wup->input_files[i]); if (retval) { msg_printf(p, MSG_INTERNAL_ERROR, "State file error: missing input file %s\n", wup->input_files[i].file_name ); return retval; } } return 0; } int CLIENT_STATE::link_result(PROJECT* p, RESULT* rp) { WORKUNIT* wup; unsigned int i; int retval; wup = lookup_workunit(p, rp->wu_name); if (!wup) { msg_printf(p, MSG_INTERNAL_ERROR, "State file error: missing task %s\n", rp->wu_name ); return ERR_NOT_FOUND; } rp->project = p; rp->wup = wup; rp->app = wup->app; for (i=0; i<rp->output_files.size(); i++) { retval = link_file_ref(p, &rp->output_files[i]); if (retval) return retval; } return 0; } // Print debugging information about how many projects/files/etc // are currently in the client state record // void CLIENT_STATE::print_summary() { unsigned int i; double t; msg_printf(0, MSG_INFO, "[state] Client state summary:"); msg_printf(0, MSG_INFO, "%d projects:", (int)projects.size()); for (i=0; i<projects.size(); i++) { t = projects[i]->min_rpc_time; if (t) { msg_printf(0, MSG_INFO, " %s min RPC %f.0 seconds from now", projects[i]->master_url, t-now); } else { msg_printf(0, MSG_INFO, " %s", projects[i]->master_url); } } msg_printf(0, 
MSG_INFO, "%d file_infos:", (int)file_infos.size()); for (i=0; i<file_infos.size(); i++) { msg_printf(0, MSG_INFO, " %s status:%d %s", file_infos[i]->name, file_infos[i]->status, file_infos[i]->pers_file_xfer?"active":"inactive"); } msg_printf(0, MSG_INFO, "%d app_versions", (int)app_versions.size()); for (i=0; i<app_versions.size(); i++) { msg_printf(0, MSG_INFO, " %s %d", app_versions[i]->app_name, app_versions[i]->version_num); } msg_printf(0, MSG_INFO, "%d workunits", (int)workunits.size()); for (i=0; i<workunits.size(); i++) { msg_printf(0, MSG_INFO, " %s", workunits[i]->name); } msg_printf(0, MSG_INFO, "%d results", (int)results.size()); for (i=0; i<results.size(); i++) { msg_printf(0, MSG_INFO, " %s state:%d", results[i]->name, results[i]->state()); } msg_printf(0, MSG_INFO, "%d persistent file xfers", (int)pers_file_xfers->pers_file_xfers.size()); for (i=0; i<pers_file_xfers->pers_file_xfers.size(); i++) { msg_printf(0, MSG_INFO, " %s http op state: %d", pers_file_xfers->pers_file_xfers[i]->fip->name, (pers_file_xfers->pers_file_xfers[i]->fxp?pers_file_xfers->pers_file_xfers[i]->fxp->http_op_state:-1)); } msg_printf(0, MSG_INFO, "%d active tasks", (int)active_tasks.active_tasks.size()); for (i=0; i<active_tasks.active_tasks.size(); i++) { msg_printf(0, MSG_INFO, " %s", active_tasks.active_tasks[i]->result->name); } } int CLIENT_STATE::nresults_for_project(PROJECT* p) { int n=0; for (unsigned int i=0; i<results.size(); i++) { if (results[i]->project == p) n++; } return n; } bool CLIENT_STATE::abort_unstarted_late_jobs() { bool action = false; if (now < 1235668593) return false; // skip if user reset system clock for (unsigned int i=0; i<results.size(); i++) { RESULT* rp = results[i]; if (!rp->is_not_started()) continue; if (rp->report_deadline > now) continue; msg_printf(rp->project, MSG_INFO, "Aborting task %s; not started and deadline has passed", rp->name ); rp->abort_inactive(EXIT_UNSTARTED_LATE); action = true; } return action; } bool CLIENT_STATE::garbage_collect() { bool action; static double last_time=0; if (!clock_change && now - last_time < GARBAGE_COLLECT_PERIOD) return false; last_time = gstate.now; action = abort_unstarted_late_jobs(); if (action) return true; action = garbage_collect_always(); if (action) return true; #ifndef SIM // Detach projects that are marked for detach when done // and are in fact done (have no results). // This is done here (not in garbage_collect_always()) // because detach_project() calls garbage_collect_always(), // and we need to avoid infinite recursion // while (1) { bool found = false; for (unsigned i=0; i<projects.size(); i++) { PROJECT* p = projects[i]; if (p->detach_when_done && !nresults_for_project(p)) { // If we're using an AM, // wait until the next successful RPC to detach project, // so the AM will be informed of its work done. 
// if (!p->attached_via_acct_mgr) { msg_printf(p, MSG_INFO, "Detaching - no more tasks"); detach_project(p); action = true; found = true; } } } if (!found) break; } #endif return action; } // delete unneeded records and files // bool CLIENT_STATE::garbage_collect_always() { unsigned int i, j; int failnum; FILE_INFO* fip; RESULT* rp; WORKUNIT* wup; APP_VERSION* avp, *avp2; vector<RESULT*>::iterator result_iter; vector<WORKUNIT*>::iterator wu_iter; vector<FILE_INFO*>::iterator fi_iter; vector<APP_VERSION*>::iterator avp_iter; bool action = false, found; string error_msgs; PROJECT* project; // zero references counts on WUs, FILE_INFOs and APP_VERSIONs for (i=0; i<workunits.size(); i++) { wup = workunits[i]; wup->ref_cnt = 0; } for (i=0; i<file_infos.size(); i++) { fip = file_infos[i]; fip->ref_cnt = 0; } for (i=0; i<app_versions.size(); i++) { avp = app_versions[i]; avp->ref_cnt = 0; } // reference-count user and project files // for (i=0; i<projects.size(); i++) { project = projects[i]; for (j=0; j<project->user_files.size(); j++) { project->user_files[j].file_info->ref_cnt++; } for (j=0; j<project->project_files.size(); j++) { project->project_files[j].file_info->ref_cnt++; } } #ifdef ENABLE_AUTO_UPDATE // reference-count auto update files // if (auto_update.present) { for (i=0; i<auto_update.file_refs.size(); i++) { auto_update.file_refs[i].file_info->ref_cnt++; } } #endif // Scan through RESULTs. // delete RESULTs that have been reported and acked. // Check for results whose WUs had download failures // Check for results that had upload failures // Reference-count output files // Reference-count WUs // result_iter = results.begin(); while (result_iter != results.end()) { rp = *result_iter; #ifndef SIM if (rp->got_server_ack) { // see if - for some reason - there's an active task // for this result. don't want to create dangling ptr. // ACTIVE_TASK* atp = active_tasks.lookup_result(rp); if (atp) { msg_printf(rp->project, MSG_INTERNAL_ERROR, "garbage_collect(); still have active task for acked result %s; state %d", rp->name, atp->task_state() ); atp->abort_task( EXIT_ABORTED_BY_CLIENT, "Got ack for job that's still active" ); } else { if (log_flags.state_debug) { msg_printf(0, MSG_INFO, "[state] garbage_collect: deleting result %s\n", rp->name ); } add_old_result(*rp); delete rp; result_iter = results.erase(result_iter); action = true; continue; } } #endif // See if the files for this result's workunit had // any errors (download failure, MD5, RSA, etc) // and we don't already have an error for this result // if (!rp->ready_to_report) { wup = rp->wup; if (wup->had_download_failure(failnum)) { wup->get_file_errors(error_msgs); string err_msg = "WU download error: " + error_msgs; report_result_error(*rp, err_msg.c_str()); } else if (rp->avp && rp->avp->had_download_failure(failnum)) { rp->avp->get_file_errors(error_msgs); string err_msg = "app_version download error: " + error_msgs; report_result_error(*rp, err_msg.c_str()); } } bool found_error = false; string error_str; for (i=0; i<rp->output_files.size(); i++) { // If one of the output files had an upload failure, // mark the result as done and report the error. // if (!rp->ready_to_report) { fip = rp->output_files[i].file_info; if (fip->had_failure(failnum)) { string msg; fip->failure_message(msg); found_error = true; error_str += msg; } } rp->output_files[i].file_info->ref_cnt++; } #ifndef SIM if (found_error) { // check for process still running; this can happen // e.g. 
if an intermediate upload fails // ACTIVE_TASK* atp = active_tasks.lookup_result(rp); if (atp) { switch (atp->task_state()) { case PROCESS_EXECUTING: case PROCESS_SUSPENDED: atp->abort_task(ERR_RESULT_UPLOAD, "upload failure"); } } string err_msg = "upload failure: " + error_str; report_result_error(*rp, err_msg.c_str()); } #endif rp->avp->ref_cnt++; rp->wup->ref_cnt++; ++result_iter; } // delete WORKUNITs not referenced by any in-progress result; // reference-count files and APP_VERSIONs referred to by other WUs // wu_iter = workunits.begin(); while (wu_iter != workunits.end()) { wup = *wu_iter; if (wup->ref_cnt == 0) { if (log_flags.state_debug) { msg_printf(0, MSG_INFO, "[state] CLIENT_STATE::garbage_collect(): deleting workunit %s\n", wup->name ); } delete wup; wu_iter = workunits.erase(wu_iter); action = true; } else { for (i=0; i<wup->input_files.size(); i++) { wup->input_files[i].file_info->ref_cnt++; } ++wu_iter; } } // go through APP_VERSIONs; // delete any not referenced by any WORKUNIT // and superceded by a more recent version // for the same platform and plan class // avp_iter = app_versions.begin(); while (avp_iter != app_versions.end()) { avp = *avp_iter; if (avp->ref_cnt == 0) { found = false; for (j=0; j<app_versions.size(); j++) { avp2 = app_versions[j]; if (avp2->app == avp->app && avp2->version_num > avp->version_num && (!strcmp(avp2->plan_class, avp->plan_class)) && (!strcmp(avp2->platform, avp->platform)) ) { found = true; break; } } if (found) { delete avp; avp_iter = app_versions.erase(avp_iter); action = true; } else { ++avp_iter; } } else { ++avp_iter; } } // Then go through remaining APP_VERSIONs, // bumping refcnt of associated files. // for (i=0; i<app_versions.size(); i++) { avp = app_versions[i]; for (j=0; j<avp->app_files.size(); j++) { avp->app_files[j].file_info->ref_cnt++; } } // reference-count sticky files not marked for deletion // for (fi_iter = file_infos.begin(); fi_iter!=file_infos.end(); ++fi_iter) { fip = *fi_iter; if (fip->sticky_expire_time && now > fip->sticky_expire_time) { fip->sticky = false; fip->sticky_expire_time = 0; } if (!fip->sticky) continue; if (fip->status < 0) continue; fip->ref_cnt++; } // remove PERS_FILE_XFERs (and associated FILE_XFERs and HTTP_OPs) // for unreferenced files // vector<PERS_FILE_XFER*>::iterator pfx_iter; pfx_iter = pers_file_xfers->pers_file_xfers.begin(); while (pfx_iter != pers_file_xfers->pers_file_xfers.end()) { PERS_FILE_XFER* pfx = *pfx_iter; if (pfx->fip->ref_cnt == 0) { pfx->suspend(); delete pfx; pfx_iter = pers_file_xfers->pers_file_xfers.erase(pfx_iter); } else { ++pfx_iter; } } // delete FILE_INFOs (and corresponding files) that are not referenced // fi_iter = file_infos.begin(); while (fi_iter != file_infos.end()) { fip = *fi_iter; if (fip->ref_cnt==0) { fip->delete_file(); if (log_flags.state_debug) { msg_printf(0, MSG_INFO, "[state] CLIENT_STATE::garbage_collect(): deleting file %s\n", fip->name ); } delete fip; fi_iter = file_infos.erase(fi_iter); action = true; } else { ++fi_iter; } } if (action && log_flags.state_debug) { print_summary(); } return action; } // For results that are waiting for file transfer, // check if the transfer is done, // and if so switch to new state and take other actions. // Also set some fields for newly-aborted results. 
// bool CLIENT_STATE::update_results() { RESULT* rp; vector<RESULT*>::iterator result_iter; bool action = false; static double last_time=0; if (!clock_change && now - last_time < UPDATE_RESULTS_PERIOD) return false; last_time = now; result_iter = results.begin(); while (result_iter != results.end()) { rp = *result_iter; switch (rp->state()) { case RESULT_NEW: rp->set_state(RESULT_FILES_DOWNLOADING, "CS::update_results"); action = true; break; #ifndef SIM case RESULT_FILES_DOWNLOADING: if (input_files_available(rp, false) == 0) { if (rp->avp->app_files.size()==0) { // if this is a file-transfer app, start the upload phase // rp->set_state(RESULT_FILES_UPLOADING, "CS::update_results"); rp->clear_uploaded_flags(); } else { // else try to start the computation // rp->set_state(RESULT_FILES_DOWNLOADED, "CS::update_results"); request_schedule_cpus("files downloaded"); } action = true; } break; #endif case RESULT_FILES_UPLOADING: if (rp->is_upload_done()) { rp->set_ready_to_report(); rp->completed_time = gstate.now; rp->set_state(RESULT_FILES_UPLOADED, "CS::update_results"); // clear backoffs for app's resources; // this addresses the situation where the project has a // "max # jobs in progress" limit, // and we're backed off because of that // work_fetch.clear_backoffs(*rp->avp); action = true; } break; case RESULT_FILES_UPLOADED: break; case RESULT_ABORTED: if (!rp->ready_to_report) { rp->set_ready_to_report(); rp->completed_time = now; action = true; } break; } ++result_iter; } return action; } // Returns true if client should exit for various reasons // bool CLIENT_STATE::time_to_exit() { if (exit_after_app_start_secs && (app_started>0) && ((now - app_started) >= exit_after_app_start_secs) ) { msg_printf(NULL, MSG_INFO, "Exiting because %d elapsed since started task", exit_after_app_start_secs ); return true; } if (cc_config.exit_when_idle && (results.size() == 0) && had_or_requested_work ) { msg_printf(NULL, MSG_INFO, "exiting because no more results"); return true; } if (cant_write_state_file) { static bool first = true; double t = now - last_wakeup_time; if (first && t > 50) { first = false; msg_printf(NULL, MSG_INFO, "Can't write state file, exiting in 10 seconds" ); } if (t > 60) { msg_printf(NULL, MSG_INFO, "Can't write state file, exiting now" ); return true; } } return false; } // Call this when a result has a nonrecoverable error. // - back off on contacting the project's scheduler // (so don't crash over and over) // - Append a description of the error to result.stderr_out // - If result state is FILES_DOWNLOADED, change it to COMPUTE_ERROR // so that we don't try to run it again. 
// int CLIENT_STATE::report_result_error(RESULT& res, const char* err_msg) { char buf[1024]; unsigned int i; int failnum; // only do this once per result // if (res.ready_to_report) { return 0; } res.set_ready_to_report(); res.completed_time = now; sprintf(buf, "Unrecoverable error for task %s", res.name); #ifndef SIM scheduler_op->project_rpc_backoff(res.project, buf); #endif res.stderr_out.append("<message>\n"); res.stderr_out.append(err_msg); res.stderr_out.append("</message>\n"); switch(res.state()) { case RESULT_NEW: case RESULT_FILES_DOWNLOADING: // called from: // CLIENT_STATE::garbage_collect() // if WU or app_version had a download failure // if (!res.exit_status) { res.exit_status = ERR_RESULT_DOWNLOAD; } break; case RESULT_FILES_DOWNLOADED: // called from: // ACTIVE_TASK::start (if couldn't start app) // ACTIVE_TASK::restart (if files missing) // ACITVE_TASK_SET::restart_tasks (catch other error returns) // ACTIVE_TASK::handle_exited_app (on nonzero exit or signal) // ACTIVE_TASK::abort_task (if exceeded resource limit) // CLIENT_STATE::schedule_cpus (catch-all for resume/start errors) // res.set_state(RESULT_COMPUTE_ERROR, "CS::report_result_error"); if (!res.exit_status) { res.exit_status = ERR_RESULT_START; } break; case RESULT_FILES_UPLOADING: // called from // CLIENT_STATE::garbage_collect() if result had an upload error // for (i=0; i<res.output_files.size(); i++) { if (res.output_files[i].file_info->had_failure(failnum)) { sprintf(buf, "<upload_error>\n" " <file_name>%s</file_name>\n" " <error_code>%d</error_code>\n" "</upload_error>\n", res.output_files[i].file_info->name, failnum ); res.stderr_out.append(buf); } } if (!res.exit_status) { res.exit_status = ERR_RESULT_UPLOAD; } res.set_state(RESULT_UPLOAD_FAILED, "CS::report_result_error"); break; case RESULT_FILES_UPLOADED: msg_printf(res.project, MSG_INTERNAL_ERROR, "Error reported for completed task %s", res.name ); break; } res.stderr_out = res.stderr_out.substr(0, MAX_STDERR_LEN); return 0; } #ifndef SIM // "Reset" a project: (clear error conditions) // - stop all active tasks // - stop all file transfers // - stop scheduler RPC if any // - delete workunits and results // - delete apps and app_versions // - garbage collect to delete unneeded files // - clear backoffs // // does not delete project dir // int CLIENT_STATE::reset_project(PROJECT* project, bool detaching) { unsigned int i; APP_VERSION* avp; APP* app; vector<APP*>::iterator app_iter; vector<APP_VERSION*>::iterator avp_iter; RESULT* rp; PERS_FILE_XFER* pxp; msg_printf(project, MSG_INFO, "Resetting project"); active_tasks.abort_project(project); // stop and remove file transfers // for (i=0; i<pers_file_xfers->pers_file_xfers.size(); i++) { pxp = pers_file_xfers->pers_file_xfers[i]; if (pxp->fip->project == project) { if (pxp->fxp) { file_xfers->remove(pxp->fxp); delete pxp->fxp; } pers_file_xfers->remove(pxp); delete pxp; i--; } } // if we're in the middle of a scheduler op to the project, abort it // scheduler_op->abort(project); // abort other HTTP operations // //http_ops.abort_project_ops(project); // mark results as server-acked. 
// This will cause garbage_collect to delete them, // and in turn their WUs will be deleted // for (i=0; i<results.size(); i++) { rp = results[i]; if (rp->project == project) { rp->got_server_ack = true; } } project->user_files.clear(); project->project_files.clear(); // clear flags so that sticky files get deleted // for (i=0; i<file_infos.size(); i++) { FILE_INFO* fip = file_infos[i]; if (fip->project == project) { fip->sticky = false; } } garbage_collect_always(); // remove apps and app_versions (but not if anonymous platform) // if (!project->anonymous_platform || detaching) { avp_iter = app_versions.begin(); while (avp_iter != app_versions.end()) { avp = *avp_iter; if (avp->project == project) { avp_iter = app_versions.erase(avp_iter); delete avp; } else { ++avp_iter; } } app_iter = apps.begin(); while (app_iter != apps.end()) { app = *app_iter; if (app->project == project) { app_iter = apps.erase(app_iter); delete app; } else { ++app_iter; } } garbage_collect_always(); } // if not anonymous platform, clean out the project dir // except for app_config.xml // if (!project->anonymous_platform) { client_clean_out_dir( project->project_dir(), "reset project", "app_config.xml" ); } // force refresh of scheduler URLs // project->scheduler_urls.clear(); project->duration_correction_factor = 1; project->ams_resource_share = -1; project->min_rpc_time = 0; project->pwf.reset(project); for (int j=0; j<coprocs.n_rsc; j++) { project->rsc_pwf[j].reset(); } write_state_file(); return 0; } // "Detach" a project: // - Reset (see above) // - delete all file infos // - delete account file // - delete project directory // - delete various per-project files // int CLIENT_STATE::detach_project(PROJECT* project) { vector<PROJECT*>::iterator project_iter; vector<FILE_INFO*>::iterator fi_iter; FILE_INFO* fip; PROJECT* p; char path[MAXPATHLEN]; int retval; reset_project(project, true); msg_printf(project, MSG_INFO, "Detaching from project"); // delete all FILE_INFOs associated with this project // fi_iter = file_infos.begin(); while (fi_iter != file_infos.end()) { fip = *fi_iter; if (fip->project == project) { fi_iter = file_infos.erase(fi_iter); delete fip; } else { ++fi_iter; } } // find project and remove it from the vector // for (project_iter = projects.begin(); project_iter != projects.end(); ++project_iter) { p = *project_iter; if (p == project) { project_iter = projects.erase(project_iter); break; } } // delete statistics file // get_statistics_filename(project->master_url, path, sizeof(path)); retval = boinc_delete_file(path); if (retval) { msg_printf(project, MSG_INTERNAL_ERROR, "Can't delete statistics file: %s", boincerror(retval) ); } // delete account file // get_account_filename(project->master_url, path, sizeof(path)); retval = boinc_delete_file(path); if (retval) { msg_printf(project, MSG_INTERNAL_ERROR, "Can't delete account file: %s", boincerror(retval) ); } get_sched_request_filename(*project, path, sizeof(path)); retval = boinc_delete_file(path); get_sched_reply_filename(*project, path, sizeof(path)); retval = boinc_delete_file(path); get_master_filename(*project, path, sizeof(path)); retval = boinc_delete_file(path); // remove project directory and its contents // retval = remove_project_dir(*project); if (retval) { msg_printf(project, MSG_INTERNAL_ERROR, "Can't delete project directory: %s", boincerror(retval) ); } // remove miscellaneous per-project files // //job_log_filename(*project, path, sizeof(path)); //boinc_delete_file(path); delete_project_notice_files(project); 
rss_feeds.update_feed_list(); delete project; write_state_file(); adjust_rec(); request_schedule_cpus("Detach"); request_work_fetch("Detach"); return 0; } // Quit running applications, quit benchmarks, // write the client_state.xml file // (in principle we could also terminate net_xfers here, // e.g. flush buffers, but why bother) // int CLIENT_STATE::quit_activities() { // calculate REC (for state file) // adjust_rec(); daily_xfer_history.write_file(); write_state_file(); gui_rpcs.close(); abort_cpu_benchmarks(); time_stats.quit(); // stop jobs. // Do this last because it could take a long time, // and the OS might kill us in the middle // int retval = active_tasks.exit_tasks(); if (retval) { msg_printf(NULL, MSG_INTERNAL_ERROR, "Couldn't exit tasks: %s", boincerror(retval) ); } return 0; } #endif // Called at startup to see if a timestamp in the client state file // is later than the current time. // If so, the user must have decremented the system clock. // void CLIENT_STATE::check_clock_reset() { if (!time_stats.last_update) return; if (time_stats.last_update <= now) return; msg_printf(NULL, MSG_INFO, "System clock (%.0f) < state file timestamp (%.0f); clearing timeouts", now, time_stats.last_update ); clear_absolute_times(); } // The system clock seems to have been set back, // possibly by a large amount (years). // Clear various "wait until X" absolute times. // // Note: there are other absolute times (like job deadlines) // that we could try to patch up, but it's not clear how. // void CLIENT_STATE::clear_absolute_times() { exclusive_app_running = 0; exclusive_gpu_app_running = 0; new_version_check_time = now; all_projects_list_check_time = now; retry_shmem_time = 0; cpu_run_mode.temp_timeout = 0; gpu_run_mode.temp_timeout = 0; network_run_mode.temp_timeout = 0; time_stats.last_update = now; unsigned int i; for (i=0; i<projects.size(); i++) { PROJECT* p = projects[i]; p->min_rpc_time = 0; if (p->next_rpc_time) { p->next_rpc_time = now; } p->download_backoff.next_xfer_time = 0; p->upload_backoff.next_xfer_time = 0; for (int j=0; j<coprocs.n_rsc; j++) { p->rsc_pwf[j].clear_backoff(); } //#ifdef USE_REC p->pwf.rec_time = now; //#endif } for (i=0; i<pers_file_xfers->pers_file_xfers.size(); i++) { PERS_FILE_XFER* pfx = pers_file_xfers->pers_file_xfers[i]; pfx->next_request_time = 0; } for (i=0; i<results.size(); i++) { RESULT* rp = results[i]; rp->schedule_backoff = 0; } } void CLIENT_STATE::log_show_projects() { char buf[256]; for (unsigned int i=0; i<projects.size(); i++) { PROJECT* p = projects[i]; if (p->hostid) { sprintf(buf, "%d", p->hostid); } else { safe_strcpy(buf, "not assigned yet"); } msg_printf(p, MSG_INFO, "URL %s; Computer ID %s; resource share %.0f", p->master_url, buf, p->resource_share ); if (p->ended) { msg_printf(p, MSG_INFO, "Project has ended - OK to detach"); } p->show_no_work_notice(); } } #ifndef SIM // the following is done on client exit if the // "abort_jobs_on_exit" flag is present. // Abort jobs, and arrange to tell projects about it. 
// void CLIENT_STATE::start_abort_sequence() { unsigned int i; in_abort_sequence = true; for (i=0; i<results.size(); i++) { RESULT* rp = results[i]; rp->project->sched_rpc_pending = RPC_REASON_USER_REQ; if (rp->computing_done()) continue; ACTIVE_TASK* atp = lookup_active_task_by_result(rp); if (atp) { atp->abort_task(EXIT_CLIENT_EXITING, "aborting on client exit"); } else { rp->abort_inactive(EXIT_CLIENT_EXITING); } } for (i=0; i<projects.size(); i++) { PROJECT* p = projects[i]; p->min_rpc_time = 0; p->dont_request_more_work = true; } } // The second part of the above; check if RPCs are done // bool CLIENT_STATE::abort_sequence_done() { unsigned int i; for (i=0; i<projects.size(); i++) { PROJECT* p = projects[i]; if (p->sched_rpc_pending == RPC_REASON_USER_REQ) return false; } return true; } #endif
1
14,361
What's the purpose of this change? It basically changes nothing until we want to put something between the curly braces
BOINC-boinc
php
@@ -186,6 +186,9 @@ static void handle_device_removed(struct wlr_libinput_backend *backend,
 	}
 	struct wlr_input_device *dev, *tmp_dev;
 	wl_list_for_each_safe(dev, tmp_dev, wlr_devices, link) {
+		if (dev->type == WLR_INPUT_DEVICE_TABLET_TOOL) {
+			wlr_libinput_tablet_tool_destroy(dev);
+		}
 		wlr_input_device_destroy(dev);
 	}
 	for (size_t i = 0; i < backend->wlr_device_lists.length; i++) {
1
#define _POSIX_C_SOURCE 200809L #include <assert.h> #include <libinput.h> #include <stdlib.h> #include <wayland-util.h> #include <wlr/backend/session.h> #include <wlr/interfaces/wlr_input_device.h> #include <wlr/util/log.h> #include "backend/libinput.h" #include "util/signal.h" struct wlr_input_device *get_appropriate_device( enum wlr_input_device_type desired_type, struct libinput_device *libinput_dev) { struct wl_list *wlr_devices = libinput_device_get_user_data(libinput_dev); if (!wlr_devices) { return NULL; } struct wlr_input_device *dev; wl_list_for_each(dev, wlr_devices, link) { if (dev->type == desired_type) { return dev; } } return NULL; } static void input_device_destroy(struct wlr_input_device *_dev) { struct wlr_libinput_input_device *dev = (struct wlr_libinput_input_device *)_dev; libinput_device_unref(dev->handle); wl_list_remove(&dev->wlr_input_device.link); free(dev); } static const struct wlr_input_device_impl input_device_impl = { .destroy = input_device_destroy, }; static struct wlr_input_device *allocate_device( struct wlr_libinput_backend *backend, struct libinput_device *libinput_dev, struct wl_list *wlr_devices, enum wlr_input_device_type type) { int vendor = libinput_device_get_id_vendor(libinput_dev); int product = libinput_device_get_id_product(libinput_dev); const char *name = libinput_device_get_name(libinput_dev); struct wlr_libinput_input_device *wlr_libinput_dev; if (!(wlr_libinput_dev = calloc(1, sizeof(struct wlr_libinput_input_device)))) { return NULL; } struct wlr_input_device *wlr_dev = &wlr_libinput_dev->wlr_input_device; libinput_device_get_size(libinput_dev, &wlr_dev->width_mm, &wlr_dev->height_mm); const char *output_name = libinput_device_get_output_name(libinput_dev); if (output_name != NULL) { wlr_dev->output_name = strdup(output_name); } wl_list_insert(wlr_devices, &wlr_dev->link); wlr_libinput_dev->handle = libinput_dev; libinput_device_ref(libinput_dev); wlr_input_device_init(wlr_dev, type, &input_device_impl, name, vendor, product); return wlr_dev; } bool wlr_input_device_is_libinput(struct wlr_input_device *wlr_dev) { return wlr_dev->impl == &input_device_impl; } static void handle_device_added(struct wlr_libinput_backend *backend, struct libinput_device *libinput_dev) { assert(backend && libinput_dev); /* * Note: the wlr API exposes only devices with a single capability, because * that meshes better with how Wayland does things and is a bit simpler. * However, libinput devices often have multiple capabilities - in such * cases we have to create several devices. 
*/ int vendor = libinput_device_get_id_vendor(libinput_dev); int product = libinput_device_get_id_product(libinput_dev); const char *name = libinput_device_get_name(libinput_dev); struct wl_list *wlr_devices = calloc(1, sizeof(struct wl_list)); if (!wlr_devices) { wlr_log(WLR_ERROR, "Allocation failed"); return; } wl_list_init(wlr_devices); wlr_log(WLR_DEBUG, "Added %s [%d:%d]", name, vendor, product); if (libinput_device_has_capability(libinput_dev, LIBINPUT_DEVICE_CAP_KEYBOARD)) { struct wlr_input_device *wlr_dev = allocate_device(backend, libinput_dev, wlr_devices, WLR_INPUT_DEVICE_KEYBOARD); if (!wlr_dev) { goto fail; } wlr_dev->keyboard = create_libinput_keyboard(libinput_dev); if (!wlr_dev->keyboard) { free(wlr_dev); goto fail; } wlr_signal_emit_safe(&backend->backend.events.new_input, wlr_dev); } if (libinput_device_has_capability(libinput_dev, LIBINPUT_DEVICE_CAP_POINTER)) { struct wlr_input_device *wlr_dev = allocate_device(backend, libinput_dev, wlr_devices, WLR_INPUT_DEVICE_POINTER); if (!wlr_dev) { goto fail; } wlr_dev->pointer = create_libinput_pointer(libinput_dev); if (!wlr_dev->pointer) { free(wlr_dev); goto fail; } wlr_signal_emit_safe(&backend->backend.events.new_input, wlr_dev); } if (libinput_device_has_capability(libinput_dev, LIBINPUT_DEVICE_CAP_TOUCH)) { struct wlr_input_device *wlr_dev = allocate_device(backend, libinput_dev, wlr_devices, WLR_INPUT_DEVICE_TOUCH); if (!wlr_dev) { goto fail; } wlr_dev->touch = create_libinput_touch(libinput_dev); if (!wlr_dev->touch) { free(wlr_dev); goto fail; } wlr_signal_emit_safe(&backend->backend.events.new_input, wlr_dev); } if (libinput_device_has_capability(libinput_dev, LIBINPUT_DEVICE_CAP_TABLET_TOOL)) { struct wlr_input_device *wlr_dev = allocate_device(backend, libinput_dev, wlr_devices, WLR_INPUT_DEVICE_TABLET_TOOL); if (!wlr_dev) { goto fail; } wlr_dev->tablet_tool = create_libinput_tablet_tool(libinput_dev); if (!wlr_dev->tablet_tool) { free(wlr_dev); goto fail; } wlr_signal_emit_safe(&backend->backend.events.new_input, wlr_dev); } if (libinput_device_has_capability(libinput_dev, LIBINPUT_DEVICE_CAP_TABLET_PAD)) { struct wlr_input_device *wlr_dev = allocate_device(backend, libinput_dev, wlr_devices, WLR_INPUT_DEVICE_TABLET_PAD); if (!wlr_dev) { goto fail; } wlr_dev->tablet_pad = create_libinput_tablet_pad(libinput_dev); if (!wlr_dev->tablet_pad) { free(wlr_dev); goto fail; } wlr_signal_emit_safe(&backend->backend.events.new_input, wlr_dev); } if (libinput_device_has_capability(libinput_dev, LIBINPUT_DEVICE_CAP_GESTURE)) { // TODO } if (libinput_device_has_capability(libinput_dev, LIBINPUT_DEVICE_CAP_SWITCH)) { // TODO } if (wl_list_length(wlr_devices) > 0) { libinput_device_set_user_data(libinput_dev, wlr_devices); wlr_list_push(&backend->wlr_device_lists, wlr_devices); } else { free(wlr_devices); } return; fail: wlr_log(WLR_ERROR, "Could not allocate new device"); struct wlr_input_device *dev, *tmp_dev; wl_list_for_each_safe(dev, tmp_dev, wlr_devices, link) { free(dev); } free(wlr_devices); } static void handle_device_removed(struct wlr_libinput_backend *backend, struct libinput_device *libinput_dev) { struct wl_list *wlr_devices = libinput_device_get_user_data(libinput_dev); int vendor = libinput_device_get_id_vendor(libinput_dev); int product = libinput_device_get_id_product(libinput_dev); const char *name = libinput_device_get_name(libinput_dev); wlr_log(WLR_DEBUG, "Removing %s [%d:%d]", name, vendor, product); if (!wlr_devices) { return; } struct wlr_input_device *dev, *tmp_dev; wl_list_for_each_safe(dev, tmp_dev, 
wlr_devices, link) { wlr_input_device_destroy(dev); } for (size_t i = 0; i < backend->wlr_device_lists.length; i++) { if (backend->wlr_device_lists.items[i] == wlr_devices) { wlr_list_del(&backend->wlr_device_lists, i); break; } } free(wlr_devices); } void handle_libinput_event(struct wlr_libinput_backend *backend, struct libinput_event *event) { assert(backend && event); struct libinput_device *libinput_dev = libinput_event_get_device(event); enum libinput_event_type event_type = libinput_event_get_type(event); switch (event_type) { case LIBINPUT_EVENT_DEVICE_ADDED: handle_device_added(backend, libinput_dev); break; case LIBINPUT_EVENT_DEVICE_REMOVED: handle_device_removed(backend, libinput_dev); break; case LIBINPUT_EVENT_KEYBOARD_KEY: handle_keyboard_key(event, libinput_dev); break; case LIBINPUT_EVENT_POINTER_MOTION: handle_pointer_motion(event, libinput_dev); break; case LIBINPUT_EVENT_POINTER_MOTION_ABSOLUTE: handle_pointer_motion_abs(event, libinput_dev); break; case LIBINPUT_EVENT_POINTER_BUTTON: handle_pointer_button(event, libinput_dev); break; case LIBINPUT_EVENT_POINTER_AXIS: handle_pointer_axis(event, libinput_dev); break; case LIBINPUT_EVENT_TOUCH_DOWN: handle_touch_down(event, libinput_dev); break; case LIBINPUT_EVENT_TOUCH_UP: handle_touch_up(event, libinput_dev); break; case LIBINPUT_EVENT_TOUCH_MOTION: handle_touch_motion(event, libinput_dev); break; case LIBINPUT_EVENT_TOUCH_CANCEL: handle_touch_cancel(event, libinput_dev); break; case LIBINPUT_EVENT_TOUCH_FRAME: // no-op (at least for now) break; case LIBINPUT_EVENT_TABLET_TOOL_AXIS: handle_tablet_tool_axis(event, libinput_dev); break; case LIBINPUT_EVENT_TABLET_TOOL_PROXIMITY: handle_tablet_tool_proximity(event, libinput_dev); break; case LIBINPUT_EVENT_TABLET_TOOL_TIP: handle_tablet_tool_tip(event, libinput_dev); break; case LIBINPUT_EVENT_TABLET_TOOL_BUTTON: handle_tablet_tool_button(event, libinput_dev); break; case LIBINPUT_EVENT_TABLET_PAD_BUTTON: handle_tablet_pad_button(event, libinput_dev); break; case LIBINPUT_EVENT_TABLET_PAD_RING: handle_tablet_pad_ring(event, libinput_dev); break; case LIBINPUT_EVENT_TABLET_PAD_STRIP: handle_tablet_pad_strip(event, libinput_dev); break; default: wlr_log(WLR_DEBUG, "Unknown libinput event %d", event_type); break; } }
1
11,290
Hmm, why is this needed? `wlr_input_device_destroy` should destroy the tablet tool.
swaywm-wlroots
c
@@ -62,6 +62,12 @@ public class DiscoApiModel implements ApiModel {
     return interfaceModels;
   }

+  @Override
+  public Iterable<? extends TypeModel> getAdditionalTypes() {
+    // TODO: is this supported?
+    return ImmutableList.of();
+  }
+
   @Override
   public InterfaceModel getInterface(String interfaceName) {
     for (InterfaceModel interfaceModel : getInterfaces()) {
1
/* Copyright 2017 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.config; import com.google.api.codegen.discovery.Document; import com.google.api.codegen.util.Name; import com.google.common.collect.ImmutableList; import java.util.List; /** * Utility class that provides data from "service config", as defined in a service yaml file. * * <p>The scope of this configuration is at the product level, and covers multiple API interfaces. */ public class DiscoApiModel implements ApiModel { private final Document document; private ImmutableList<DiscoInterfaceModel> interfaceModels; @Override public String getServiceName() { return document.canonicalName(); } @Override public String getDocumentationSummary() { return document.description(); } @Override public boolean hasMultipleServices() { return document.resources().size() > 1; } @Override public Iterable<DiscoInterfaceModel> getInterfaces() { if (interfaceModels != null) { return interfaceModels; } ImmutableList.Builder<DiscoInterfaceModel> builder = ImmutableList.builder(); for (String resource : document.resources().keySet()) { String ownerName = document.ownerDomain().split("\\.")[0]; String resourceName = Name.anyCamel(resource).toUpperCamel(); String interfaceName = String.format( "%s.%s.%s.%s", ownerName, document.name(), document.version(), resourceName); builder.add(new DiscoInterfaceModel(interfaceName, document)); } interfaceModels = builder.build(); return interfaceModels; } @Override public InterfaceModel getInterface(String interfaceName) { for (InterfaceModel interfaceModel : getInterfaces()) { if (interfaceModel.getSimpleName().equals(interfaceName) || interfaceModel.getFullName().equals(interfaceName)) { return interfaceModel; } } return null; } public DiscoApiModel(Document document) { this.document = document; } public Document getDocument() { return document; } @Override public ApiSource getApiSource() { return ApiSource.DISCOVERY; } @Override public String getServiceAddress() { // TODO(andrealin): Implement. return document.baseUrl(); } /** Return the service port. */ @Override public Integer getServicePort() { return 443; } @Override public String getTitle() { return document.title(); } @Override public List<String> getAuthScopes() { return document.authScopes(); } @Override public boolean equals(Object o) { return o != null && o instanceof DiscoApiModel && ((DiscoApiModel) o).document.equals(document); } }
1
24,641
I think it's better to throw `UnsupportedOperationException`
googleapis-gapic-generator
java
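A possible reading of the review comment above: rather than returning an empty list behind a TODO, DiscoApiModel.getAdditionalTypes() should fail fast. The sketch below follows that suggestion; the exception message is an illustrative assumption, not text from the project.

// Sketch of the reviewer's suggestion for the method added in the patch above.
// Only the exception type comes from the review comment; the message text is made up.
@Override
public Iterable<? extends TypeModel> getAdditionalTypes() {
  throw new UnsupportedOperationException(
      "additional types are not supported by DiscoApiModel");
}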
@@ -124,6 +124,14 @@ function createElement(...args) {
 			delete props.defaultValue;
 		}

+		if (Array.isArray(props.value) && props.multiple && type==='select') {
+			props.children.forEach((child) => {
+				if (props.value.includes(child.props.value)) {
+					child.props.selected = true;
+				}
+			});
+			delete props.value;
+		}
 		handleElementVNode(vnode, props);
 	}

1
import { render as preactRender, cloneElement as preactCloneElement, createRef, h, Component, options, toChildArray, createContext, Fragment } from 'preact'; import * as hooks from 'preact/hooks'; export * from 'preact/hooks'; const version = '16.8.0'; // trick libraries to think we are react /* istanbul ignore next */ const REACT_ELEMENT_TYPE = (typeof Symbol!=='undefined' && Symbol.for && Symbol.for('react.element')) || 0xeac7; const CAMEL_PROPS = /^(?:accent|alignment|arabic|baseline|cap|clip|color|fill|flood|font|glyph|horiz|marker|overline|paint|stop|strikethrough|stroke|text|underline|unicode|units|v|vector|vert|word|writing|x)[A-Z]/; let oldEventHook = options.event; options.event = e => { /* istanbul ignore next */ if (oldEventHook) e = oldEventHook(e); e.persist = Object; e.nativeEvent = e; return e; }; /** * Legacy version of createElement. * @param {import('./internal').VNode["type"]} type The node name or Component constructor */ function createFactory(type) { return createElement.bind(null, type); } /** * Normalize DOM vnode properties. * @param {import('./internal').VNode} vnode The vnode to normalize props of * @param {object | null | undefined} props props to normalize */ function handleElementVNode(vnode, props) { let shouldSanitize, attrs, i; for (i in props) if ((shouldSanitize = CAMEL_PROPS.test(i))) break; if (shouldSanitize) { attrs = vnode.props = {}; for (i in props) { attrs[CAMEL_PROPS.test(i) ? i.replace(/([A-Z0-9])/, '-$1').toLowerCase() : i] = props[i]; } } } /** * Proxy render() since React returns a Component reference. * @param {import('./internal').VNode} vnode VNode tree to render * @param {import('./internal').PreactElement} parent DOM node to render vnode tree into * @param {() => void} [callback] Optional callback that will be called after rendering * @returns {import('./internal').Component | null} The root component reference or null */ function render(vnode, parent, callback) { preactRender(vnode, parent); if (typeof callback==='function') callback(); return vnode!=null ? vnode._component : null; } class ContextProvider { getChildContext() { return this.props.context; } render(props) { return props.children; } } /** * Portal component * @param {object | null | undefined} props */ function Portal(props) { let wrap = h(ContextProvider, { context: this.context }, props.vnode); render(wrap, props.container); return null; } /** * Create a `Portal` to continue rendering the vnode tree at a different DOM node * @param {import('./internal').VNode} vnode The vnode to render * @param {import('./internal').PreactElement} container The DOM node to continue rendering in to. */ function createPortal(vnode, container) { return h(Portal, { vnode, container }); } const mapFn = (children, fn) => { if (children == null) return null; children = toChildArray(children); return children.map(fn); }; // This API is completely unnecessary for Preact, so it's basically passthrough. let Children = { map: mapFn, forEach: mapFn, count(children) { return children ? toChildArray(children).length : 0; }, only(children) { children = toChildArray(children); if (children.length!==1) throw new Error('Children.only() expects only one child.'); return children[0]; }, toArray: toChildArray }; /** * Wrap `createElement` to apply various vnode normalizations. 
* @param {import('./internal').VNode["type"]} type The node name or Component constructor * @param {object | null | undefined} [props] The vnode's properties * @param {Array<import('./internal').ComponentChildren>} [children] The vnode's children * @returns {import('./internal').VNode} */ function createElement(...args) { let vnode = h(...args); let type = vnode.type, props = vnode.props; if (typeof type!='function') { if (props.defaultValue) { if (!props.value && props.value!==0) { props.value = props.defaultValue; } delete props.defaultValue; } handleElementVNode(vnode, props); } vnode.preactCompatNormalized = false; return normalizeVNode(vnode); } /** * Normalize a vnode * @param {import('./internal').VNode} vnode */ function normalizeVNode(vnode) { vnode.preactCompatNormalized = true; applyClassName(vnode); applyEventNormalization(vnode); return vnode; } /** * Wrap `cloneElement` to abort if the passed element is not a valid element and apply * all vnode normalizations. * @param {import('./internal').VNode} element The vnode to clone * @param {object} props Props to add when cloning * @param {Array<import('./internal').ComponentChildren} rest Optional component children */ function cloneElement(element) { if (!isValidElement(element)) return element; let vnode = normalizeVNode(preactCloneElement.apply(null, arguments)); vnode.$$typeof = REACT_ELEMENT_TYPE; return vnode; } /** * Check if the passed element is a valid (p)react node. * @param {*} element The element to check * @returns {boolean} */ function isValidElement(element) { return element!=null && element.$$typeof===REACT_ELEMENT_TYPE; } /** * Normalize event handlers like react does. Most famously it uses `onChange` for any input element. * @param {import('./internal').VNode} vnode The vnode to normalize events on */ function applyEventNormalization({ type, props }) { if (!props || typeof type!=='string') return; let newProps = {}; for (let i in props) { newProps[i.toLowerCase()] = i; } if (newProps.ondoubleclick) { props.ondblclick = props[newProps.ondoubleclick]; delete props[newProps.ondoubleclick]; } if (newProps.onbeforeinput) { props.onbeforeinput = props[newProps.onbeforeinput]; delete props[newProps.onbeforeinput]; } // for *textual inputs* (incl textarea), normalize `onChange` -> `onInput`: if (newProps.onchange && (type==='textarea' || (type.toLowerCase()==='input' && !/^fil|che|rad/i.test(props.type)))) { let normalized = newProps.oninput || 'oninput'; if (!props[normalized]) { props[normalized] = props[newProps.onchange]; delete props[newProps.onchange]; } } } /** * Remove a component tree from the DOM, including state and event handlers. 
* @param {Element | Document | ShadowRoot | DocumentFragment} container * @returns {boolean} */ function unmountComponentAtNode(container) { if (container._prevVNode!=null) { preactRender(null, container); return true; } return false; } /** * Alias `class` prop to `className` if available * @param {import('./internal').VNode} vnode */ function applyClassName(vnode) { let a = vnode.props; if (a.class || a.className) { classNameDescriptor.enumerable = 'className' in a; if (a.className) a.class = a.className; Object.defineProperty(a, 'className', classNameDescriptor); } } let classNameDescriptor = { configurable: true, get() { return this.class; } }; /** * Check if two objects have a different shape * @param {object} a * @param {object} b * @returns {boolean} */ function shallowDiffers(a, b) { for (let i in a) if (!(i in b)) return true; for (let i in b) if (a[i]!==b[i]) return true; return false; } /** * Get the matching DOM node for a component * @param {import('./internal').Component} component * @returns {import('./internal').PreactElement | null} */ function findDOMNode(component) { return component && (component.base || component.nodeType === 1 && component) || null; } /** * Component class with a predefined `shouldComponentUpdate` implementation */ class PureComponent extends Component { constructor(props) { super(props); // Some third-party libraries check if this property is present this.isPureReactComponent = true; } shouldComponentUpdate(props, state) { return shallowDiffers(this.props, props) || shallowDiffers(this.state, state); } } // Some libraries like `react-virtualized` explicitely check for this. Component.prototype.isReactComponent = {}; /** * Memoize a component, so that it only updates when the props actually have * changed. This was previously known as `React.pure`. * @param {import('./internal').ComponentFactory<any>} c The component constructor * @param {(prev: object, next: object) => boolean} [comparer] Custom equality function * @returns {import('./internal').ComponentFactory<any>} */ function memo(c, comparer) { function shouldUpdate(nextProps) { return !comparer(this.props, nextProps); } function Memoed(props, context) { this.shouldComponentUpdate = this.shouldComponentUpdate || (comparer ? shouldUpdate : PureComponent.prototype.shouldComponentUpdate); return c.call(this, props, context); } Memoed.displayName = 'Memo(' + (c.displayName || c.name) + ')'; return Memoed; } // Patch in `UNSAFE_*` lifecycle hooks function setUnsafeDescriptor(obj, key) { Object.defineProperty(obj.prototype, 'UNSAFE_' + key, { configurable: true, get() { return this[key]; }, set(v) { this[key] = v; } }); } setUnsafeDescriptor(Component, 'componentWillMount'); setUnsafeDescriptor(Component, 'componentWillReceiveProps'); setUnsafeDescriptor(Component, 'componentWillUpdate'); /** * Pass ref down to a child. This is mainly used in libraries with HOCs that * wrap components. Using `forwardRef` there is an easy way to get a reference * of the wrapped component instead of one of the wrapper itself. 
* @param {import('./internal').ForwardFn} fn * @returns {import('./internal').FunctionalComponent} */ function forwardRef(fn) { function Forwarded(props) { let ref = props.ref; delete props.ref; return fn(props, ref); } Forwarded._forwarded = true; Forwarded.displayName = 'ForwardRef(' + (fn.displayName || fn.name) + ')'; return Forwarded; } let oldVNodeHook = options.vnode; options.vnode = vnode => { vnode.$$typeof = REACT_ELEMENT_TYPE; let type = vnode.type; if (type!=null && type._forwarded) { vnode.props.ref = vnode.ref; vnode.ref = null; } /* istanbul ignore next */ if (oldVNodeHook) oldVNodeHook(vnode); }; export { version, Children, render, render as hydrate, unmountComponentAtNode, createPortal, createElement, createContext, createFactory, cloneElement, createRef, Fragment, isValidElement, findDOMNode, Component, PureComponent, memo, forwardRef }; // React copies the named exports to the default one. export default { ...hooks, version, Children, render, hydrate: render, unmountComponentAtNode, createPortal, createElement, createContext, createFactory, cloneElement, createRef, Fragment, isValidElement, findDOMNode, Component, PureComponent, memo, forwardRef };
1
12,664
`props.children` is not always guaranteed to be an array. When only one child is passed this will break. We can use `toChildArray()` to turn it into an array :tada:
preactjs-preact
js
@@ -26,7 +26,8 @@ buildStyle = os.environ['BUILD_STYLE']

 # Build the configure command.
 configureCmd = os.path.join(buildSystemDir, 'contrib', 'configure.py')
-configureCmd += ' --mode=%s' % buildStyle
+# configureCmd += ' --mode=%s' % buildStyle
+configureCmd += ' --mode=release'
 configureCmd += ' --builddir=%s' % buildDir

 print 'Running command:', configureCmd
1
#!/usr/bin/env python import os import sys import string doClean = ('clean' in sys.argv) or ('uninstall' in sys.argv) rootDir = os.getcwd() buildSystemDir = os.path.join(rootDir, 'build_system') # Generate the configure input files. setupCmd = 'python ' + os.path.join(buildSystemDir, 'setup.py') + ' --autogen' \ + " --win32BuildDir '$(NTAX_BUILD_DIR)'" print 'Running command:', setupCmd sys.stdout.flush() retCode = os.system(setupCmd) if retCode != 0: print >>sys.stderr, 'setup.py failed: Error', retCode sys.exit(1) buildDir = os.environ['BUILT_PRODUCTS_DIR'] buildStyle = os.environ['BUILD_STYLE'] # Build the configure command. configureCmd = os.path.join(buildSystemDir, 'contrib', 'configure.py') configureCmd += ' --mode=%s' % buildStyle configureCmd += ' --builddir=%s' % buildDir print 'Running command:', configureCmd sys.stdout.flush() retCode = os.system(configureCmd) if retCode != 0: print >>sys.stderr, 'configure failed: Error', retCode sys.exit(1) # Build success = True pushd = os.getcwd() os.chdir(buildDir) buildCmd = os.path.join(buildSystemDir, 'contrib', 'make.py') if doClean: buildCmd += ' clean' print 'Running command:', buildCmd retCode = os.system(buildCmd) if retCode != 0: print >>sys.stderr, 'Build failed: Error', retCode success = False os.chdir(pushd) if not success: sys.exit(1)
1
12,435
Please just remove this line. We have the git history if we want the old version.
numenta-nupic
py
@@ -49,9 +49,11 @@ public class ScriptTaskGraalJsTest extends AbstractScriptTaskTest {
   private static final String GRAALJS = "graal.js";

   protected ScriptEngineResolver defaultScriptEngineResolver;
+  protected boolean spinEnabled = false;

   @Before
   public void setup() {
+    spinEnabled = processEngineConfiguration.getEnvScriptResolvers().stream().anyMatch(resolver -> resolver.getClass().getSimpleName().equals("SpinScriptEnvResolver"));
     defaultScriptEngineResolver = processEngineConfiguration.getScriptEngineResolver();
     processEngineConfiguration.setConfigureScriptEngineHostAccess(configureHostAccess);
     processEngineConfiguration.setEnableScriptEngineLoadExternalResources(enableExternalResources);
1
/* * Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH * under one or more contributor license agreements. See the NOTICE file * distributed with this work for additional information regarding copyright * ownership. Camunda licenses this file to you under the Apache License, * Version 2.0; you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.camunda.bpm.engine.test.bpmn.scripttask; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import java.util.Arrays; import java.util.Collection; import java.util.Date; import javax.script.ScriptEngine; import javax.script.ScriptEngineManager; import org.camunda.bpm.engine.ScriptEvaluationException; import org.camunda.bpm.engine.impl.scripting.engine.DefaultScriptEngineResolver; import org.camunda.bpm.engine.impl.scripting.engine.ScriptEngineResolver; import org.camunda.bpm.engine.runtime.ProcessInstance; import org.junit.After; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameter; import org.junit.runners.Parameterized.Parameters; import com.oracle.truffle.js.scriptengine.GraalJSEngineFactory; import com.oracle.truffle.js.scriptengine.GraalJSScriptEngine; @RunWith(Parameterized.class) public class ScriptTaskGraalJsTest extends AbstractScriptTaskTest { private static final String GRAALJS = "graal.js"; protected ScriptEngineResolver defaultScriptEngineResolver; @Before public void setup() { defaultScriptEngineResolver = processEngineConfiguration.getScriptEngineResolver(); processEngineConfiguration.setConfigureScriptEngineHostAccess(configureHostAccess); processEngineConfiguration.setEnableScriptEngineLoadExternalResources(enableExternalResources); processEngineConfiguration.setEnableScriptEngineNashornCompatibility(enableNashornCompat); // create custom script engine lookup to receive a fresh GraalVM JavaScript engine processEngineConfiguration.setScriptEngineResolver(new TestScriptEngineResolver( processEngineConfiguration.getScriptEngineResolver().getScriptEngineManager())); } @After public void resetConfiguration() { processEngineConfiguration.setConfigureScriptEngineHostAccess(true); processEngineConfiguration.setEnableScriptEngineNashornCompatibility(false); processEngineConfiguration.setEnableScriptEngineLoadExternalResources(false); processEngineConfiguration.setScriptEngineResolver(defaultScriptEngineResolver); } @Parameters public static Collection<Object[]> setups() { return Arrays.asList(new Object[][] { {false, false, false}, {true, false, false}, {false, true, false}, {false, false, true}, {true, true, false}, {true, false, true}, {false, true, true}, {true, true, true}, }); } @Parameter(0) public boolean configureHostAccess; @Parameter(1) public boolean enableExternalResources; @Parameter(2) public boolean enableNashornCompat; @Test public void testJavascriptProcessVarVisibility() { deployProcess(GRAALJS, // GIVEN // 
an execution variable 'foo' "execution.setVariable('foo', 'a');" // THEN // there should be a script variable defined + "if (typeof foo !== 'undefined') { " + " throw 'Variable foo should be defined as script variable.';" + "}" // GIVEN // a script variable with the same name + "var foo = 'b';" // THEN // it should not change the value of the execution variable + "if(execution.getVariable('foo') != 'a') {" + " throw 'Execution should contain variable foo';" + "}" // AND // it should override the visibility of the execution variable + "if(foo != 'b') {" + " throw 'Script variable must override the visibiltity of the execution variable.';" + "}" ); if (enableNashornCompat || configureHostAccess) { // WHEN // we start an instance of this process ProcessInstance pi = runtimeService.startProcessInstanceByKey("testProcess"); // THEN // the script task can be executed without exceptions // the execution variable is stored and has the correct value Object variableValue = runtimeService.getVariable(pi.getId(), "foo"); assertEquals("a", variableValue); } else { // WHEN // we start an instance of this process assertThatThrownBy(() -> runtimeService.startProcessInstanceByKey("testProcess")) // THEN // this is not allowed in the JS ScriptEngine .isInstanceOf(ScriptEvaluationException.class) .hasMessageContaining("TypeError"); } } @Test public void testJavascriptFunctionInvocation() { deployProcess(GRAALJS, // GIVEN // a function named sum "function sum(a,b){" + " return a+b;" + "};" // THEN // i can call the function + "var result = sum(1,2);" + "execution.setVariable('foo', result);" ); if (enableNashornCompat || configureHostAccess) { // WHEN // we start an instance of this process ProcessInstance pi = runtimeService.startProcessInstanceByKey("testProcess"); // THEN // the variable is defined Object variable = runtimeService.getVariable(pi.getId(), "foo"); assertThat(variable).isIn(3, 3.0); } else { // WHEN // we start an instance of this process assertThatThrownBy(() -> runtimeService.startProcessInstanceByKey("testProcess")) // THEN // this is not allowed in the JS ScriptEngine .isInstanceOf(ScriptEvaluationException.class) .hasMessageContaining("TypeError"); } } @Test public void testJsVariable() { String scriptText = "var foo = 1;"; deployProcess(GRAALJS, scriptText); ProcessInstance pi = runtimeService.startProcessInstanceByKey("testProcess"); Object variableValue = runtimeService.getVariable(pi.getId(), "foo"); assertNull(variableValue); } @Test public void testJavascriptVariableSerialization() { deployProcess(GRAALJS, // GIVEN // setting Java classes as variables "execution.setVariable('date', new java.util.Date(0));" + "execution.setVariable('myVar', new org.camunda.bpm.engine.test.bpmn.scripttask.MySerializable('test'));"); if (enableNashornCompat || configureHostAccess) { // WHEN ProcessInstance pi = runtimeService.startProcessInstanceByKey("testProcess"); // THEN Date date = (Date) runtimeService.getVariable(pi.getId(), "date"); assertEquals(0, date.getTime()); MySerializable myVar = (MySerializable) runtimeService.getVariable(pi.getId(), "myVar"); assertEquals("test", myVar.getName()); } else { // WHEN // we start an instance of this process assertThatThrownBy(() -> runtimeService.startProcessInstanceByKey("testProcess")) // THEN // this is not allowed in the JS ScriptEngine .isInstanceOf(ScriptEvaluationException.class) .hasMessageContaining("ReferenceError"); } } @Test public void shouldLoadExternalScript() { // GIVEN // an external JS file with a function deployProcess(GRAALJS, // WHEN 
// we load a function from an external file "load(\"" + getNormalizedResourcePath("/org/camunda/bpm/engine/test/bpmn/scripttask/sum.js") + "\");" // THEN // we can use that function + "execution.setVariable('foo', sum(3, 4));" ); if (enableNashornCompat || (enableExternalResources && configureHostAccess)) { // WHEN // we start an instance of this process ProcessInstance pi = runtimeService.startProcessInstanceByKey("testProcess"); // THEN // the script task can be executed without exceptions // the execution variable is stored and has the correct value Object variableValue = runtimeService.getVariable(pi.getId(), "foo"); assertEquals(7, variableValue); } else { // WHEN // we start an instance of this process assertThatThrownBy(() -> runtimeService.startProcessInstanceByKey("testProcess")) // THEN // this is not allowed in the JS ScriptEngine .isInstanceOf(ScriptEvaluationException.class) .hasMessageContaining(enableExternalResources && !configureHostAccess ? "TypeError" : "Operation is not allowed"); } } protected static class TestScriptEngineResolver extends DefaultScriptEngineResolver { public TestScriptEngineResolver(ScriptEngineManager scriptEngineManager) { super(scriptEngineManager); } @Override protected ScriptEngine getScriptEngine(String language) { if (GRAALJS.equalsIgnoreCase(language)) { GraalJSScriptEngine scriptEngine = new GraalJSEngineFactory().getScriptEngine(); configureScriptEngines(language, scriptEngine); return scriptEngine; } return super.getScriptEngine(language); } } }
1
11,999
Can we put some of the code on a new line to make it more readable? The max line length is 120 now, right?
camunda-camunda-bpm-platform
java
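If the concern in the review comment is the 120-character line limit, the statement added in the patch can simply be wrapped. A sketch of the same assignment, behavior unchanged:

// The statement from the patch above, split across lines for readability.
spinEnabled = processEngineConfiguration.getEnvScriptResolvers()
    .stream()
    .anyMatch(resolver -> resolver.getClass().getSimpleName()
        .equals("SpinScriptEnvResolver"));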
@@ -56,3 +56,19 @@ func Example() {
 	cfg := snapshot.Value.(MyConfig)
 	_ = cfg
 }
+
+func Example_openVariable() {
+	// OpenVariable creates a *runtimevar.Variable from a URL.
+	// This example watches a variable based on a file-based blob.Bucket with JSON.
+	ctx := context.Background()
+	v, err := runtimevar.OpenVariable(ctx, "etcd://myvarname?client=my.etcd.server:9999")
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	snapshot, err := v.Watch(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+	_, _ = snapshot, err
+}
1
// Copyright 2018 The Go Cloud Development Kit Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package etcdvar_test import ( "context" "log" "go.etcd.io/etcd/clientv3" "gocloud.dev/runtimevar" "gocloud.dev/runtimevar/etcdvar" ) // MyConfig is a sample configuration struct. type MyConfig struct { Server string Port int } func Example() { // Connect to the etcd server. client, err := clientv3.NewFromURL("http://your.etcd.server:9999") if err != nil { log.Fatal(err) } // Create a decoder for decoding JSON strings into MyConfig. decoder := runtimevar.NewDecoder(MyConfig{}, runtimevar.JSONDecode) // Construct a *runtimevar.Variable that watches the variable. // The etcd variable being referenced should have a JSON string that // decodes into MyConfig. v, err := etcdvar.New(client, "cfg-variable-name", decoder, nil) if err != nil { log.Fatal(err) } defer v.Close() // We can now read the current value of the variable from v. snapshot, err := v.Watch(context.Background()) if err != nil { log.Fatal(err) } cfg := snapshot.Value.(MyConfig) _ = cfg }
1
15,227
Remove the handling here as well.
google-go-cloud
go
@@ -113,7 +113,7 @@ namespace OpenTelemetry.Trace
        /// <inheritdoc/>
        public override void OnEnd(Activity activity)
        {
-            if (this.queue.TryAdd(activity))
+            if (this.queue.TryAdd(activity, maxSpinCount: 50000))
            {
                if (this.queue.Count >= this.maxExportBatchSize)
                {
1
// <copyright file="BatchExportActivityProcessor.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Diagnostics; using System.Threading; using System.Threading.Tasks; using OpenTelemetry.Internal; namespace OpenTelemetry.Trace { /// <summary> /// Implements processor that batches activities before calling exporter. /// </summary> public class BatchExportActivityProcessor : ActivityProcessor { private readonly ActivityExporterSync exporter; private readonly CircularBuffer<Activity> queue; private readonly TimeSpan scheduledDelay; private readonly TimeSpan exporterTimeout; private readonly int maxExportBatchSize; private bool disposed; private long droppedCount = 0; /// <summary> /// Initializes a new instance of the <see cref="BatchExportActivityProcessor"/> class with custom settings. /// </summary> /// <param name="exporter">Exporter instance.</param> /// <param name="maxQueueSize">The maximum queue size. After the size is reached data are dropped. The default value is 2048.</param> /// <param name="scheduledDelayMillis">The delay interval in milliseconds between two consecutive exports. The default value is 5000.</param> /// <param name="exporterTimeoutMillis">How long the export can run before it is cancelled. The default value is 30000.</param> /// <param name="maxExportBatchSize">The maximum batch size of every export. It must be smaller or equal to maxQueueSize. The default value is 512.</param> public BatchExportActivityProcessor( ActivityExporterSync exporter, int maxQueueSize = 2048, int scheduledDelayMillis = 5000, int exporterTimeoutMillis = 30000, int maxExportBatchSize = 512) { if (maxQueueSize <= 0) { throw new ArgumentOutOfRangeException(nameof(maxQueueSize)); } if (maxExportBatchSize <= 0 || maxExportBatchSize > maxQueueSize) { throw new ArgumentOutOfRangeException(nameof(maxExportBatchSize)); } if (scheduledDelayMillis <= 0) { throw new ArgumentOutOfRangeException(nameof(scheduledDelayMillis)); } if (exporterTimeoutMillis < 0) { throw new ArgumentOutOfRangeException(nameof(exporterTimeoutMillis)); } this.exporter = exporter ?? throw new ArgumentNullException(nameof(exporter)); this.queue = new CircularBuffer<Activity>(maxQueueSize); this.scheduledDelay = TimeSpan.FromMilliseconds(scheduledDelayMillis); this.exporterTimeout = TimeSpan.FromMilliseconds(exporterTimeoutMillis); this.maxExportBatchSize = maxExportBatchSize; } /// <summary> /// Gets the number of <see cref="Activity"/> dropped (when the queue is full). /// </summary> internal long DroppedCount { get { return this.droppedCount; } } /// <summary> /// Gets the number of <see cref="Activity"/> received by the processor. /// </summary> internal long ReceivedCount { get { return this.queue.AddedCount + this.DroppedCount; } } /// <summary> /// Gets the number of <see cref="Activity"/> processed by the underlying exporter. 
/// </summary> internal long ProcessedCount { get { return this.queue.RemovedCount; } } /// <inheritdoc/> public override void OnEnd(Activity activity) { if (this.queue.TryAdd(activity)) { if (this.queue.Count >= this.maxExportBatchSize) { // TODO: signal the exporter } return; // enqueue succeeded } // drop item on the floor Interlocked.Increment(ref this.droppedCount); } /// <inheritdoc/> /// <exception cref="OperationCanceledException">If the <paramref name="cancellationToken"/> is canceled.</exception> public override Task ForceFlushAsync(CancellationToken cancellationToken) { // TODO throw new NotImplementedException(); } /// <inheritdoc/> /// <exception cref="OperationCanceledException">If the <paramref name="cancellationToken"/> is canceled.</exception> public override Task ShutdownAsync(CancellationToken cancellationToken) { // TODO throw new NotImplementedException(); } /// <summary> /// Releases the unmanaged resources used by this class and optionally releases the managed resources. /// </summary> /// <param name="disposing"><see langword="true"/> to release both managed and unmanaged resources; <see langword="false"/> to release only unmanaged resources.</param> protected override void Dispose(bool disposing) { base.Dispose(disposing); if (disposing && !this.disposed) { try { this.exporter.Dispose(); } catch (Exception ex) { OpenTelemetrySdkEventSource.Log.SpanProcessorException(nameof(this.Dispose), ex); } this.disposed = true; } } } }
1
16,290
Do we want to make this configurable?
open-telemetry-opentelemetry-dotnet
.cs
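The review question is whether the hard-coded 50000 spin bound should be configurable. The processor itself is C#, so the following is only a sketch (written in Java, with hypothetical names) of the usual shape of such a change: accept the bound in the constructor with a default and use the stored field at the call site instead of a literal.

// Hypothetical sketch, not the actual OpenTelemetry .NET API: the spin bound
// becomes a constructor parameter with a default instead of a magic number.
public final class BatchProcessorSketch {
    private static final int DEFAULT_MAX_SPIN_COUNT = 50000;
    private final int maxSpinCount;

    public BatchProcessorSketch() {
        this(DEFAULT_MAX_SPIN_COUNT);
    }

    public BatchProcessorSketch(int maxSpinCount) {
        if (maxSpinCount <= 0) {
            throw new IllegalArgumentException("maxSpinCount must be positive");
        }
        // The enqueue path would then call queue.tryAdd(item, this.maxSpinCount).
        this.maxSpinCount = maxSpinCount;
    }
}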
@@ -159,6 +159,17 @@ public final class Require {
     return number;
   }

+  public static double positive(String argName, double number, String message) {
+    if (number <= 0) {
+      if (message == null) {
+        throw new IllegalArgumentException(argName + " must be greater than 0");
+      } else {
+        throw new IllegalArgumentException(message);
+      }
+    }
+    return number;
+  }
+
   public static int positive(String argName, Integer number) {
     return positive(argName, number, null);
   }
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.internal; import java.io.File; import java.nio.file.Files; import java.nio.file.Path; import java.time.Duration; import java.util.Objects; /** * A utility class to check arguments (preconditions) and state. * <p> * Examples of use: * <pre> * public void setActionWithTimeout(Action action delegate, int timeout) { * this.action = Require.nonNull("Action", action); * this.timeout = Require.positive("Timeout", timeout); * } * </pre> */ public final class Require { private static final String ARG_MUST_BE_SET = "%s must be set"; private static final String MUST_EXIST = "%s must exist: %s"; private static final String MUST_BE_DIR = "%s must be a directory: %s"; private static final String MUST_BE_FILE = "%s must be a regular file: %s"; private Require() { // An utility class } public static void precondition(boolean condition, String message, Object... args) { if (!condition) { throw new IllegalArgumentException(String.format(message, args)); } } public static <T> T nonNull(String argName, T arg) { if (arg == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } return arg; } public static <T> T nonNull(String argName, T arg, String message, Object... args) { if (arg == null) { throw new IllegalArgumentException( String.join(" ", argName, String.format(message, args))); } return arg; } public static <T> ArgumentChecker<T> argument(String argName, T arg) { return new ArgumentChecker<>(argName, arg); } public static class ArgumentChecker<T> { private final String argName; private final T arg; ArgumentChecker(String argName, T arg) { this.argName = argName; this.arg = arg; } public T nonNull() { if (arg == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } return arg; } public T nonNull(String message, Object... 
args) { if (arg == null) { throw new IllegalArgumentException(String.format(message, args)); } return arg; } public T equalTo(Object other) { if (arg == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } if (!Objects.equals(arg, other)) { throw new IllegalArgumentException(argName + " must be equal to `" + other + "`"); } return arg; } public T instanceOf(Class<?> cls) { if (arg == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } if (!cls.isInstance(arg)) { throw new IllegalArgumentException(argName + " must be an instance of " + cls); } return arg; } } public static Duration nonNegative(String argName, Duration arg) { if (arg == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } if (arg.isNegative()) { throw new IllegalArgumentException(argName + " must be set to 0 or more"); } return arg; } public static Duration nonNegative(Duration arg) { if (arg == null) { throw new IllegalArgumentException("Duration must be set"); } if (arg.isNegative()) { throw new IllegalArgumentException("Duration must be set to 0 or more"); } return arg; } public static int nonNegative(String argName, Integer number) { if (number == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } if (number < 0) { throw new IllegalArgumentException(argName + " cannot be less than 0"); } return number; } public static int positive(String argName, Integer number, String message) { if (number == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } if (number <= 0) { if (message == null) { throw new IllegalArgumentException(argName + " must be greater than 0"); } else { throw new IllegalArgumentException(message); } } return number; } public static int positive(String argName, Integer number) { return positive(argName, number, null); } public static IntChecker argument(String argName, Integer number) { return new IntChecker(argName, number); } public static class IntChecker { private final String argName; private final Integer number; IntChecker(String argName, Integer number) { this.argName = argName; this.number = number; } public int greaterThan(int max, String message) { if (number == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } if (number <= max) { throw new IllegalArgumentException(message); } return number; } } public static FileChecker argument(String argName, File file) { return new FileChecker(argName, file); } public static class FileChecker { private final String argName; private final File file; FileChecker(String argName, File file) { this.argName = argName; this.file = file; } public File isFile() { if (file == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } if (!file.exists()) { throw new IllegalArgumentException( String.format(MUST_EXIST, argName, file.getAbsolutePath())); } if (!file.isFile()) { throw new IllegalArgumentException( String.format(MUST_BE_FILE, argName, file.getAbsolutePath())); } return file; } public File isDirectory() { if (file == null) { throw new IllegalArgumentException(String.format(ARG_MUST_BE_SET, argName)); } if (!file.exists()) { throw new IllegalArgumentException( String.format(MUST_EXIST, argName, file.getAbsolutePath())); } if (!file.isDirectory()) { throw new IllegalArgumentException( String.format(MUST_BE_DIR, argName, file.getAbsolutePath())); } return file; } } public static void stateCondition(boolean state, String message, 
Object... args) { if (!state) { throw new IllegalStateException(String.format(message, args)); } } public static <T> StateChecker<T> state(String name, T state) { return new StateChecker<>(name, state); } public static class StateChecker<T> { private final String name; private final T state; StateChecker(String name, T state) { this.name = name; this.state = state; } public T nonNull() { if (state == null) { throw new IllegalStateException(name + " must not be null"); } return state; } public T nonNull(String message, Object... args) { if (state == null) { throw new IllegalStateException(String.join(" ", name, String.format(message, args))); } return state; } public T instanceOf(Class<?> cls) { if (state == null) { throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name)); } if (!cls.isInstance(state)) { throw new IllegalStateException(name + " must be an instance of " + cls); } return state; } } public static FileStateChecker state(String name, File file) { return new FileStateChecker(name, file); } public static class FileStateChecker { private final String name; private final File file; FileStateChecker(String name, File file) { this.name = name; this.file = file; } public File isFile() { if (file == null) { throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name)); } if (!file.exists()) { throw new IllegalStateException(String.format(MUST_EXIST, name, file.getAbsolutePath())); } if (!file.isFile()) { throw new IllegalStateException(String.format(MUST_BE_FILE, name, file.getAbsolutePath())); } return file; } public File isDirectory() { if (file == null) { throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name)); } if (!file.exists()) { throw new IllegalStateException(String.format(MUST_EXIST, name, file.getAbsolutePath())); } if (!file.isDirectory()) { throw new IllegalStateException(String.format(MUST_BE_DIR, name, file.getAbsolutePath())); } return file; } } public static PathStateChecker state(String name, Path path) { return new PathStateChecker(name, path); } public static class PathStateChecker { private final String name; private final Path path; PathStateChecker(String name, Path path) { this.name = name; this.path = path; } public Path isFile() { if (path == null) { throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name)); } if (!Files.exists(path)) { throw new IllegalStateException(String.format(MUST_EXIST, name, path)); } if (!Files.isRegularFile(path)) { throw new IllegalStateException(String.format(MUST_BE_FILE, name, path)); } return path; } public Path isDirectory() { if (path == null) { throw new IllegalStateException(String.format(ARG_MUST_BE_SET, name)); } if (!Files.exists(path)) { throw new IllegalStateException(String.format(MUST_EXIST, name, path)); } if (!Files.isDirectory(path)) { throw new IllegalStateException(String.format(MUST_BE_DIR, name, path)); } return path; } } }
1
18,469
Prefer adding a second `positive(String, double)` that delegates down to this three-param version. Using `null` in code is generally Not A Great Idea, and it looks ugly.
SeleniumHQ-selenium
py
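Read together with the existing Integer overloads in Require, the review comment asks for a two-argument double overload that delegates to the three-argument one, so callers never pass null themselves. A sketch:

// Sketch of the delegating overload the reviewer prefers; it mirrors the
// existing positive(String, Integer) / positive(String, Integer, String) pair.
public static double positive(String argName, double number) {
  return positive(argName, number, null);
}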
@@ -71,4 +71,17 @@ ActiveAdmin.register Proposal do
     flash[:alert] = "Completed!"
     redirect_to admin_proposal_path(resource)
   end
+
+  csv do
+    proposal_attributes = %w(id status created_at updated_at client_data_type public_id visit_id)
+    proposal_attributes.each do |proposal_attr|
+      column(proposal_attr.to_sym) { |proposal| proposal.attributes[proposal_attr] }
+    end
+    column(:requester) { |proposal| proposal.requester.display_name }
+    client_data_attributes = %w(expense_type vendor not_to_exceed building_number emergency rwa_number work_order_code project_title description direct_pay cl_number function_code soc_code ncr_organization_id)
+    client_data_attributes.each do |data_attr|
+      column(data_attr.to_sym) { |proposal| proposal.client_data.attributes[data_attr] }
+    end
+    column(:approving_offical_name) { |proposal| User.find(proposal.client_data.approving_official_id).display_name }
+  end
 end
1
ActiveAdmin.register Proposal do actions :index, :show permit_params :status filter :client_data_type filter :status filter :created_at filter :updated_at index do column :id column :status column :name column :public_id column :requester actions end # /:id page show do attributes_table do row :id row :public_id row :name row :status row :requester row :created_at row :updated_at end panel "Steps" do table_for proposal.individual_steps do |tbl| tbl.column("Position") { |step| link_to step.position, admin_step_path(step) } tbl.column("User") { |step| step.user } tbl.column("Status") { |step| step.status } tbl.column("Created") { |step| step.created_at } tbl.column("Updated") { |step| step.updated_at } tbl.column("Completer") { |step| step.completer } tbl.column("Completed") { |step| step.completed_at } end end end action_item :reindex, only: [:show] do link_to "Re-index", reindex_admin_proposal_path(proposal), "data-method" => :post, title: "Re-index this proposal" end action_item :fully_complete, only: [:show] do link_to "Complete", fully_complete_admin_proposal_path(proposal), "data-method" => :post, title: "Fully complete this proposal" end action_item :fully_complete_no_email, only: [:show] do link_to "Complete without notifications", fully_complete_no_email_admin_proposal_path(proposal), "data-method" => :post, title: "Fully complete this proposal without sending notifications to affected subscribers" end member_action :reindex, method: :post do resource.delay.reindex flash[:alert] = "Re-index scheduled!" redirect_to admin_proposal_path(resource) end member_action :fully_complete, method: :post do resource.fully_complete!(current_user) flash[:alert] = "Completed!" redirect_to admin_proposal_path(resource) end member_action :fully_complete_no_email, method: :post do resource.fully_complete!(current_user, true) flash[:alert] = "Completed!" redirect_to admin_proposal_path(resource) end end
1
18,023
Is this going to break things for 18F proposals, or will these fields just be ignored?
18F-C2
rb
@@ -76,8 +76,9 @@ public class Actions { * Note that the modifier key is <b>never</b> released implicitly - either * <i>keyUp(theKey)</i> or <i>sendKeys(Keys.NULL)</i> * must be called to release the modifier. - * @param theKey Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}. If the - * provided key is none of those, {@link IllegalArgumentException} is thrown. + * @param theKey Either {@link Keys#SHIFT}, {@link Keys#ALT}, {@link Keys#CONTROL} + * or {@link Keys#COMMAND}. If the provided key is none of those, + * {@link IllegalArgumentException} is thrown. * @return A self reference. */ public Actions keyDown(Keys theKey) {
1
/* Copyright 2007-2011 Selenium committers Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package org.openqa.selenium.interactions; import org.openqa.selenium.HasInputDevices; import org.openqa.selenium.Keyboard; import org.openqa.selenium.Keys; import org.openqa.selenium.Mouse; import org.openqa.selenium.WebDriver; import org.openqa.selenium.WebElement; import org.openqa.selenium.internal.Locatable; /** * The user-facing API for emulating complex user gestures. Use this class rather than using the * Keyboard or Mouse directly. * * Implements the builder pattern: Builds a CompositeAction containing all actions specified by the * method calls. */ public class Actions { protected Mouse mouse; protected Keyboard keyboard; protected CompositeAction action; /** * Default constructor - uses the default keyboard, mouse implemented by the driver. * @param driver the driver providing the implementations to use. */ public Actions(WebDriver driver) { this(((HasInputDevices) driver).getKeyboard(), ((HasInputDevices) driver).getMouse()); } /** * A constructor that should only be used when the keyboard or mouse were extended to provide * additional functionality (for example, dragging-and-dropping from the desktop). * @param keyboard the {@link Keyboard} implementation to delegate to. * @param mouse the {@link Mouse} implementation to delegate to. */ public Actions(Keyboard keyboard, Mouse mouse) { this.mouse = mouse; this.keyboard = keyboard; resetCompositeAction(); } /** * Only used by the TouchActions class. * @param keyboard implementation to delegate to. */ public Actions(Keyboard keyboard) { this.keyboard = keyboard; resetCompositeAction(); } private void resetCompositeAction() { action = new CompositeAction(); } /** * Performs a modifier key press. Does not release the modifier key - subsequent interactions * may assume it's kept pressed. * Note that the modifier key is <b>never</b> released implicitly - either * <i>keyUp(theKey)</i> or <i>sendKeys(Keys.NULL)</i> * must be called to release the modifier. * @param theKey Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}. If the * provided key is none of those, {@link IllegalArgumentException} is thrown. * @return A self reference. */ public Actions keyDown(Keys theKey) { return this.keyDown(null, theKey); } /** * Performs a modifier key press after focusing on an element. Equivalent to: * <i>Actions.click(element).sendKeys(theKey);</i> * @see #keyDown(org.openqa.selenium.Keys) * * @param theKey Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}. If the * provided key is none of those, {@link IllegalArgumentException} is thrown. * @return A self reference. */ public Actions keyDown(WebElement element, Keys theKey) { action.addAction(new KeyDownAction(keyboard, mouse, (Locatable) element, theKey)); return this; } /** * Performs a modifier key release. Releasing a non-depressed modifier key will yield undefined * behaviour. * * @param theKey Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}. * @return A self reference. 
*/ public Actions keyUp(Keys theKey) { return this.keyUp(null, theKey); } /** * Performs a modifier key release after focusing on an element. Equivalent to: * <i>Actions.click(element).sendKeys(theKey);</i> * @see #keyUp(org.openqa.selenium.Keys) on behaviour regarding non-depressed modifier keys. * * @param theKey Either {@link Keys#SHIFT}, {@link Keys#ALT} or {@link Keys#CONTROL}. * @return A self reference. */ public Actions keyUp(WebElement element, Keys theKey) { action.addAction(new KeyUpAction(keyboard, mouse, (Locatable) element, theKey)); return this; } /** * Sends keys to the active element. This differs from calling * {@link WebElement#sendKeys(CharSequence...)} on the active element in two ways: * <ul> * <li>The modifier keys included in this call are not released.</li> * <li>There is no attempt to re-focus the element - so sendKeys(Keys.TAB) for switching * elements should work. </li> * </ul> * * @see WebElement#sendKeys(CharSequence...) * * @param keysToSend The keys. * @return A self reference. */ public Actions sendKeys(CharSequence... keysToSend) { return this.sendKeys(null, keysToSend); } /** * Equivalent to calling: * <i>Actions.click(element).sendKeys(keysToSend).</i> * This method is different from {@link org.openqa.selenium.WebElement#sendKeys(CharSequence...)} - see * {@link Actions#sendKeys(CharSequence...)} for details how. * * @see #sendKeys(java.lang.CharSequence[]) * * @param element element to focus on. * @param keysToSend The keys. * @return A self reference. */ public Actions sendKeys(WebElement element, CharSequence... keysToSend) { action.addAction(new SendKeysAction(keyboard, mouse, (Locatable) element, keysToSend)); return this; } /** * Clicks (without releasing) in the middle of the given element. This is equivalent to: * <i>Actions.moveToElement(onElement).clickAndHold()</i> * * @param onElement Element to move to and click. * @return A self reference. */ public Actions clickAndHold(WebElement onElement) { action.addAction(new ClickAndHoldAction(mouse, (Locatable) onElement)); return this; } /** * Clicks (without releasing) at the current mouse location. * @return A self reference. */ public Actions clickAndHold() { return this.clickAndHold(null); } /** * Releases the depressed left mouse button, in the middle of the given element. * This is equivalent to: * <i>Actions.moveToElement(onElement).release()</i> * * Invoking this action without invoking {@link #clickAndHold()} first will result in * undefined behaviour. * * @param onElement Element to release the mouse button above. * @return A self reference. */ public Actions release(WebElement onElement) { action.addAction(new ButtonReleaseAction(mouse, (Locatable) onElement)); return this; } /** * Releases the depressed left mouse button at the current mouse location. * @see #release(org.openqa.selenium.WebElement) * @return A self reference. */ public Actions release() { return this.release(null); } /** * Clicks in the middle of the given element. Equivalent to: * <i>Actions.moveToElement(onElement).click()</i> * * @param onElement Element to click. * @return A self reference. */ public Actions click(WebElement onElement) { action.addAction(new ClickAction(mouse, (Locatable) onElement)); return this; } /** * Clicks at the current mouse location. Useful when combined with * {@link #moveToElement(org.openqa.selenium.WebElement, int, int)} or * {@link #moveByOffset(int, int)}. * @return A self reference. 
*/ public Actions click() { return this.click(null); } /** * Performs a double-click at middle of the given element. Equivalent to: * <i>Actions.moveToElement(element).doubleClick()</i> * * @param onElement Element to move to. * @return A self reference. */ public Actions doubleClick(WebElement onElement) { action.addAction(new DoubleClickAction(mouse, (Locatable) onElement)); return this; } /** * Performs a double-click at the current mouse location. * @return A self reference. */ public Actions doubleClick() { return this.doubleClick(null); } /** * Moves the mouse to the middle of the element. The element is scrolled into view and its * location is calculated using getBoundingClientRect. * @param toElement element to move to. * @return A self reference. */ public Actions moveToElement(WebElement toElement) { action.addAction(new MoveMouseAction(mouse, (Locatable) toElement)); return this; } /** * Moves the mouse to an offset from the top-left corner of the element. * The element is scrolled into view and its location is calculated using getBoundingClientRect. * @param toElement element to move to. * @param xOffset Offset from the top-left corner. A negative value means coordinates right from * the element. * @param yOffset Offset from the top-left corner. A negative value means coordinates above * the element. * @return A self reference. */ public Actions moveToElement(WebElement toElement, int xOffset, int yOffset) { action.addAction(new MoveToOffsetAction(mouse, (Locatable) toElement, xOffset, yOffset)); return this; } /** * Moves the mouse from its current position (or 0,0) by the given offset. If the coordinates * provided are outside the viewport (the mouse will end up outside the browser window) then * the viewport is scrolled to match. * @param xOffset horizontal offset. A negative value means moving the mouse left. * @param yOffset vertical offset. A negative value means moving the mouse up. * @return A self reference. * @throws MoveTargetOutOfBoundsException if the provided offset is outside the document's * boundaries. */ public Actions moveByOffset(int xOffset, int yOffset) { action.addAction(new MoveToOffsetAction(mouse, null, xOffset, yOffset)); return this; } /** * Performs a context-click at middle of the given element. First performs a mouseMove * to the location of the element. * * @param onElement Element to move to. * @return A self reference. */ public Actions contextClick(WebElement onElement) { action.addAction(new ContextClickAction(mouse, (Locatable) onElement)); return this; } /** * Performs a context-click at the current mouse location. * @return A self reference. */ public Actions contextClick() { return this.contextClick(null); } /** * A convenience method that performs click-and-hold at the location of the source element, * moves to the location of the target element, then releases the mouse. * * @param source element to emulate button down at. * @param target element to move to and release the mouse at. * @return A self reference. */ public Actions dragAndDrop(WebElement source, WebElement target) { action.addAction(new ClickAndHoldAction(mouse, (Locatable) source)); action.addAction(new MoveMouseAction(mouse, (Locatable) target)); action.addAction(new ButtonReleaseAction(mouse, (Locatable) target)); return this; } /** * A convenience method that performs click-and-hold at the location of the source element, * moves by a given offset, then releases the mouse. * * @param source element to emulate button down at. * @param xOffset horizontal move offset. 
* @param yOffset vertical move offset. * @return A self reference. */ public Actions dragAndDropBy(WebElement source, int xOffset, int yOffset) { action.addAction(new ClickAndHoldAction(mouse, (Locatable) source)); action.addAction(new MoveToOffsetAction(mouse, null, xOffset, yOffset)); action.addAction(new ButtonReleaseAction(mouse, null)); return this; } /** * Generates a composite action containinig all actions so far, ready to be performed (and * resets the internal builder state, so subsequent calls to build() will contain fresh * sequences). * * @return the composite action */ public Action build() { CompositeAction toReturn = action; resetCompositeAction(); return toReturn; } /** * A convenience method for performing the actions without calling build() first. */ public void perform() { build().perform(); } }
1
10,429
Keys.COMMAND seems to be an alias to Keys.META. That isn't mentioned?
SeleniumHQ-selenium
java
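The review comment in the record above notes that `Keys.COMMAND` appears to be an alias for `Keys.META` in the Java `Keys` enum. If that is the case (the enum implements `CharSequence`, so the two constants would encode to the same key code), a quick check along these lines would confirm that pressing either constant sends the same character:

```java
import org.openqa.selenium.Keys;

// Small sanity check; assumes the standard org.openqa.selenium.Keys enum.
public class CommandMetaCheck {
  public static void main(String[] args) {
    // Distinct enum constants...
    System.out.println(Keys.COMMAND == Keys.META);                  // false
    // ...but, if COMMAND aliases META's code point, the same character,
    // so keyDown(Keys.COMMAND) and keyDown(Keys.META) behave identically.
    System.out.println(Keys.COMMAND.charAt(0) == Keys.META.charAt(0));
  }
}
```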
@@ -35,6 +35,12 @@ type AWSClusterProviderConfig struct { // SSHKeyName is the name of the ssh key to attach to the bastion host. SSHKeyName string `json:"sshKeyName,omitempty"` + + // CACertificate is a PEM encoded CA Certificate for the control plane nodes. + CACertificate []byte `json:"caCertificate,omitempty"` + + // CAPrivateKey is a PEM encoded PKCS1 CA PrivateKey for the control plane nodes. + CAPrivateKey []byte `json:"caKey,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
1
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // AWSClusterProviderConfig is the providerConfig for AWS in the cluster // object // +k8s:openapi-gen=true type AWSClusterProviderConfig struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // The AWS Region the cluster lives in. Region string `json:"region,omitempty"` // SSHKeyName is the name of the ssh key to attach to the bastion host. SSHKeyName string `json:"sshKeyName,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object func init() { SchemeBuilder.Register(&AWSClusterProviderConfig{}) }
1
7,110
What's the reason for moving this to config from status?
kubernetes-sigs-cluster-api-provider-aws
go
@@ -48,7 +48,7 @@ class GenericSuppressHandler(suppress_handler.SuppressHandler): def store_suppress_bug_id(self, bug_id, file_name, comment): if self.suppress_file is None: - return False + return True ret = suppress_file_handler.write_to_suppress_file(self.suppress_file, bug_id,
1
# ------------------------------------------------------------------------- # The CodeChecker Infrastructure # This file is distributed under the University of Illinois Open Source # License. See LICENSE.TXT for details. # ------------------------------------------------------------------------- """ Handler for suppressing a bug. """ import os from libcodechecker import suppress_file_handler from libcodechecker import suppress_handler from libcodechecker.logger import LoggerFactory # Warning! this logger should only be used in this module. LOG = LoggerFactory.get_new_logger('SUPPRESS') class GenericSuppressHandler(suppress_handler.SuppressHandler): def __init__(self, suppress_file): """ Create a new suppress handler with a suppress_file as backend. """ super(GenericSuppressHandler, self).__init__() self.__suppress_info = [] if suppress_file: self.suppress_file = suppress_file self.__have_memory_backend = True self.__revalidate_suppress_data() else: self.__have_memory_backend = False def __revalidate_suppress_data(self): """Reload the information in the suppress file to the memory.""" if not self.__have_memory_backend: # Do not load and have suppress data stored in memory if not # needed. return with open(self.suppress_file, 'r') as file_handle: self.__suppress_info = suppress_file_handler.\ get_suppress_data(file_handle) def store_suppress_bug_id(self, bug_id, file_name, comment): if self.suppress_file is None: return False ret = suppress_file_handler.write_to_suppress_file(self.suppress_file, bug_id, file_name, comment) self.__revalidate_suppress_data() return ret def remove_suppress_bug_id(self, bug_id, file_name): if self.suppress_file is None: return False ret = suppress_file_handler.remove_from_suppress_file( self.suppress_file, bug_id, file_name) self.__revalidate_suppress_data() return ret def get_suppressed(self, bug): return any([suppress for suppress in self.__suppress_info if suppress[0] == bug.hash_value and suppress[1] == os.path.basename(bug.file_path)])
1
6,404
Shouldn't we revalidate/update the in-memory suppress data here?
Ericsson-codechecker
py
@@ -68,7 +68,7 @@ func TestPaymentChannelLs(t *testing.T) { t.Run("Works with default payer", func(t *testing.T) { t.Parallel() - payer, err := address.NewFromString(fixtures.TestAddresses[0]) + payer, err := address.NewFromString(fixtures.TestAddresses[2]) require.NoError(err) target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err)
1
package commands import ( "fmt" "strings" "sync" "testing" "gx/ipfs/QmR8BauakNcBa3RbE4nbQu76PDiJgoQgz8AJdhJuiU4TAw/go-cid" cbor "gx/ipfs/QmcZLyosDwMKdB6NLRsiss9HXzDPhVhhRtPy67JFKTDQDX/go-ipld-cbor" "gx/ipfs/QmPVkJMTeRC6iBByPWdrRkD3BE5UXsj5HPzb4kPqL186mS/testify/assert" "gx/ipfs/QmPVkJMTeRC6iBByPWdrRkD3BE5UXsj5HPzb4kPqL186mS/testify/require" "gx/ipfs/QmekxXDhCxCJRNuzmHreuaT3BsuJcsjcXWNrtV9C8DRHtd/go-multibase" "github.com/filecoin-project/go-filecoin/actor/builtin/paymentbroker" "github.com/filecoin-project/go-filecoin/address" "github.com/filecoin-project/go-filecoin/fixtures" th "github.com/filecoin-project/go-filecoin/testhelpers" "github.com/filecoin-project/go-filecoin/types" ) func TestPaymentChannelCreateSuccess(t *testing.T) { t.Parallel() assert := assert.New(t) d := th.NewDaemon( t, th.WithMiner(fixtures.TestMiners[0]), th.KeyFile(fixtures.KeyFilePaths()[0]), ).Start() defer d.ShutdownSuccess() args := []string{"paych", "create"} args = append(args, "--from", fixtures.TestAddresses[0], "--price", "0", "--limit", "300") args = append(args, fixtures.TestAddresses[1], "10000", "20") paymentChannelCmd := d.RunSuccess(args...) messageCid, err := cid.Parse(strings.Trim(paymentChannelCmd.ReadStdout(), "\n")) require.NoError(t, err) var wg sync.WaitGroup wg.Add(1) go func() { wait := d.RunSuccess("message", "wait", "--return", "--message=false", "--receipt=false", messageCid.String(), ) _, ok := types.NewChannelIDFromString(strings.Trim(wait.ReadStdout(), "\n"), 10) assert.True(ok) wg.Done() }() d.RunSuccess("mining once") wg.Wait() } func TestPaymentChannelLs(t *testing.T) { t.Parallel() assert := assert.New(t) require := require.New(t) t.Run("Works with default payer", func(t *testing.T) { t.Parallel() payer, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) eol := types.NewBlockHeight(20) amt := types.NewAttoFILFromFIL(10000) daemonTestWithPaymentChannel(t, &payer, &target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { ls := listChannelsAsStrs(d, &payer)[0] assert.Equal(fmt.Sprintf("%s: target: %s, amt: 10000, amt redeemed: 0, eol: 20", channelID, target.String()), ls) }) }) t.Run("Works with specified payer", func(t *testing.T) { t.Parallel() payer, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) eol := types.NewBlockHeight(20) amt := types.NewAttoFILFromFIL(10000) daemonTestWithPaymentChannel(t, &payer, &target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { args := []string{"paych", "ls"} args = append(args, "--from", target.String()) args = append(args, "--payer", payer.String()) ls := th.RunSuccessLines(d, args...)[0] assert.Equal(fmt.Sprintf("%s: target: %s, amt: 10000, amt redeemed: 0, eol: 20", channelID, target.String()), ls) }) }) t.Run("Notifies when channels not found", func(t *testing.T) { t.Parallel() payer, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) eol := types.NewBlockHeight(20) amt := types.NewAttoFILFromFIL(10000) daemonTestWithPaymentChannel(t, &payer, &target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { ls := listChannelsAsStrs(d, &target)[0] assert.Equal("no channels", ls) }) }) } func TestPaymentChannelVoucherSuccess(t *testing.T) { t.Parallel() require := require.New(t) 
payer, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) eol := types.NewBlockHeight(20) amt := types.NewAttoFILFromFIL(10000) daemonTestWithPaymentChannel(t, &payer, &target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { assert := assert.New(t) voucher := mustCreateVoucher(t, d, channelID, types.NewAttoFILFromFIL(100), &payer) assert.Equal(*types.NewAttoFILFromFIL(100), voucher.Amount) }) } func TestPaymentChannelRedeemSuccess(t *testing.T) { t.Parallel() require := require.New(t) payer, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) eol := types.NewBlockHeight(20) amt := types.NewAttoFILFromFIL(10000) targetDaemon := th.NewDaemon( t, th.WithMiner(fixtures.TestMiners[0]), th.KeyFile(fixtures.KeyFilePaths()[1]), ).Start() defer targetDaemon.ShutdownSuccess() daemonTestWithPaymentChannel(t, &payer, &target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { assert := assert.New(t) d.ConnectSuccess(targetDaemon) voucher := createVoucherStr(t, d, channelID, types.NewAttoFILFromFIL(111), &payer, uint64(0)) mustRedeemVoucher(t, targetDaemon, voucher, &target) ls := listChannelsAsStrs(targetDaemon, &payer)[0] assert.Equal(fmt.Sprintf("%v: target: %s, amt: 10000, amt redeemed: 111, eol: 20", channelID.String(), target.String()), ls) }) } func TestPaymentChannelRedeemTooEarlyFails(t *testing.T) { t.Parallel() require := require.New(t) payer, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) eol := types.NewBlockHeight(20) amt := types.NewAttoFILFromFIL(10000) targetDaemon := th.NewDaemon( t, th.WithMiner(fixtures.TestMiners[0]), th.KeyFile(fixtures.KeyFilePaths()[1]), ).Start() defer targetDaemon.ShutdownSuccess() daemonTestWithPaymentChannel(t, &payer, &target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { assert := assert.New(t) d.ConnectSuccess(targetDaemon) voucher := createVoucherStr(t, d, channelID, types.NewAttoFILFromFIL(111), &payer, uint64(8)) // Wait for the voucher message to be processed. 
mustRedeemVoucher(t, targetDaemon, voucher, &target) ls := listChannelsAsStrs(targetDaemon, &payer)[0] assert.Equal(fmt.Sprintf("%v: target: %s, amt: 10000, amt redeemed: 0, eol: 20", channelID.String(), target.String()), ls) }) } func TestPaymentChannelReclaimSuccess(t *testing.T) { t.Parallel() require := require.New(t) // Initial Balance 10,000 payer, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) // Initial Balance 50,000 target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) // Not used in logic eol := types.NewBlockHeight(5) amt := types.NewAttoFILFromFIL(1000) targetDaemon := th.NewDaemon(t, th.KeyFile(fixtures.KeyFilePaths()[1]), th.WithMiner(fixtures.TestMiners[0])).Start() defer targetDaemon.ShutdownSuccess() daemonTestWithPaymentChannel(t, &payer, &target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { assert := assert.New(t) d.ConnectSuccess(targetDaemon) // payer creates a voucher to be redeemed by target (off-chain) voucher := createVoucherStr(t, d, channelID, types.NewAttoFILFromFIL(10), &payer, uint64(0)) // target redeems the voucher (on-chain) mustRedeemVoucher(t, targetDaemon, voucher, &target) lsStr := listChannelsAsStrs(targetDaemon, &payer)[0] assert.Equal(fmt.Sprintf("%v: target: %s, amt: 1000, amt redeemed: 10, eol: %s", channelID, target.String(), eol.String()), lsStr) d.RunSuccess("mining once") d.RunSuccess("mining once") // payer reclaims channel funds (on-chain) mustReclaimChannel(t, d, channelID, &payer) lsStr = listChannelsAsStrs(d, &payer)[0] assert.Contains(lsStr, "no channels") args := []string{"wallet", "balance", payer.String()} balStr := th.RunSuccessFirstLine(d, args...) // channel's original locked funds minus the redeemed voucher amount // are returned to the payer assert.Equal("999999999990", balStr) }) } func TestPaymentChannelCloseSuccess(t *testing.T) { require := require.New(t) // Initial Balance 10,000,000 payerA, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) // Initial Balance 10,000,000 targetA, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) payer := &payerA target := &targetA eol := types.NewBlockHeight(100) amt := types.NewAttoFILFromFIL(10000) targetDaemon := th.NewDaemon(t, th.KeyFile(fixtures.KeyFilePaths()[1]), th.WithMiner(fixtures.TestMiners[0])).Start() defer targetDaemon.ShutdownSuccess() daemonTestWithPaymentChannel(t, payer, target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { assert := assert.New(t) d.ConnectSuccess(targetDaemon) // payer creates a voucher to be redeemed by target (off-chain) voucher := mustCreateVoucher(t, d, channelID, types.NewAttoFILFromFIL(10), payer) // target redeems the voucher (on-chain) and simultaneously closes the channel mustCloseChannel(t, targetDaemon, voucher, target) // channel has been closed lsStr := listChannelsAsStrs(targetDaemon, payer)[0] assert.Contains(lsStr, "no channels") // channel's original locked funds minus the redeemed voucher amount // are returned to the payer args := []string{"wallet", "balance", payer.String()} balStr := th.RunSuccessFirstLine(targetDaemon, args...) assert.Equal("999999999990", balStr) // target's balance reflects redeemed voucher args = []string{"wallet", "balance", target.String()} balStr = th.RunSuccessFirstLine(targetDaemon, args...) 
assert.Equal("1000000000010", balStr) }) } func TestPaymentChannelExtendSuccess(t *testing.T) { t.Parallel() require := require.New(t) payer, err := address.NewFromString(fixtures.TestAddresses[0]) require.NoError(err) target, err := address.NewFromString(fixtures.TestAddresses[1]) require.NoError(err) eol := types.NewBlockHeight(5) amt := types.NewAttoFILFromFIL(2000) daemonTestWithPaymentChannel(t, &payer, &target, amt, eol, func(d *th.TestDaemon, channelID *types.ChannelID) { assert := assert.New(t) extendedEOL := types.NewBlockHeight(6) extendedAmt := types.NewAttoFILFromFIL(3001) lsStr := listChannelsAsStrs(d, &payer)[0] assert.Equal(fmt.Sprintf("%v: target: %s, amt: 2000, amt redeemed: 0, eol: %s", channelID.String(), target.String(), eol.String()), lsStr) mustExtendChannel(t, d, channelID, extendedAmt, extendedEOL, &payer) lsStr = listChannelsAsStrs(d, &payer)[0] assert.Equal(fmt.Sprintf("%v: target: %s, amt: %s, amt redeemed: 0, eol: %s", channelID.String(), target.String(), extendedAmt.Add(amt), extendedEOL), lsStr) }) } func daemonTestWithPaymentChannel(t *testing.T, payerAddress *address.Address, targetAddress *address.Address, fundsToLock *types.AttoFIL, eol *types.BlockHeight, f func(*th.TestDaemon, *types.ChannelID)) { assert := assert.New(t) d := th.NewDaemon( t, th.WithMiner(fixtures.TestMiners[0]), th.KeyFile(fixtures.KeyFilePaths()[0]), ).Start() defer d.ShutdownSuccess() args := []string{"paych", "create"} args = append(args, "--from", payerAddress.String(), "--price", "0", "--limit", "300") args = append(args, targetAddress.String(), fundsToLock.String(), eol.String()) paymentChannelCmd := d.RunSuccess(args...) messageCid, err := cid.Parse(strings.Trim(paymentChannelCmd.ReadStdout(), "\n")) require.NoError(t, err) var wg sync.WaitGroup wg.Add(1) go func() { wait := d.RunSuccess("message", "wait", "--return", "--message=false", "--receipt=false", messageCid.String(), ) stdout := strings.Trim(wait.ReadStdout(), "\n") channelID, ok := types.NewChannelIDFromString(stdout, 10) assert.True(ok) f(d, channelID) wg.Done() }() d.RunSuccess("mining once") wg.Wait() } func mustCreateVoucher(t *testing.T, d *th.TestDaemon, channelID *types.ChannelID, amount *types.AttoFIL, fromAddress *address.Address) paymentbroker.PaymentVoucher { require := require.New(t) voucherString := createVoucherStr(t, d, channelID, amount, fromAddress, uint64(0)) _, cborVoucher, err := multibase.Decode(voucherString) require.NoError(err) var voucher paymentbroker.PaymentVoucher err = cbor.DecodeInto(cborVoucher, &voucher) require.NoError(err) return voucher } func createVoucherStr(t *testing.T, d *th.TestDaemon, channelID *types.ChannelID, amount *types.AttoFIL, payerAddress *address.Address, validAt uint64) string { args := []string{"paych", "voucher", channelID.String(), amount.String()} args = append(args, "--from", payerAddress.String(), "--validat", fmt.Sprintf("%d", validAt)) return th.RunSuccessFirstLine(d, args...) } func listChannelsAsStrs(d *th.TestDaemon, fromAddress *address.Address) []string { args := []string{"paych", "ls"} args = append(args, "--from", fromAddress.String()) return th.RunSuccessLines(d, args...) 
} func mustExtendChannel(t *testing.T, d *th.TestDaemon, channelID *types.ChannelID, amount *types.AttoFIL, eol *types.BlockHeight, payerAddress *address.Address) { require := require.New(t) args := []string{"paych", "extend"} args = append(args, "--from", payerAddress.String(), "--price", "0", "--limit", "300") args = append(args, channelID.String(), amount.String(), eol.String()) redeemCmd := d.RunSuccess(args...) messageCid, err := cid.Parse(strings.Trim(redeemCmd.ReadStdout(), "\n")) require.NoError(err) var wg sync.WaitGroup wg.Add(1) go func() { _ = d.RunSuccess("message", "wait", "--return=false", "--message=false", "--receipt=false", messageCid.String(), ) wg.Done() }() d.RunSuccess("mining once") wg.Wait() } func mustRedeemVoucher(t *testing.T, d *th.TestDaemon, voucher string, targetAddress *address.Address) { require := require.New(t) args := []string{"paych", "redeem", voucher} args = append(args, "--from", targetAddress.String(), "--price", "0", "--limit", "300") redeemCmd := d.RunSuccess(args...) messageCid, err := cid.Parse(strings.Trim(redeemCmd.ReadStdout(), "\n")) require.NoError(err) var wg sync.WaitGroup wg.Add(1) go func() { _ = d.RunSuccess("message", "wait", "--return=false", "--message=false", "--receipt=false", messageCid.String(), ) wg.Done() }() d.RunSuccess("mining once") wg.Wait() } func mustCloseChannel(t *testing.T, d *th.TestDaemon, voucher paymentbroker.PaymentVoucher, targetAddress *address.Address) { require := require.New(t) args := []string{"paych", "close", mustEncodeVoucherStr(t, voucher)} args = append(args, "--from", targetAddress.String(), "--price", "0", "--limit", "300") redeemCmd := d.RunSuccess(args...) messageCid, err := cid.Parse(strings.Trim(redeemCmd.ReadStdout(), "\n")) require.NoError(err) var wg sync.WaitGroup wg.Add(1) go func() { _ = d.RunSuccess("message", "wait", "--return=false", "--message=false", "--receipt=false", messageCid.String(), ) wg.Done() }() d.RunSuccess("mining once") wg.Wait() } func mustReclaimChannel(t *testing.T, d *th.TestDaemon, channelID *types.ChannelID, payerAddress *address.Address) { require := require.New(t) args := []string{"paych", "reclaim", channelID.String()} args = append(args, "--from", payerAddress.String(), "--price", "0", "--limit", "300") reclaimCmd := d.RunSuccess(args...) messageCid, err := cid.Parse(strings.Trim(reclaimCmd.ReadStdout(), "\n")) require.NoError(err) var wg sync.WaitGroup wg.Add(1) go func() { _ = d.RunSuccess("message", "wait", "--return=false", "--message=false", "--receipt=true", messageCid.String(), ) wg.Done() }() d.RunSuccess("mining once") wg.Wait() } func mustEncodeVoucherStr(t *testing.T, voucher paymentbroker.PaymentVoucher) string { require := require.New(t) bytes, err := cbor.DumpObject(voucher) require.NoError(err) encoded, err := multibase.Encode(multibase.Base58BTC, bytes) require.NoError(err) return encoded }
1
17,304
curious why the renumbering is required here and elsewhere?
filecoin-project-venus
go
@@ -426,9 +426,12 @@ func validateClusterPlatform(path *field.Path, platform hivev1.Platform) field.E if aws := platform.AWS; aws != nil { numberOfPlatforms++ awsPath := path.Child("aws") - if aws.CredentialsSecretRef.Name == "" { + if aws.CredentialsSecretRef.Name == "" && aws.CredentialsAssumeRole == nil { allErrs = append(allErrs, field.Required(awsPath.Child("credentialsSecretRef", "name"), "must specify secrets for AWS access")) } + if aws.CredentialsAssumeRole != nil && aws.CredentialsSecretRef.Name != "" { + allErrs = append(allErrs, field.Required(awsPath.Child("credentialsAssumeRole"), "cannot specify assume role when credentials secret is provided")) + } if aws.Region == "" { allErrs = append(allErrs, field.Required(awsPath.Child("region"), "must specify AWS region")) }
1
package v1 import ( "fmt" "net/http" "reflect" "regexp" "strconv" "strings" log "github.com/sirupsen/logrus" admissionv1beta1 "k8s.io/api/admission/v1beta1" "k8s.io/apimachinery/pkg/api/errors" apivalidation "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" hivev1 "github.com/openshift/hive/apis/hive/v1" hivev1aws "github.com/openshift/hive/apis/hive/v1/aws" "github.com/openshift/hive/pkg/constants" "github.com/openshift/hive/pkg/controller/awsprivatelink" "github.com/openshift/hive/pkg/manageddns" ) const ( clusterDeploymentGroup = "hive.openshift.io" clusterDeploymentVersion = "v1" clusterDeploymentResource = "clusterdeployments" clusterDeploymentAdmissionGroup = "admission.hive.openshift.io" clusterDeploymentAdmissionVersion = "v1" ) var ( mutableFields = []string{"CertificateBundles", "ClusterMetadata", "ControlPlaneConfig", "Ingress", "Installed", "PreserveOnDelete", "ClusterPoolRef", "PowerState", "HibernateAfter", "InstallAttemptsLimit", "MachineManagement"} ) // ClusterDeploymentValidatingAdmissionHook is a struct that is used to reference what code should be run by the generic-admission-server. type ClusterDeploymentValidatingAdmissionHook struct { decoder *admission.Decoder validManagedDomains []string fs *featureSet awsPrivateLinkConfig *hivev1.AWSPrivateLinkConfig } // NewClusterDeploymentValidatingAdmissionHook constructs a new ClusterDeploymentValidatingAdmissionHook func NewClusterDeploymentValidatingAdmissionHook(decoder *admission.Decoder) *ClusterDeploymentValidatingAdmissionHook { logger := log.WithField("validatingWebhook", "clusterdeployment") managedDomains, err := manageddns.ReadManagedDomainsFile() if err != nil { logger.WithError(err).Fatal("Unable to read managedDomains file") } domains := []string{} for _, md := range managedDomains { domains = append(domains, md.Domains...) } aplConfig, err := awsprivatelink.ReadAWSPrivateLinkControllerConfigFile() if err != nil { logger.WithError(err).Fatal("Unable to read AWS Private Link Config file") } logger.WithField("managedDomains", domains).Info("Read managed domains") return &ClusterDeploymentValidatingAdmissionHook{ decoder: decoder, validManagedDomains: domains, fs: newFeatureSet(), awsPrivateLinkConfig: aplConfig, } } // ValidatingResource is called by generic-admission-server on startup to register the returned REST resource through which the // webhook is accessed by the kube apiserver. // For example, generic-admission-server uses the data below to register the webhook on the REST resource "/apis/admission.hive.openshift.io/v1/clusterdeploymentvalidators". // When the kube apiserver calls this registered REST resource, the generic-admission-server calls the Validate() method below. func (a *ClusterDeploymentValidatingAdmissionHook) ValidatingResource() (plural schema.GroupVersionResource, singular string) { log.WithFields(log.Fields{ "group": clusterDeploymentAdmissionGroup, "version": clusterDeploymentAdmissionVersion, "resource": "clusterdeploymentvalidator", }).Info("Registering validation REST resource") // NOTE: This GVR is meant to be different than the ClusterDeployment CRD GVR which has group "hive.openshift.io". 
return schema.GroupVersionResource{ Group: clusterDeploymentAdmissionGroup, Version: clusterDeploymentAdmissionVersion, Resource: "clusterdeploymentvalidators", }, "clusterdeploymentvalidator" } // Initialize is called by generic-admission-server on startup to setup any special initialization that your webhook needs. func (a *ClusterDeploymentValidatingAdmissionHook) Initialize(kubeClientConfig *rest.Config, stopCh <-chan struct{}) error { log.WithFields(log.Fields{ "group": clusterDeploymentAdmissionGroup, "version": clusterDeploymentAdmissionVersion, "resource": "clusterdeploymentvalidator", }).Info("Initializing validation REST resource") return nil // No initialization needed right now. } // Validate is called by generic-admission-server when the registered REST resource above is called with an admission request. // Usually it's the kube apiserver that is making the admission validation request. func (a *ClusterDeploymentValidatingAdmissionHook) Validate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { contextLogger := log.WithFields(log.Fields{ "operation": admissionSpec.Operation, "group": admissionSpec.Resource.Group, "version": admissionSpec.Resource.Version, "resource": admissionSpec.Resource.Resource, "method": "Validate", }) if !a.shouldValidate(admissionSpec) { contextLogger.Info("Skipping validation for request") // The request object isn't something that this validator should validate. // Therefore, we say that it's Allowed. return &admissionv1beta1.AdmissionResponse{ Allowed: true, } } contextLogger.Info("Validating request") switch admissionSpec.Operation { case admissionv1beta1.Create: return a.validateCreate(admissionSpec) case admissionv1beta1.Update: return a.validateUpdate(admissionSpec) case admissionv1beta1.Delete: return a.validateDelete(admissionSpec) default: contextLogger.Info("Successful validation") return &admissionv1beta1.AdmissionResponse{ Allowed: true, } } } // shouldValidate explicitly checks if the request should validated. For example, this webhook may have accidentally been registered to check // the validity of some other type of object with a different GVR. func (a *ClusterDeploymentValidatingAdmissionHook) shouldValidate(admissionSpec *admissionv1beta1.AdmissionRequest) bool { contextLogger := log.WithFields(log.Fields{ "operation": admissionSpec.Operation, "group": admissionSpec.Resource.Group, "version": admissionSpec.Resource.Version, "resource": admissionSpec.Resource.Resource, "method": "shouldValidate", }) if admissionSpec.Resource.Group != clusterDeploymentGroup { contextLogger.Debug("Returning False, not our group") return false } if admissionSpec.Resource.Version != clusterDeploymentVersion { contextLogger.Debug("Returning False, it's our group, but not the right version") return false } if admissionSpec.Resource.Resource != clusterDeploymentResource { contextLogger.Debug("Returning False, it's our group and version, but not the right resource") return false } // If we get here, then we're supposed to validate the object. contextLogger.Debug("Returning True, passed all prerequisites.") return true } // validateCreate specifically validates create operations for ClusterDeployment objects. 
func (a *ClusterDeploymentValidatingAdmissionHook) validateCreate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { contextLogger := log.WithFields(log.Fields{ "operation": admissionSpec.Operation, "group": admissionSpec.Resource.Group, "version": admissionSpec.Resource.Version, "resource": admissionSpec.Resource.Resource, "method": "validateCreate", }) if admResp := validatefeatureGates(a.decoder, admissionSpec, a.fs, contextLogger); admResp != nil { contextLogger.Errorf("object was rejected due to feature gate failures") return admResp } cd := &hivev1.ClusterDeployment{} if err := a.decoder.DecodeRaw(admissionSpec.Object, cd); err != nil { contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: err.Error(), }, } } // Add the new data to the contextLogger contextLogger.Data["object.Name"] = cd.Name // TODO: Put Create Validation Here (or in openAPIV3Schema validation section of crd) if len(cd.Name) > validation.DNS1123LabelMaxLength { message := fmt.Sprintf("Invalid cluster deployment name (.meta.name): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength)) contextLogger.Error(message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } if len(cd.Spec.ClusterName) > validation.DNS1123LabelMaxLength { message := fmt.Sprintf("Invalid cluster name (.spec.clusterName): %s", validation.MaxLenError(validation.DNS1123LabelMaxLength)) contextLogger.Error(message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } // validate the ingress if ingressValidationResult := validateIngress(cd, contextLogger); ingressValidationResult != nil { return ingressValidationResult } // validate the certificate bundles if r := validateCertificateBundles(cd, contextLogger); r != nil { return r } if cd.Spec.ManageDNS { if !validateDomain(cd.Spec.BaseDomain, a.validManagedDomains) { message := "The base domain must be a child of one of the managed domains for ClusterDeployments with manageDNS set to true" return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } } allErrs := field.ErrorList{} specPath := field.NewPath("spec") if !cd.Spec.Installed { if cd.Spec.Provisioning == nil { allErrs = append(allErrs, field.Required(specPath.Child("provisioning"), "provisioning is required if not installed")) } else { if cd.Spec.Provisioning.InstallConfigSecretRef == nil || cd.Spec.Provisioning.InstallConfigSecretRef.Name == "" { // InstallConfigSecretRef is not required for agent install strategy if cd.Spec.Provisioning.InstallStrategy == nil || cd.Spec.Provisioning.InstallStrategy.Agent == nil { allErrs = append(allErrs, field.Required(specPath.Child("provisioning", "installConfigSecretRef", "name"), "must specify an InstallConfig")) } } // validate the agent install strategy: if cd.Spec.Provisioning.InstallStrategy != nil && cd.Spec.Provisioning.InstallStrategy.Agent != nil { allErrs = append(allErrs, 
validateAgentInstallStrategy(specPath, cd)...) } else if cd.Spec.Platform.AgentBareMetal != nil { // agent bare metal platform can only be used with agent install strategy: allErrs = append(allErrs, field.Forbidden(specPath.Child("platform", "agentBareMetal"), "agent bare metal platform can only be used with agent install strategy")) } } } allErrs = append(allErrs, validateClusterPlatform(specPath.Child("platform"), cd.Spec.Platform)...) allErrs = append(allErrs, validateCanManageDNSForClusterPlatform(specPath, cd.Spec)...) if cd.Spec.Platform.AWS != nil { allErrs = append(allErrs, validateAWSPrivateLink(specPath.Child("platform", "aws"), cd.Spec.Platform.AWS, a.awsPrivateLinkConfig)...) } if cd.Spec.Provisioning != nil { if cd.Spec.Provisioning.SSHPrivateKeySecretRef != nil && cd.Spec.Provisioning.SSHPrivateKeySecretRef.Name == "" { allErrs = append(allErrs, field.Required(specPath.Child("provisioning", "sshPrivateKeySecretRef", "name"), "must specify a name for the ssh private key secret if the ssh private key secret is specified")) } } if poolRef := cd.Spec.ClusterPoolRef; poolRef != nil { if claimName := poolRef.ClaimName; claimName != "" { allErrs = append(allErrs, field.Invalid(specPath.Child("clusterPoolRef", "claimName"), claimName, "cannot create a ClusterDeployment that is already claimed")) } } if machineManagement := cd.Spec.MachineManagement; machineManagement != nil { if targetNamespace := machineManagement.TargetNamespace; targetNamespace != "" { allErrs = append(allErrs, field.Invalid(specPath.Child("machineManagement", "targetNamespace"), targetNamespace, "cannot set targetNamespace during create, targetNamespace is created and set by controllers")) } } if len(allErrs) > 0 { status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &status, } } // If we get here, then all checks passed, so the object is valid. 
contextLogger.Info("Successful validation") return &admissionv1beta1.AdmissionResponse{ Allowed: true, } } func validateAWSPrivateLink(path *field.Path, platform *hivev1aws.Platform, config *hivev1.AWSPrivateLinkConfig) field.ErrorList { allErrs := field.ErrorList{} pl := platform.PrivateLink if pl == nil || !pl.Enabled { return allErrs } if config == nil || len(config.EndpointVPCInventory) == 0 { allErrs = append(allErrs, field.Forbidden(path.Child("privateLink", "enabled"), "AWS PrivateLink is not supported in the environment")) return allErrs } supportedRegions := sets.NewString() for _, inv := range config.EndpointVPCInventory { supportedRegions.Insert(inv.Region) } if !supportedRegions.Has(platform.Region) { allErrs = append(allErrs, field.Forbidden(path.Child("privateLink", "enabled"), fmt.Sprintf("AWS Private Link is not supported in %s region", platform.Region))) } return allErrs } func validateAgentInstallStrategy(specPath *field.Path, cd *hivev1.ClusterDeployment) field.ErrorList { ais := cd.Spec.Provisioning.InstallStrategy.Agent allErrs := field.ErrorList{} agentPath := specPath.Child("provisioning", "installStrategy", "agent") // agent install strategy can only be used with agent bare metal platform today: if cd.Spec.Platform.AgentBareMetal == nil { allErrs = append(allErrs, field.Forbidden(agentPath, "agent install strategy can only be used with agent bare metal platform")) } // must use either 1 or 3 control plane agents: if ais.ProvisionRequirements.ControlPlaneAgents != 1 && ais.ProvisionRequirements.ControlPlaneAgents != 3 { allErrs = append(allErrs, field.Invalid( agentPath.Child("provisionRequirements", "controlPlaneAgents"), ais.ProvisionRequirements.ControlPlaneAgents, "cluster can only be formed with 1 or 3 control plane agents")) } // must use either 0 or >=2 worker agents due to limitations in assisted service: if ais.ProvisionRequirements.WorkerAgents == 1 { allErrs = append(allErrs, field.Invalid( agentPath.Child("provisionRequirements", "workerAgents"), ais.ProvisionRequirements.WorkerAgents, "cluster can only be formed with 0 or >= 2 worker agents")) } // install config secret ref should not be set for agent installs: if cd.Spec.Provisioning.InstallConfigSecretRef != nil { allErrs = append(allErrs, field.Forbidden(specPath.Child("provisioning", "installConfigSecretRef"), "custom install config cannot be used with agent install strategy")) } return allErrs } func validatefeatureGates(decoder *admission.Decoder, admissionSpec *admissionv1beta1.AdmissionRequest, fs *featureSet, contextLogger *log.Entry) *admissionv1beta1.AdmissionResponse { obj := &unstructured.Unstructured{} if err := decoder.DecodeRaw(admissionSpec.Object, obj); err != nil { contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: err.Error(), }, } } contextLogger.WithField("enabledFeatureGates", fs.Enabled).Info("feature gates enabled") errs := field.ErrorList{} // To add validation for feature gates use these examples // errs = append(errs, equalOnlyWhenFeatureGate(fs, obj, "spec.platform.type", "AlphaPlatformAEnabled", "platformA")...) errs = append(errs, existsOnlyWhenFeatureGate(fs, obj, "spec.provisioning.installStrategy.agent", hivev1.FeatureGateAgentInstallStrategy)...) errs = append(errs, existsOnlyWhenFeatureGate(fs, obj, "spec.machineManagement", hivev1.FeatureGateMachineManagement)...) 
if len(errs) > 0 && len(errs.ToAggregate().Errors()) > 0 { status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, errs).Status() return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &status, } } return nil } func validateClusterPlatform(path *field.Path, platform hivev1.Platform) field.ErrorList { allErrs := field.ErrorList{} numberOfPlatforms := 0 if aws := platform.AWS; aws != nil { numberOfPlatforms++ awsPath := path.Child("aws") if aws.CredentialsSecretRef.Name == "" { allErrs = append(allErrs, field.Required(awsPath.Child("credentialsSecretRef", "name"), "must specify secrets for AWS access")) } if aws.Region == "" { allErrs = append(allErrs, field.Required(awsPath.Child("region"), "must specify AWS region")) } } if azure := platform.Azure; azure != nil { numberOfPlatforms++ azurePath := path.Child("azure") if azure.CredentialsSecretRef.Name == "" { allErrs = append(allErrs, field.Required(azurePath.Child("credentialsSecretRef", "name"), "must specify secrets for Azure access")) } if azure.Region == "" { allErrs = append(allErrs, field.Required(azurePath.Child("region"), "must specify Azure region")) } if azure.BaseDomainResourceGroupName == "" { allErrs = append(allErrs, field.Required(azurePath.Child("baseDomainResourceGroupName"), "must specify the Azure resource group for the base domain")) } } if gcp := platform.GCP; gcp != nil { numberOfPlatforms++ gcpPath := path.Child("gcp") if gcp.CredentialsSecretRef.Name == "" { allErrs = append(allErrs, field.Required(gcpPath.Child("credentialsSecretRef", "name"), "must specify secrets for GCP access")) } if gcp.Region == "" { allErrs = append(allErrs, field.Required(gcpPath.Child("region"), "must specify GCP region")) } } if openstack := platform.OpenStack; openstack != nil { numberOfPlatforms++ openstackPath := path.Child("openStack") if openstack.CredentialsSecretRef.Name == "" { allErrs = append(allErrs, field.Required(openstackPath.Child("credentialsSecretRef", "name"), "must specify secrets for OpenStack access")) } if openstack.CertificatesSecretRef != nil && openstack.CertificatesSecretRef.Name == "" { allErrs = append(allErrs, field.Required(openstackPath.Child("certificatesSecretRef", "name"), "must specify name of the secret for OpenStack access")) } if openstack.Cloud == "" { allErrs = append(allErrs, field.Required(openstackPath.Child("cloud"), "must specify cloud section of credentials secret to use")) } } if vsphere := platform.VSphere; vsphere != nil { numberOfPlatforms++ vspherePath := path.Child("vsphere") if vsphere.CredentialsSecretRef.Name == "" { allErrs = append(allErrs, field.Required(vspherePath.Child("credentialsSecretRef", "name"), "must specify secrets for vSphere access")) } if vsphere.CertificatesSecretRef.Name == "" { allErrs = append(allErrs, field.Required(vspherePath.Child("certificatesSecretRef", "name"), "must specify certificates for vSphere access")) } if vsphere.VCenter == "" { allErrs = append(allErrs, field.Required(vspherePath.Child("vCenter"), "must specify vSphere vCenter")) } if vsphere.Datacenter == "" { allErrs = append(allErrs, field.Required(vspherePath.Child("datacenter"), "must specify vSphere datacenter")) } if vsphere.DefaultDatastore == "" { allErrs = append(allErrs, field.Required(vspherePath.Child("defaultDatastore"), "must specify vSphere defaultDatastore")) } } if ovirt := platform.Ovirt; ovirt != nil { numberOfPlatforms++ ovirtPath := path.Child("ovirt") if ovirt.CredentialsSecretRef.Name == "" { allErrs = append(allErrs, 
field.Required(ovirtPath.Child("credentialsSecretRef", "name"), "must specify secrets for oVirt access")) } if ovirt.CertificatesSecretRef.Name == "" { allErrs = append(allErrs, field.Required(ovirtPath.Child("certificatesSecretRef", "name"), "must specify certificates for oVirt access")) } if ovirt.ClusterID == "" { allErrs = append(allErrs, field.Required(ovirtPath.Child("ovirt_cluster_id"), "must specify ovirt_cluster_id")) } if ovirt.StorageDomainID == "" { allErrs = append(allErrs, field.Required(ovirtPath.Child("ovirt_storage_domain_id"), "must specify ovirt_storage_domain_id")) } } if baremetal := platform.BareMetal; baremetal != nil { numberOfPlatforms++ } if agent := platform.AgentBareMetal; agent != nil { numberOfPlatforms++ } switch { case numberOfPlatforms == 0: allErrs = append(allErrs, field.Required(path, "must specify a platform")) case numberOfPlatforms > 1: allErrs = append(allErrs, field.Invalid(path, platform, "must specify only a single platform")) } return allErrs } func validateCanManageDNSForClusterPlatform(specPath *field.Path, spec hivev1.ClusterDeploymentSpec) field.ErrorList { allErrs := field.ErrorList{} canManageDNS := false if spec.Platform.AWS != nil { canManageDNS = true } if spec.Platform.Azure != nil { canManageDNS = true } if spec.Platform.GCP != nil { canManageDNS = true } if !canManageDNS && spec.ManageDNS { allErrs = append(allErrs, field.Invalid(specPath.Child("manageDNS"), spec.ManageDNS, "cannot manage DNS for the selected platform")) } return allErrs } // validateUpdate specifically validates update operations for ClusterDeployment objects. func (a *ClusterDeploymentValidatingAdmissionHook) validateUpdate(admissionSpec *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { contextLogger := log.WithFields(log.Fields{ "operation": admissionSpec.Operation, "group": admissionSpec.Resource.Group, "version": admissionSpec.Resource.Version, "resource": admissionSpec.Resource.Resource, "method": "validateUpdate", }) if admResp := validatefeatureGates(a.decoder, admissionSpec, a.fs, contextLogger); admResp != nil { contextLogger.Errorf("object was rejected due to feature gate failures") return admResp } cd := &hivev1.ClusterDeployment{} if err := a.decoder.DecodeRaw(admissionSpec.Object, cd); err != nil { contextLogger.Errorf("Failed unmarshaling Object: %v", err.Error()) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: err.Error(), }, } } // Add the new data to the contextLogger contextLogger.Data["object.Name"] = cd.Name oldObject := &hivev1.ClusterDeployment{} if err := a.decoder.DecodeRaw(admissionSpec.OldObject, oldObject); err != nil { contextLogger.Errorf("Failed unmarshaling OldObject: %v", err.Error()) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: err.Error(), }, } } // Add the new data to the contextLogger contextLogger.Data["oldObject.Name"] = oldObject.Name hasChangedImmutableField, changedFieldName := hasChangedImmutableField(&oldObject.Spec, &cd.Spec) if hasChangedImmutableField { message := fmt.Sprintf("Attempted to change ClusterDeployment.Spec.%v. 
ClusterDeployment.Spec is immutable except for %v", changedFieldName, mutableFields) contextLogger.Infof("Failed validation: %v", message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } // validate the newly incoming ingress if ingressValidationResult := validateIngress(cd, contextLogger); ingressValidationResult != nil { return ingressValidationResult } // Now catch the case where there was a previously defined list and now it's being emptied hasClearedOutPreviouslyDefinedIngressList := hasClearedOutPreviouslyDefinedIngressList(&oldObject.Spec, &cd.Spec) if hasClearedOutPreviouslyDefinedIngressList { message := fmt.Sprintf("Previously defined a list of ingress objects, must provide a default ingress object") contextLogger.Infof("Failed validation: %v", message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } allErrs := field.ErrorList{} specPath := field.NewPath("spec") if cd.Spec.Installed { if cd.Spec.ClusterMetadata != nil { if oldObject.Spec.Installed { allErrs = append(allErrs, apivalidation.ValidateImmutableField(cd.Spec.ClusterMetadata, oldObject.Spec.ClusterMetadata, specPath.Child("clusterMetadata"))...) } } else { allErrs = append(allErrs, field.Required(specPath.Child("clusterMetadata"), "installed cluster must have cluster metadata")) } } else { if oldObject.Spec.Installed { allErrs = append(allErrs, field.Invalid(specPath.Child("installed"), cd.Spec.Installed, "cannot make uninstalled once installed")) } } // Validate the ClusterPoolRef: switch oldPoolRef, newPoolRef := oldObject.Spec.ClusterPoolRef, cd.Spec.ClusterPoolRef; { case oldPoolRef != nil && newPoolRef != nil: allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.Namespace, oldPoolRef.Namespace, specPath.Child("clusterPoolRef", "namespace"))...) allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.PoolName, oldPoolRef.PoolName, specPath.Child("clusterPoolRef", "poolName"))...) if oldClaim := oldPoolRef.ClaimName; oldClaim != "" { allErrs = append(allErrs, apivalidation.ValidateImmutableField(newPoolRef.ClaimName, oldClaim, specPath.Child("clusterPoolRef", "claimName"))...) } case oldPoolRef != nil && newPoolRef == nil: allErrs = append(allErrs, field.Invalid(specPath.Child("clusterPoolRef"), newPoolRef, "cannot remove clusterPoolRef")) case oldPoolRef == nil && newPoolRef != nil: allErrs = append(allErrs, field.Invalid(specPath.Child("clusterPoolRef"), newPoolRef, "cannot add clusterPoolRef")) } // Validate cd.Spec.MachineManagement.TargetNamespace if cd.Spec.MachineManagement != nil { switch oldTargetNamespace, newTargetNamespace := oldObject.Spec.MachineManagement.TargetNamespace, cd.Spec.MachineManagement.TargetNamespace; { case oldTargetNamespace != "" && newTargetNamespace != "": allErrs = append(allErrs, apivalidation.ValidateImmutableField(cd.Spec.MachineManagement.TargetNamespace, oldObject.Spec.MachineManagement.TargetNamespace, specPath.Child("machineManagement", "targetNamespace"))...) 
case oldTargetNamespace != "" && newTargetNamespace == "": allErrs = append(allErrs, field.Invalid(specPath.Child("machineManagement", "targetNamespace"), newTargetNamespace, "cannot remove targetNamespace")) } } if len(allErrs) > 0 { contextLogger.WithError(allErrs.ToAggregate()).Info("failed validation") status := errors.NewInvalid(schemaGVK(admissionSpec.Kind).GroupKind(), admissionSpec.Name, allErrs).Status() return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &status, } } // If we get here, then all checks passed, so the object is valid. contextLogger.Info("Successful validation") return &admissionv1beta1.AdmissionResponse{ Allowed: true, } } // validateDelete specifically validates delete operations for ClusterDeployment objects. func (a *ClusterDeploymentValidatingAdmissionHook) validateDelete(request *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionResponse { logger := log.WithFields(log.Fields{ "operation": request.Operation, "group": request.Resource.Group, "version": request.Resource.Version, "resource": request.Resource.Resource, "method": "validateDelete", }) // If running on OpenShift 3.11, OldObject will not be populated. All we can do is accept the DELETE request. if len(request.OldObject.Raw) == 0 { logger.Info("Cannot validate the DELETE since OldObject is empty") return &admissionv1beta1.AdmissionResponse{ Allowed: true, } } oldObject := &hivev1.ClusterDeployment{} if err := a.decoder.DecodeRaw(request.OldObject, oldObject); err != nil { logger.Errorf("Failed unmarshaling Object: %v", err.Error()) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: err.Error(), }, } } logger.Data["object.Name"] = oldObject.Name var allErrs field.ErrorList if value, present := oldObject.Annotations[constants.ProtectedDeleteAnnotation]; present { if enabled, err := strconv.ParseBool(value); enabled && err == nil { allErrs = append(allErrs, field.Invalid( field.NewPath("metadata", "annotations", constants.ProtectedDeleteAnnotation), oldObject.Annotations[constants.ProtectedDeleteAnnotation], "cannot delete while annotation is present", )) } else { logger.WithField(constants.ProtectedDeleteAnnotation, value).Info("Protected Delete annotation present but not set to true") } } if len(allErrs) > 0 { logger.WithError(allErrs.ToAggregate()).Info("failed validation") status := errors.NewInvalid(schemaGVK(request.Kind).GroupKind(), request.Name, allErrs).Status() return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &status, } } logger.Info("Successful validation") return &admissionv1beta1.AdmissionResponse{ Allowed: true, } } // isFieldMutable says whether the ClusterDeployment.spec field is meant to be mutable or not. func isFieldMutable(value string) bool { for _, mutableField := range mutableFields { if value == mutableField { return true } } return false } // hasChangedImmutableField determines if a ClusterDeployment.spec immutable field was changed. func hasChangedImmutableField(oldObject, cd *hivev1.ClusterDeploymentSpec) (bool, string) { ooElem := reflect.ValueOf(oldObject).Elem() noElem := reflect.ValueOf(cd).Elem() for i := 0; i < ooElem.NumField(); i++ { ooFieldName := ooElem.Type().Field(i).Name ooValue := ooElem.Field(i).Interface() noValue := noElem.Field(i).Interface() if !isFieldMutable(ooFieldName) && !reflect.DeepEqual(ooValue, noValue) { // The field isn't mutable -and- has been changed. 
DO NOT ALLOW. return true, ooFieldName } } return false, "" } func hasClearedOutPreviouslyDefinedIngressList(oldObject, cd *hivev1.ClusterDeploymentSpec) bool { // We don't allow a ClusterDeployment which had previously defined a list of Ingress objects // to then be cleared out. It either must be cleared from the beginning (ie just use default behavior), // or the ClusterDeployment must continue to define at least the 'default' ingress object. if len(oldObject.Ingress) > 0 && len(cd.Ingress) == 0 { return true } return false } func validateIngressDomainsShareClusterDomain(cd *hivev1.ClusterDeploymentSpec) bool { // ingress entries must share the same domain as the cluster // so watch for an ingress domain ending in: .<clusterName>.<baseDomain> regexString := fmt.Sprintf(`(?i).*\.%s.%s$`, cd.ClusterName, cd.BaseDomain) sharedSubdomain := regexp.MustCompile(regexString) for _, ingress := range cd.Ingress { if !sharedSubdomain.Match([]byte(ingress.Domain)) { return false } } return true } func validateIngressDomainsNotWildcard(cd *hivev1.ClusterDeploymentSpec) bool { // check for domains with leading '*' // the * is unnecessary as the ingress controller assumes a wildcard for _, ingress := range cd.Ingress { if ingress.Domain[0] == '*' { return false } } return true } func validateIngressServingCertificateExists(cd *hivev1.ClusterDeploymentSpec) bool { // Include the empty string in the set of certs so that an ingress with // an empty serving certificate passes. certs := sets.NewString("") for _, cert := range cd.CertificateBundles { certs.Insert(cert.Name) } for _, ingress := range cd.Ingress { if !certs.Has(ingress.ServingCertificate) { return false } } return true } // empty ingress is allowed (for create), but if it's non-zero // it must include an entry for 'default' func validateIngressList(cd *hivev1.ClusterDeploymentSpec) bool { if len(cd.Ingress) == 0 { return true } defaultFound := false for _, ingress := range cd.Ingress { if ingress.Name == "default" { defaultFound = true } } if !defaultFound { return false } return true } func validateDomain(domain string, validDomains []string) bool { matchFound := false for _, validDomain := range validDomains { // Do not allow the base domain to be the same as one of the managed domains. if domain == validDomain { return false } dottedValidDomain := "." 
+ validDomain if !strings.HasSuffix(domain, dottedValidDomain) { continue } childPart := strings.TrimSuffix(domain, dottedValidDomain) if !strings.ContainsRune(childPart, '.') { matchFound = true } } return matchFound } func validateIngress(cd *hivev1.ClusterDeployment, contextLogger *log.Entry) *admissionv1beta1.AdmissionResponse { if !validateIngressList(&cd.Spec) { message := fmt.Sprintf("Ingress list must include a default entry") contextLogger.Infof("Failed validation: %v", message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } if !validateIngressDomainsNotWildcard(&cd.Spec) { message := "Ingress domains must not lead with *" contextLogger.Infof("Failed validation: %v", message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } if !validateIngressDomainsShareClusterDomain(&cd.Spec) { message := "Ingress domains must share the same domain as the cluster" contextLogger.Infof("Failed validation: %v", message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } if !validateIngressServingCertificateExists(&cd.Spec) { message := "Ingress has serving certificate that does not exist in certificate bundle" contextLogger.Infof("Failed validation: %v", message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } // everything passed return nil } func validateCertificateBundles(cd *hivev1.ClusterDeployment, contextLogger *log.Entry) *admissionv1beta1.AdmissionResponse { for _, certBundle := range cd.Spec.CertificateBundles { if certBundle.Name == "" { message := "Certificate bundle is missing a name" contextLogger.Infof("Failed validation: %v", message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } if certBundle.CertificateSecretRef.Name == "" { message := "Certificate bundle is missing a secret reference" contextLogger.Infof("Failed validation: %v", message) return &admissionv1beta1.AdmissionResponse{ Allowed: false, Result: &metav1.Status{ Status: metav1.StatusFailure, Code: http.StatusBadRequest, Reason: metav1.StatusReasonBadRequest, Message: message, }, } } } return nil }
1
17188
s/must specify secrets/must specify secrets or Role info/
openshift-hive
go
@@ -63,8 +63,8 @@ C2::Application.routes.draw do
   end
 
   mount Peek::Railtie => "/peek"
+  mount Blazer::Engine, at: "blazer"
   if Rails.env.development?
     mount LetterOpenerWeb::Engine => "letter_opener"
-    mount Blazer::Engine, at: "blazer"
   end
 end
1
C2::Application.routes.draw do ActiveAdmin.routes(self) root to: "home#index" get "/error" => "home#error" get "/profile" => "profile#show" post "/profile" => "profile#update" get "/summary" => "summary#index" get "/summary/:fiscal_year" => "summary#index" get "/feedback" => "feedback#index" get "/feedback/thanks" => "feedback#thanks" post "/feedback" => "feedback#create" match "/auth/:provider/callback" => "auth#oauth_callback", via: [:get] get "/auth/failure" => "auth#failure" post "/logout" => "auth#logout" resources :help, only: [:index, :show] # mandrill-rails resource :inbox, controller: "inbox", only: [:show, :create] namespace :api do scope :v1 do namespace :ncr do resources :work_orders, only: [:index] end resources :users, only: [:index] end end resources :proposals, only: [:index, :show] do member do get "approve" # this route has special protection to prevent the confused deputy problem # if you are adding a new controller which performs an action, use post instead post "approve" get "cancel_form" post "cancel" get "history" end collection do get "archive" get "download", defaults: { format: "csv" } get "query" end resources :comments, only: :create resources :attachments, only: [:create, :destroy, :show] resources :observations, only: [:create, :destroy] end resources :reports, only: [:index, :show, :create, :destroy] namespace :ncr do resources :work_orders, except: [:index, :destroy] get "/dashboard" => "dashboard#index" end namespace :gsa18f do resources :procurements, except: [:index, :destroy] get "/dashboard" => "dashboard#index" end mount Peek::Railtie => "/peek" if Rails.env.development? mount LetterOpenerWeb::Engine => "letter_opener" mount Blazer::Engine, at: "blazer" end end
1
16874
let's leave this as a dev-only feature, and copy prod db to local env when needed. that keeps blazer security issues to a minimum.
18F-C2
rb
@@ -22,7 +22,7 @@ namespace OpenTelemetry.Trace.Samplers
     public sealed class AlwaysOffActivitySampler : ActivitySampler
     {
         /// <inheritdoc />
-        public override string Description { get; } = nameof(AlwaysOffActivitySampler);
+        public override string Description { get; } = "AlwaysOffSampler";
 
         /// <inheritdoc />
         public override SamplingResult ShouldSample(in ActivitySamplingParameters samplingParameters)
1
// <copyright file="AlwaysOffActivitySampler.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> namespace OpenTelemetry.Trace.Samplers { /// <summary> /// Sampler implementation which never samples any activity. /// </summary> public sealed class AlwaysOffActivitySampler : ActivitySampler { /// <inheritdoc /> public override string Description { get; } = nameof(AlwaysOffActivitySampler); /// <inheritdoc /> public override SamplingResult ShouldSample(in ActivitySamplingParameters samplingParameters) { return new SamplingResult(false); } } }
1
15016
We'll be renaming ActivitySampler to Sampler anyway, so this change won't be needed. I'd prefer to avoid changes here so we don't get a merge conflict with my PR doing the rename.
open-telemetry-opentelemetry-dotnet
.cs
@@ -278,7 +278,10 @@ func mutateHeadersByRules(headers, rules http.Header, repl httpserver.Replacer)
 		} else if strings.HasPrefix(ruleField, "-") {
 			headers.Del(strings.TrimPrefix(ruleField, "-"))
 		} else if len(ruleValues) > 0 {
-			headers.Set(ruleField, repl.Replace(ruleValues[len(ruleValues)-1]))
+			var replacement = repl.Replace(ruleValues[len(ruleValues)-1])
+			if len(replacement) > 0 {
+				headers.Set(ruleField, replacement)
+			}
 		}
 	}
 }
1
// Package proxy is middleware that proxies HTTP requests. package proxy import ( "errors" "net" "net/http" "net/url" "strings" "sync/atomic" "time" "github.com/mholt/caddy/caddyhttp/httpserver" ) // Proxy represents a middleware instance that can proxy requests. type Proxy struct { Next httpserver.Handler Upstreams []Upstream } // Upstream manages a pool of proxy upstream hosts. type Upstream interface { // The path this upstream host should be routed on From() string // Selects an upstream host to be routed to. It // should return a suitable upstream host, or nil // if no such hosts are available. Select(*http.Request) *UpstreamHost // Checks if subpath is not an ignored path AllowedPath(string) bool // Gets how long to try selecting upstream hosts // in the case of cascading failures. GetTryDuration() time.Duration // Gets how long to wait between selecting upstream // hosts in the case of cascading failures. GetTryInterval() time.Duration } // UpstreamHostDownFunc can be used to customize how Down behaves. type UpstreamHostDownFunc func(*UpstreamHost) bool // UpstreamHost represents a single proxy upstream type UpstreamHost struct { Conns int64 // must be first field to be 64-bit aligned on 32-bit systems MaxConns int64 Name string // hostname of this upstream host UpstreamHeaders http.Header DownstreamHeaders http.Header FailTimeout time.Duration CheckDown UpstreamHostDownFunc WithoutPathPrefix string ReverseProxy *ReverseProxy Fails int32 Unhealthy bool } // Down checks whether the upstream host is down or not. // Down will try to use uh.CheckDown first, and will fall // back to some default criteria if necessary. func (uh *UpstreamHost) Down() bool { if uh.CheckDown == nil { // Default settings return uh.Unhealthy || uh.Fails > 0 } return uh.CheckDown(uh) } // Full checks whether the upstream host has reached its maximum connections func (uh *UpstreamHost) Full() bool { return uh.MaxConns > 0 && uh.Conns >= uh.MaxConns } // Available checks whether the upstream host is available for proxying to func (uh *UpstreamHost) Available() bool { return !uh.Down() && !uh.Full() } // ServeHTTP satisfies the httpserver.Handler interface. func (p Proxy) ServeHTTP(w http.ResponseWriter, r *http.Request) (int, error) { // start by selecting most specific matching upstream config upstream := p.match(r) if upstream == nil { return p.Next.ServeHTTP(w, r) } // this replacer is used to fill in header field values replacer := httpserver.NewReplacer(r, nil, "") // outreq is the request that makes a roundtrip to the backend outreq := createUpstreamRequest(r) // The keepRetrying function will return true if we should // loop and try to select another host, or false if we // should break and stop retrying. start := time.Now() keepRetrying := func() bool { // if we've tried long enough, break if time.Since(start) >= upstream.GetTryDuration() { return false } // otherwise, wait and try the next available host time.Sleep(upstream.GetTryInterval()) return true } var backendErr error for { // since Select() should give us "up" hosts, keep retrying // hosts until timeout (or until we get a nil host). host := upstream.Select(r) if host == nil { if backendErr == nil { backendErr = errors.New("no hosts available upstream") } if !keepRetrying() { break } continue } if rr, ok := w.(*httpserver.ResponseRecorder); ok && rr.Replacer != nil { rr.Replacer.Set("upstream", host.Name) } proxy := host.ReverseProxy // a backend's name may contain more than just the host, // so we parse it as a URL to try to isolate the host. 
if nameURL, err := url.Parse(host.Name); err == nil { outreq.Host = nameURL.Host if proxy == nil { proxy = NewSingleHostReverseProxy(nameURL, host.WithoutPathPrefix, http.DefaultMaxIdleConnsPerHost) } // use upstream credentials by default if outreq.Header.Get("Authorization") == "" && nameURL.User != nil { pwd, _ := nameURL.User.Password() outreq.SetBasicAuth(nameURL.User.Username(), pwd) } } else { outreq.Host = host.Name } if proxy == nil { return http.StatusInternalServerError, errors.New("proxy for host '" + host.Name + "' is nil") } // set headers for request going upstream if host.UpstreamHeaders != nil { // modify headers for request that will be sent to the upstream host mutateHeadersByRules(outreq.Header, host.UpstreamHeaders, replacer) if hostHeaders, ok := outreq.Header["Host"]; ok && len(hostHeaders) > 0 { outreq.Host = hostHeaders[len(hostHeaders)-1] } } // prepare a function that will update response // headers coming back downstream var downHeaderUpdateFn respUpdateFn if host.DownstreamHeaders != nil { downHeaderUpdateFn = createRespHeaderUpdateFn(host.DownstreamHeaders, replacer) } // tell the proxy to serve the request atomic.AddInt64(&host.Conns, 1) backendErr = proxy.ServeHTTP(w, outreq, downHeaderUpdateFn) atomic.AddInt64(&host.Conns, -1) // if no errors, we're done here if backendErr == nil { return 0, nil } if _, ok := backendErr.(httpserver.MaxBytesExceeded); ok { return http.StatusRequestEntityTooLarge, backendErr } // failover; remember this failure for some time if // request failure counting is enabled timeout := host.FailTimeout if timeout > 0 { atomic.AddInt32(&host.Fails, 1) go func(host *UpstreamHost, timeout time.Duration) { time.Sleep(timeout) atomic.AddInt32(&host.Fails, -1) }(host, timeout) } // if we've tried long enough, break if !keepRetrying() { break } } return http.StatusBadGateway, backendErr } // match finds the best match for a proxy config based on r. func (p Proxy) match(r *http.Request) Upstream { var u Upstream var longestMatch int for _, upstream := range p.Upstreams { basePath := upstream.From() if !httpserver.Path(r.URL.Path).Matches(basePath) || !upstream.AllowedPath(r.URL.Path) { continue } if len(basePath) > longestMatch { longestMatch = len(basePath) u = upstream } } return u } // createUpstremRequest shallow-copies r into a new request // that can be sent upstream. // // Derived from reverseproxy.go in the standard Go httputil package. func createUpstreamRequest(r *http.Request) *http.Request { outreq := new(http.Request) *outreq = *r // includes shallow copies of maps, but okay // We should set body to nil explicitly if request body is empty. // For server requests the Request Body is always non-nil. if r.ContentLength == 0 { outreq.Body = nil } // Restore URL Path if it has been modified if outreq.URL.RawPath != "" { outreq.URL.Opaque = outreq.URL.RawPath } // Remove hop-by-hop headers to the backend. Especially // important is "Connection" because we want a persistent // connection, regardless of what the client sent to us. This // is modifying the same underlying map from r (shallow // copied above) so we only copy it if necessary. 
var copiedHeaders bool for _, h := range hopHeaders { if outreq.Header.Get(h) != "" { if !copiedHeaders { outreq.Header = make(http.Header) copyHeader(outreq.Header, r.Header) copiedHeaders = true } outreq.Header.Del(h) } } if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil { // If we aren't the first proxy, retain prior // X-Forwarded-For information as a comma+space // separated list and fold multiple headers into one. if prior, ok := outreq.Header["X-Forwarded-For"]; ok { clientIP = strings.Join(prior, ", ") + ", " + clientIP } outreq.Header.Set("X-Forwarded-For", clientIP) } return outreq } func createRespHeaderUpdateFn(rules http.Header, replacer httpserver.Replacer) respUpdateFn { return func(resp *http.Response) { mutateHeadersByRules(resp.Header, rules, replacer) } } func mutateHeadersByRules(headers, rules http.Header, repl httpserver.Replacer) { for ruleField, ruleValues := range rules { if strings.HasPrefix(ruleField, "+") { for _, ruleValue := range ruleValues { headers.Add(strings.TrimPrefix(ruleField, "+"), repl.Replace(ruleValue)) } } else if strings.HasPrefix(ruleField, "-") { headers.Del(strings.TrimPrefix(ruleField, "-")) } else if len(ruleValues) > 0 { headers.Set(ruleField, repl.Replace(ruleValues[len(ruleValues)-1])) } } }
1
9482
This applies only in the "set" headers case, but what about "adding" a header (field prefixed with `+`)? A rough standalone sketch of applying the same guard to both cases follows this entry.
caddyserver-caddy
go
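To make the review comment above concrete, here is a minimal standalone sketch of what the header-mutation helper could look like if the empty-replacement guard were applied to the "+" (add) branch as well as the plain "set" branch. This is not the actual Caddy change: the local replacer interface and the staticReplacer type stand in for Caddy's httpserver.Replacer, and the combined guard is only one reading of what the reviewer is asking about.

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// replacer stands in for Caddy's httpserver.Replacer in this sketch.
type replacer interface {
	Replace(string) string
}

// mutateHeadersByRules mirrors the proxy helper discussed above, but skips
// empty replacements in both the "+" (add) branch and the plain "set"
// branch, which is the extension the review comment asks about.
func mutateHeadersByRules(headers, rules http.Header, repl replacer) {
	for ruleField, ruleValues := range rules {
		switch {
		case strings.HasPrefix(ruleField, "+"):
			for _, ruleValue := range ruleValues {
				if v := repl.Replace(ruleValue); v != "" {
					headers.Add(strings.TrimPrefix(ruleField, "+"), v)
				}
			}
		case strings.HasPrefix(ruleField, "-"):
			headers.Del(strings.TrimPrefix(ruleField, "-"))
		case len(ruleValues) > 0:
			if v := repl.Replace(ruleValues[len(ruleValues)-1]); v != "" {
				headers.Set(ruleField, v)
			}
		}
	}
}

// staticReplacer resolves placeholders from a fixed map; unknown placeholders
// become the empty string, mimicking a request field that has no value.
type staticReplacer map[string]string

func (s staticReplacer) Replace(in string) string { return s[in] }

func main() {
	headers := http.Header{}
	rules := http.Header{
		"+X-Real-Ip": {"{remote}"},  // add: placeholder resolves, header is added
		"+X-Empty":   {"{missing}"}, // add: placeholder is empty, header is skipped
		"X-Proto":    {"{scheme}"},  // set: placeholder resolves, header is set
	}
	repl := staticReplacer{"{remote}": "10.0.0.1", "{scheme}": "https"}
	mutateHeadersByRules(headers, rules, repl)
	fmt.Println(headers) // map[X-Proto:[https] X-Real-Ip:[10.0.0.1]]
}

The design question is the same in both branches: a placeholder that resolves to an empty string should probably result in no header at all rather than an empty-valued header.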
@@ -13,6 +13,7 @@ package net.sourceforge.pmd;
  * @version $Revision$, $Date$
  * @since August 30, 2002
  */
+@Deprecated
 public class PMDException extends Exception {
 
     private static final long serialVersionUID = 6938647389367956874L;
1
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd; /** * A convenience exception wrapper. Contains the original exception, if any. * Also, contains a severity number (int). Zero implies no severity. The higher * the number the greater the severity. * * @author Donald A. Leckie * @version $Revision$, $Date$ * @since August 30, 2002 */ public class PMDException extends Exception { private static final long serialVersionUID = 6938647389367956874L; private int severity; /** * Creates a new PMD exception with the specified message. * * @param message * the message */ public PMDException(String message) { super(message); } /** * Creates a new PMD exception with the specified message and the given * reason as root cause. * * @param message * the message * @param reason * the root cause */ public PMDException(String message, Exception reason) { super(message, reason); } public void setSeverity(int severity) { this.severity = severity; } public int getSeverity() { return severity; } }
1
17576
I'm not sure we should deprecate this. I rather like the idea that we would give all exceptions (if we throw any) a common super type. Of course, the exception should not be a checked exception like this one, but rather a runtime exception. We may additionally need an internal exception that we would convert into such a public API exception. But that's out of scope of this PR - that's a question of exception handling.
pmd-pmd
java
@@ -41,6 +41,11 @@ class TabDeletedError(Exception):
     """Exception raised when _tab_index is called for a deleted tab."""
 
 
+class MarkNotSetError(Exception):
+
+    """Exception raised when _tab_index is called for a deleted tab."""
+
+
 class TabbedBrowser(tabwidget.TabWidget):
 
     """A TabWidget with QWebViews inside.
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """The main tabbed browser widget.""" import functools import collections from PyQt5.QtWidgets import QSizePolicy from PyQt5.QtCore import pyqtSignal, pyqtSlot, QTimer, QUrl from PyQt5.QtGui import QIcon from qutebrowser.config import config from qutebrowser.keyinput import modeman from qutebrowser.mainwindow import tabwidget from qutebrowser.browser import signalfilter, webview from qutebrowser.utils import log, usertypes, utils, qtutils, objreg, urlutils UndoEntry = collections.namedtuple('UndoEntry', ['url', 'history']) class TabDeletedError(Exception): """Exception raised when _tab_index is called for a deleted tab.""" class TabbedBrowser(tabwidget.TabWidget): """A TabWidget with QWebViews inside. Provides methods to manage tabs, convenience methods to interact with the current tab (cur_*) and filters signals to re-emit them when they occurred in the currently visible tab. For all tab-specific signals (cur_*) emitted by a tab, this happens: - the signal gets filtered with _filter_signals and self.cur_* gets emitted if the signal occurred in the current tab. Attributes: search_text/search_flags: Search parameters which are shared between all tabs. _win_id: The window ID this tabbedbrowser is associated with. _filter: A SignalFilter instance. _now_focused: The tab which is focused now. _tab_insert_idx_left: Where to insert a new tab with tabbar -> new-tab-position set to 'left'. _tab_insert_idx_right: Same as above, for 'right'. _undo_stack: List of UndoEntry namedtuples of closed tabs. shutting_down: Whether we're currently shutting down. Signals: cur_progress: Progress of the current tab changed (loadProgress). cur_load_started: Current tab started loading (loadStarted) cur_load_finished: Current tab finished loading (loadFinished) cur_statusbar_message: Current tab got a statusbar message (statusBarMessage) cur_url_text_changed: Current URL text changed. cur_link_hovered: Link hovered in current tab (linkHovered) cur_scroll_perc_changed: Scroll percentage of current tab changed. arg 1: x-position in %. arg 2: y-position in %. cur_load_status_changed: Loading status of current tab changed. close_window: The last tab was closed, close this window. resized: Emitted when the browser window has resized, so the completion widget can adjust its size to it. arg: The new size. current_tab_changed: The current tab changed to the emitted WebView. new_tab: Emits the new WebView and its index when a new tab is opened. 
""" cur_progress = pyqtSignal(int) cur_load_started = pyqtSignal() cur_load_finished = pyqtSignal(bool) cur_statusbar_message = pyqtSignal(str) cur_url_text_changed = pyqtSignal(str) cur_link_hovered = pyqtSignal(str, str, str) cur_scroll_perc_changed = pyqtSignal(int, int) cur_load_status_changed = pyqtSignal(str) close_window = pyqtSignal() resized = pyqtSignal('QRect') got_cmd = pyqtSignal(str) current_tab_changed = pyqtSignal(webview.WebView) new_tab = pyqtSignal(webview.WebView, int) def __init__(self, win_id, parent=None): super().__init__(win_id, parent) self._win_id = win_id self._tab_insert_idx_left = 0 self._tab_insert_idx_right = -1 self.shutting_down = False self.tabCloseRequested.connect(self.on_tab_close_requested) self.currentChanged.connect(self.on_current_changed) self.cur_load_started.connect(self.on_cur_load_started) self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding) self._undo_stack = [] self._filter = signalfilter.SignalFilter(win_id, self) self._now_focused = None self.search_text = None self.search_flags = 0 objreg.get('config').changed.connect(self.update_favicons) objreg.get('config').changed.connect(self.update_window_title) objreg.get('config').changed.connect(self.update_tab_titles) def __repr__(self): return utils.get_repr(self, count=self.count()) def _tab_index(self, tab): """Get the index of a given tab. Raises TabDeletedError if the tab doesn't exist anymore. """ try: idx = self.indexOf(tab) except RuntimeError as e: log.webview.debug("Got invalid tab ({})!".format(e)) raise TabDeletedError(e) if idx == -1: log.webview.debug("Got invalid tab (index is -1)!") raise TabDeletedError("index is -1!") return idx def widgets(self): """Get a list of open tab widgets. We don't implement this as generator so we can delete tabs while iterating over the list. """ w = [] for i in range(self.count()): w.append(self.widget(i)) return w @config.change_filter('ui', 'window-title-format') def update_window_title(self): """Change the window title to match the current tab.""" idx = self.currentIndex() if idx == -1: # (e.g. 
last tab removed) log.webview.debug("Not updating window title because index is -1") return tabtitle = self.page_title(idx) widget = self.widget(idx) fields = {} if widget.load_status == webview.LoadStatus.loading: fields['perc'] = '[{}%] '.format(widget.progress) else: fields['perc'] = '' fields['perc_raw'] = widget.progress fields['title'] = tabtitle fields['title_sep'] = ' - ' if tabtitle else '' fields['id'] = self._win_id y = widget.scroll_pos[1] if y <= 0: scroll_pos = 'top' elif y >= 100: scroll_pos = 'bot' else: scroll_pos = '{:2}%'.format(y) fields['scroll_pos'] = scroll_pos fmt = config.get('ui', 'window-title-format') self.window().setWindowTitle(fmt.format(**fields)) def _connect_tab_signals(self, tab): """Set up the needed signals for tab.""" page = tab.page() frame = page.mainFrame() # filtered signals tab.linkHovered.connect( self._filter.create(self.cur_link_hovered, tab)) tab.loadProgress.connect( self._filter.create(self.cur_progress, tab)) frame.loadFinished.connect( self._filter.create(self.cur_load_finished, tab)) frame.loadStarted.connect( self._filter.create(self.cur_load_started, tab)) tab.statusBarMessage.connect( self._filter.create(self.cur_statusbar_message, tab)) tab.scroll_pos_changed.connect( self._filter.create(self.cur_scroll_perc_changed, tab)) tab.scroll_pos_changed.connect(self.on_scroll_pos_changed) tab.url_text_changed.connect( self._filter.create(self.cur_url_text_changed, tab)) tab.load_status_changed.connect( self._filter.create(self.cur_load_status_changed, tab)) tab.url_text_changed.connect( functools.partial(self.on_url_text_changed, tab)) # misc tab.titleChanged.connect( functools.partial(self.on_title_changed, tab)) tab.iconChanged.connect( functools.partial(self.on_icon_changed, tab)) tab.loadProgress.connect( functools.partial(self.on_load_progress, tab)) frame.loadFinished.connect( functools.partial(self.on_load_finished, tab)) frame.loadStarted.connect( functools.partial(self.on_load_started, tab)) page.windowCloseRequested.connect( functools.partial(self.on_window_close_requested, tab)) def current_url(self): """Get the URL of the current tab. Intended to be used from command handlers. Return: The current URL as QUrl. """ widget = self.currentWidget() if widget is None: url = QUrl() else: url = widget.cur_url # It's possible for url to be invalid, but the caller will handle that. qtutils.ensure_valid(url) return url def shutdown(self): """Try to shut down all tabs cleanly.""" self.shutting_down = True for tab in self.widgets(): self._remove_tab(tab) def close_tab(self, tab): """Close a tab. Args: tab: The QWebView to be closed. """ last_close = config.get('tabs', 'last-close') count = self.count() if last_close == 'ignore' and count == 1: return self._remove_tab(tab) if count == 1: # We just closed the last tab above. if last_close == 'close': self.close_window.emit() elif last_close == 'blank': self.openurl(QUrl('about:blank'), newtab=True) elif last_close == 'startpage': url = QUrl(config.get('general', 'startpage')[0]) self.openurl(url, newtab=True) elif last_close == 'default-page': url = config.get('general', 'default-page') self.openurl(url, newtab=True) def _remove_tab(self, tab): """Remove a tab from the tab list and delete it properly. Args: tab: The QWebView to be closed. 
""" idx = self.indexOf(tab) if idx == -1: raise TabDeletedError("tab {} is not contained in " "TabbedWidget!".format(tab)) if tab is self._now_focused: self._now_focused = None if tab is objreg.get('last-focused-tab', None, scope='window', window=self._win_id): objreg.delete('last-focused-tab', scope='window', window=self._win_id) if tab.cur_url.isValid(): history_data = qtutils.serialize(tab.history()) entry = UndoEntry(tab.cur_url, history_data) self._undo_stack.append(entry) elif tab.cur_url.isEmpty(): # There are some good reasons why an URL could be empty # (target="_blank" with a download, see [1]), so we silently ignore # this. # [1] https://github.com/The-Compiler/qutebrowser/issues/163 pass else: # We display a warnings for URLs which are not empty but invalid - # but we don't return here because we want the tab to close either # way. urlutils.invalid_url_error(self._win_id, tab.cur_url, "saving tab") tab.shutdown() self.removeTab(idx) tab.deleteLater() def undo(self): """Undo removing of a tab.""" # Remove unused tab which may be created after the last tab is closed last_close = config.get('tabs', 'last-close') if last_close in ['blank', 'startpage', 'default-page']: only_one_tab_open = self.count() == 1 no_history = self.widget(0).history().count() == 1 urls = { 'blank': QUrl('about:blank'), 'startpage': QUrl(config.get('general', 'startpage')[0]), 'default-page': config.get('general', 'default-page'), } first_tab_url = self.widget(0).page().mainFrame().requestedUrl() last_close_urlstr = urls[last_close].toString().rstrip('/') first_tab_urlstr = first_tab_url.toString().rstrip('/') last_close_url_used = first_tab_urlstr == last_close_urlstr if only_one_tab_open and no_history and last_close_url_used: self.removeTab(0) url, history_data = self._undo_stack.pop() newtab = self.tabopen(url, background=False) qtutils.deserialize(history_data, newtab.history()) @pyqtSlot('QUrl', bool) def openurl(self, url, newtab): """Open a URL, used as a slot. Args: url: The URL to open as QUrl. newtab: True to open URL in a new tab, False otherwise. """ qtutils.ensure_valid(url) if newtab or self.currentWidget() is None: self.tabopen(url, background=False) else: self.currentWidget().openurl(url) @pyqtSlot(int) def on_tab_close_requested(self, idx): """Close a tab via an index.""" tab = self.widget(idx) if tab is None: log.webview.debug("Got invalid tab {} for index {}!".format( tab, idx)) return self.close_tab(tab) @pyqtSlot(webview.WebView) def on_window_close_requested(self, widget): """Close a tab with a widget given.""" try: self.close_tab(widget) except TabDeletedError: log.webview.debug("Requested to close {!r} which does not " "exist!".format(widget)) @pyqtSlot('QUrl', bool) def tabopen(self, url=None, background=None, explicit=False): """Open a new tab with a given URL. Inner logic for open-tab and open-tab-bg. Also connect all the signals we need to _filter_signals. Args: url: The URL to open as QUrl or None for an empty tab. background: Whether to open the tab in the background. if None, the background-tabs setting decides. explicit: Whether the tab was opened explicitly. If this is set, the new position might be different. With the default settings we handle it like Chromium does: - Tabs from clicked links etc. are to the right of the current. - Explicitly opened tabs are at the very right. Return: The opened WebView instance. 
""" if url is not None: qtutils.ensure_valid(url) log.webview.debug("Creating new tab with URL {}".format(url)) if config.get('tabs', 'tabs-are-windows') and self.count() > 0: from qutebrowser.mainwindow import mainwindow window = mainwindow.MainWindow() window.show() tabbed_browser = objreg.get('tabbed-browser', scope='window', window=window.win_id) return tabbed_browser.tabopen(url, background, explicit) tab = webview.WebView(self._win_id, self) self._connect_tab_signals(tab) idx = self._get_new_tab_idx(explicit) self.insertTab(idx, tab, "") if url is not None: tab.openurl(url) if background is None: background = config.get('tabs', 'background-tabs') if not background: self.setCurrentWidget(tab) tab.show() self.new_tab.emit(tab, idx) return tab def _get_new_tab_idx(self, explicit): """Get the index of a tab to insert. Args: explicit: Whether the tab was opened explicitly. Return: The index of the new tab. """ if explicit: pos = config.get('tabs', 'new-tab-position-explicit') else: pos = config.get('tabs', 'new-tab-position') if pos == 'left': idx = self._tab_insert_idx_left # On first sight, we'd think we have to decrement # self._tab_insert_idx_left here, as we want the next tab to be # *before* the one we just opened. However, since we opened a tab # *to the left* of the currently focused tab, indices will shift by # 1 automatically. elif pos == 'right': idx = self._tab_insert_idx_right self._tab_insert_idx_right += 1 elif pos == 'first': idx = 0 elif pos == 'last': idx = -1 else: raise ValueError("Invalid new-tab-position '{}'.".format(pos)) log.webview.debug("new-tab-position {} -> opening new tab at {}, " "next left: {} / right: {}".format( pos, idx, self._tab_insert_idx_left, self._tab_insert_idx_right)) return idx @config.change_filter('tabs', 'show-favicons') def update_favicons(self): """Update favicons when config was changed.""" show = config.get('tabs', 'show-favicons') for i, tab in enumerate(self.widgets()): if show: self.setTabIcon(i, tab.icon()) else: self.setTabIcon(i, QIcon()) @pyqtSlot() def on_load_started(self, tab): """Clear icon and update title when a tab started loading. Args: tab: The tab where the signal belongs to. """ try: idx = self._tab_index(tab) except TabDeletedError: # We can get signals for tabs we already deleted... return self.update_tab_title(idx) if tab.keep_icon: tab.keep_icon = False else: self.setTabIcon(idx, QIcon()) if idx == self.currentIndex(): self.update_window_title() @pyqtSlot() def on_cur_load_started(self): """Leave insert/hint mode when loading started.""" modeman.maybe_leave(self._win_id, usertypes.KeyMode.insert, 'load started') modeman.maybe_leave(self._win_id, usertypes.KeyMode.hint, 'load started') @pyqtSlot(webview.WebView, str) def on_title_changed(self, tab, text): """Set the title of a tab. Slot for the titleChanged signal of any tab. Args: tab: The WebView where the title was changed. text: The text to set. """ if not text: log.webview.debug("Ignoring title change to '{}'.".format(text)) return try: idx = self._tab_index(tab) except TabDeletedError: # We can get signals for tabs we already deleted... return log.webview.debug("Changing title for idx {} to '{}'".format( idx, text)) self.set_page_title(idx, text) if idx == self.currentIndex(): self.update_window_title() @pyqtSlot(webview.WebView, str) def on_url_text_changed(self, tab, url): """Set the new URL as title if there's no title yet. Args: tab: The WebView where the title was changed. url: The new URL. 
""" try: idx = self._tab_index(tab) except TabDeletedError: # We can get signals for tabs we already deleted... return if not self.page_title(idx): self.set_page_title(idx, url) @pyqtSlot(webview.WebView) def on_icon_changed(self, tab): """Set the icon of a tab. Slot for the iconChanged signal of any tab. Args: tab: The WebView where the title was changed. """ if not config.get('tabs', 'show-favicons'): return try: idx = self._tab_index(tab) except TabDeletedError: # We can get signals for tabs we already deleted... return self.setTabIcon(idx, tab.icon()) @pyqtSlot(usertypes.KeyMode) def on_mode_left(self, mode): """Give focus to current tab if command mode was left.""" if mode in (usertypes.KeyMode.command, usertypes.KeyMode.prompt, usertypes.KeyMode.yesno): widget = self.currentWidget() log.modes.debug("Left status-input mode, focusing {!r}".format( widget)) if widget is None: return widget.setFocus() @pyqtSlot(int) def on_current_changed(self, idx): """Set last-focused-tab and leave hinting mode when focus changed.""" if idx == -1 or self.shutting_down: # closing the last tab (before quitting) or shutting down return tab = self.widget(idx) log.modes.debug("Current tab changed, focusing {!r}".format(tab)) tab.setFocus() for mode in (usertypes.KeyMode.hint, usertypes.KeyMode.insert, usertypes.KeyMode.caret, usertypes.KeyMode.passthrough): modeman.maybe_leave(self._win_id, mode, 'tab changed') if self._now_focused is not None: objreg.register('last-focused-tab', self._now_focused, update=True, scope='window', window=self._win_id) self._now_focused = tab self.current_tab_changed.emit(tab) QTimer.singleShot(0, self.update_window_title) self._tab_insert_idx_left = self.currentIndex() self._tab_insert_idx_right = self.currentIndex() + 1 @pyqtSlot() def on_cmd_return_pressed(self): """Set focus when the commandline closes.""" log.modes.debug("Commandline closed, focusing {!r}".format(self)) def on_load_progress(self, tab, perc): """Adjust tab indicator on load progress.""" try: idx = self._tab_index(tab) except TabDeletedError: # We can get signals for tabs we already deleted... return start = config.get('colors', 'tabs.indicator.start') stop = config.get('colors', 'tabs.indicator.stop') system = config.get('colors', 'tabs.indicator.system') color = utils.interpolate_color(start, stop, perc, system) self.set_tab_indicator_color(idx, color) self.update_tab_title(idx) if idx == self.currentIndex(): self.update_window_title() def on_load_finished(self, tab): """Adjust tab indicator when loading finished. We don't take loadFinished's ok argument here as it always seems to be true when the QWebPage has an ErrorPageExtension implemented. See https://github.com/The-Compiler/qutebrowser/issues/84 """ try: idx = self._tab_index(tab) except TabDeletedError: # We can get signals for tabs we already deleted... 
return if tab.page().error_occurred: color = config.get('colors', 'tabs.indicator.error') else: start = config.get('colors', 'tabs.indicator.start') stop = config.get('colors', 'tabs.indicator.stop') system = config.get('colors', 'tabs.indicator.system') color = utils.interpolate_color(start, stop, 100, system) self.set_tab_indicator_color(idx, color) self.update_tab_title(idx) if idx == self.currentIndex(): self.update_window_title() @pyqtSlot() def on_scroll_pos_changed(self): """Update tab and window title when scroll position changed.""" self.update_window_title() self.update_tab_title(self.currentIndex()) def resizeEvent(self, e): """Extend resizeEvent of QWidget to emit a resized signal afterwards. Args: e: The QResizeEvent """ super().resizeEvent(e) self.resized.emit(self.geometry()) def wheelEvent(self, e): """Override wheelEvent of QWidget to forward it to the focused tab. Args: e: The QWheelEvent """ if self._now_focused is not None: self._now_focused.wheelEvent(e) else: e.ignore()
1
14524
You'll need to adjust the docstring :wink:
qutebrowser-qutebrowser
py
@@ -81,7 +81,8 @@ void ReaderProxy::start(const ReaderProxyData& reader_attributes)
             reader_attributes.guid(),
             reader_attributes.remote_locators().unicast,
             reader_attributes.remote_locators().multicast,
-            reader_attributes.m_expectsInlineQos);
+            reader_attributes.m_expectsInlineQos,
+            /*reader_attributes.guid().is_on_same_process_as(writer_->getGuid()*/false);
 
     is_active_ = true;
     reader_attributes_ = reader_attributes;
1
// Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. /** * @file ReaderProxy.cpp * */ #include <fastrtps/log/Log.h> #include <fastrtps/rtps/history/WriterHistory.h> #include <fastrtps/rtps/writer/ReaderProxy.h> #include <fastrtps/rtps/writer/StatefulWriter.h> #include <fastrtps/rtps/resources/TimedEvent.h> #include <fastrtps/utils/TimeConversion.h> #include <fastrtps/rtps/common/LocatorListComparisons.hpp> #include "../participant/RTPSParticipantImpl.h" #include <mutex> #include <cassert> #include <algorithm> #include "../history/HistoryAttributesExtension.hpp" namespace eprosima { namespace fastrtps { namespace rtps { ReaderProxy::ReaderProxy( const WriterTimes& times, const RemoteLocatorsAllocationAttributes& loc_alloc, StatefulWriter* writer) : is_active_(false) , locator_info_(writer->getRTPSParticipant(), loc_alloc.max_unicast_locators, loc_alloc.max_multicast_locators) , reader_attributes_(loc_alloc.max_unicast_locators, loc_alloc.max_multicast_locators) , writer_(writer) , changes_for_reader_(resource_limits_from_history(writer->mp_history->m_att, 0)) , nack_supression_event_(nullptr) , timers_enabled_(false) , last_acknack_count_(0) , last_nackfrag_count_(0) { nack_supression_event_ = new TimedEvent(writer_->getRTPSParticipant()->getEventResource(), [&](TimedEvent::EventCode code) -> bool { if (TimedEvent::EVENT_SUCCESS == code) { writer_->perform_nack_supression(reader_attributes_.guid()); } return false; }, TimeConv::Time_t2MilliSecondsDouble(times.nackSupressionDuration)); stop(); } ReaderProxy::~ReaderProxy() { if (nack_supression_event_) { delete(nack_supression_event_); nack_supression_event_ = nullptr; } } void ReaderProxy::start(const ReaderProxyData& reader_attributes) { locator_info_.start( reader_attributes.guid(), reader_attributes.remote_locators().unicast, reader_attributes.remote_locators().multicast, reader_attributes.m_expectsInlineQos); is_active_ = true; reader_attributes_ = reader_attributes; timers_enabled_.store(reader_attributes_.m_qos.m_reliability.kind == RELIABLE_RELIABILITY_QOS); logInfo(RTPS_WRITER, "Reader Proxy started"); } bool ReaderProxy::update(const ReaderProxyData& reader_attributes) { if ((reader_attributes_.m_qos == reader_attributes.m_qos) && (reader_attributes_.remote_locators().unicast == reader_attributes.remote_locators().unicast) && (reader_attributes_.remote_locators().multicast == reader_attributes.remote_locators().multicast) && (reader_attributes_.m_expectsInlineQos == reader_attributes.m_expectsInlineQos)) { return false; } reader_attributes_ = reader_attributes; locator_info_.update( reader_attributes.remote_locators().unicast, reader_attributes.remote_locators().multicast, reader_attributes.m_expectsInlineQos); return true; } void ReaderProxy::stop() { locator_info_.stop(reader_attributes_.guid()); is_active_ = false; reader_attributes_.guid(c_Guid_Unknown); disable_timers(); changes_for_reader_.clear(); last_acknack_count_ = 0; last_nackfrag_count_ = 0; 
changes_low_mark_ = SequenceNumber_t(); } void ReaderProxy::disable_timers() { if (timers_enabled_.exchange(false)) { nack_supression_event_->cancel_timer(); } } void ReaderProxy::update_nack_supression_interval(const Duration_t& interval) { nack_supression_event_->update_interval(interval); } void ReaderProxy::add_change( const ChangeForReader_t& change, bool restart_nack_supression) { if (restart_nack_supression && timers_enabled_.load()) { nack_supression_event_->restart_timer(); } add_change(change); } void ReaderProxy::add_change( const ChangeForReader_t& change, bool restart_nack_supression, const std::chrono::time_point<std::chrono::steady_clock>& max_blocking_time) { if (restart_nack_supression && timers_enabled_) { nack_supression_event_->restart_timer(max_blocking_time); } add_change(change); } void ReaderProxy::add_change( const ChangeForReader_t& change) { assert(change.getSequenceNumber() > changes_low_mark_); assert(changes_for_reader_.empty() ? true : change.getSequenceNumber() > changes_for_reader_.back().getSequenceNumber()); // For best effort readers, changes are acked when being sent if (changes_for_reader_.empty() && change.getStatus() == ACKNOWLEDGED) { changes_low_mark_ = change.getSequenceNumber(); return; } // Irrelevant changes are not added to the collection if (!change.isRelevant()) { return; } if (changes_for_reader_.push_back(change) == nullptr) { // This should never happen assert(false); logError(RTPS_WRITER, "Error adding change " << change.getSequenceNumber() << " to reader proxy " << \ reader_attributes_.guid()); } } bool ReaderProxy::has_changes() const { return !changes_for_reader_.empty(); } bool ReaderProxy::change_is_acked(const SequenceNumber_t& seq_num) const { if (seq_num <= changes_low_mark_ || changes_for_reader_.empty()) { return true; } ChangeConstIterator chit = find_change(seq_num); if (chit == changes_for_reader_.end()) { // There is a hole in changes_for_reader_ // This means a change was removed. // The case is equivalent to the !chit->isRelevant() code below return true; } return !chit->isRelevant() || chit->getStatus() == ACKNOWLEDGED; } void ReaderProxy::acked_changes_set(const SequenceNumber_t& seq_num) { SequenceNumber_t future_low_mark = seq_num; if (seq_num > changes_low_mark_) { ChangeIterator chit = find_change(seq_num, false); changes_for_reader_.erase(changes_for_reader_.begin(), chit); } else { // Special case. Currently only used on Builtin StatefulWriters // after losing lease duration. 
SequenceNumber_t current_sequence = seq_num; SequenceNumber_t min_sequence = writer_->get_seq_num_min(); if (seq_num < min_sequence) { current_sequence = min_sequence; } future_low_mark = current_sequence; bool should_sort = false; for (; current_sequence <= changes_low_mark_; ++current_sequence) { // Skip all consecutive changes already in the collection ChangeConstIterator it = find_change(current_sequence); while( it != changes_for_reader_.end() && current_sequence <= changes_low_mark_ && it->getSequenceNumber() == current_sequence) { ++current_sequence; ++it; } if (current_sequence <= changes_low_mark_) { CacheChange_t* change = nullptr; if (writer_->mp_history->get_change(current_sequence, writer_->getGuid(), &change)) { should_sort = true; ChangeForReader_t cr(change); cr.setStatus(UNACKNOWLEDGED); changes_for_reader_.push_back(cr); } } } // Keep changes sorted by sequence number if (should_sort) { std::sort(changes_for_reader_.begin(), changes_for_reader_.end(), ChangeForReaderCmp()); } } changes_low_mark_ = future_low_mark - 1; } bool ReaderProxy::requested_changes_set(const SequenceNumberSet_t& seq_num_set) { bool isSomeoneWasSetRequested = false; seq_num_set.for_each([&](SequenceNumber_t sit) { ChangeIterator chit = find_change(sit, true); if (chit != changes_for_reader_.end() && UNACKNOWLEDGED == chit->getStatus()) { chit->setStatus(REQUESTED); chit->markAllFragmentsAsUnsent(); isSomeoneWasSetRequested = true; } }); if (isSomeoneWasSetRequested) { logInfo(RTPS_WRITER, "Requested Changes: " << seq_num_set); } return isSomeoneWasSetRequested; } bool ReaderProxy::set_change_to_status( const SequenceNumber_t& seq_num, ChangeForReaderStatus_t status, bool restart_nack_supression) { if (restart_nack_supression && is_reliable()) { assert(timers_enabled_.load()); nack_supression_event_->restart_timer(); } if (seq_num <= changes_low_mark_) { return false; } ChangeIterator it = find_change(seq_num, true); bool change_was_modified = false; // If the status is UNDERWAY (change was right now sent) and the reader is besteffort, // then the status has to be changed to ACKNOWLEDGED. if(UNDERWAY == status && !is_reliable()) { status = ACKNOWLEDGED; } // If the change following the low mark is acknowledged, low mark is advanced. // Note that this could be the first change in the collection or a hole if the // first unacknowledged change is irrelevant. 
if (status == ACKNOWLEDGED && seq_num == changes_low_mark_ + 1) { changes_low_mark_ = seq_num; change_was_modified = true; } if (it != changes_for_reader_.end()) { if (status == ACKNOWLEDGED && changes_low_mark_ == seq_num) { // Erase the first change when it is acknowledged assert(it == changes_for_reader_.begin()); changes_for_reader_.erase(it); } else { // Otherwise change status if (it->getStatus() != status) { it->setStatus(status); change_was_modified = true; } } } return change_was_modified; } bool ReaderProxy::mark_fragment_as_sent_for_change( const SequenceNumber_t& seq_num, FragmentNumber_t frag_num, bool& was_last_fragment) { was_last_fragment = false; if (seq_num <= changes_low_mark_) { return false; } bool change_found = false; ChangeIterator it = find_change(seq_num, true); if (it != changes_for_reader_.end()) { change_found = true; it->markFragmentsAsSent(frag_num); was_last_fragment = it->getUnsentFragments().empty(); } return change_found; } bool ReaderProxy::perform_nack_supression() { return convert_status_on_all_changes(UNDERWAY, UNACKNOWLEDGED); } bool ReaderProxy::perform_acknack_response() { return convert_status_on_all_changes(REQUESTED, UNSENT); } bool ReaderProxy::convert_status_on_all_changes( ChangeForReaderStatus_t previous, ChangeForReaderStatus_t next) { assert(previous > next); // NOTE: This is only called for REQUESTED=>UNSENT (acknack response) or // UNDERWAY=>UNACKNOWLEDGED (nack supression) bool at_least_one_modified = false; for(ChangeForReader_t& change : changes_for_reader_) { if (change.getStatus() == previous) { at_least_one_modified = true; change.setStatus(next); } } return at_least_one_modified; } void ReaderProxy::change_has_been_removed(const SequenceNumber_t& seq_num) { // Check sequence number is in the container, because it was not clean up. if (changes_for_reader_.empty() || seq_num < changes_for_reader_.begin()->getSequenceNumber()) { return; } // Element may not be in the container when marked as irrelevant. auto chit = find_change(seq_num); changes_for_reader_.erase(chit); } bool ReaderProxy::has_unacknowledged() const { for (const ChangeForReader_t& it : changes_for_reader_) { if (it.isRelevant() && it.getStatus() == UNACKNOWLEDGED) { return true; } } return false; } bool ReaderProxy::requested_fragment_set( const SequenceNumber_t& seq_num, const FragmentNumberSet_t& frag_set) { // Locate the outbound change referenced by the NACK_FRAG ChangeIterator changeIter = find_change(seq_num, true); if (changeIter == changes_for_reader_.end()) { return false; } changeIter->markFragmentsAsUnsent(frag_set); // If it was UNSENT, we shouldn't switch back to REQUESTED to prevent stalling. 
if (changeIter->getStatus() != UNSENT) { changeIter->setStatus(REQUESTED); } return true; } bool ReaderProxy::process_nack_frag( const GUID_t& reader_guid, uint32_t nack_count, const SequenceNumber_t& seq_num, const FragmentNumberSet_t& fragments_state) { if (reader_attributes_.guid() == reader_guid) { if (last_nackfrag_count_ < nack_count) { last_nackfrag_count_ = nack_count; if (requested_fragment_set(seq_num, fragments_state)) { return true; } } } return false; } static bool change_less_than_sequence( const ChangeForReader_t& change, const SequenceNumber_t& seq_num) { return change.getSequenceNumber() < seq_num; } ReaderProxy::ChangeIterator ReaderProxy::find_change( const SequenceNumber_t& seq_num, bool exact) { ReaderProxy::ChangeIterator it; ReaderProxy::ChangeIterator end = changes_for_reader_.end(); it = std::lower_bound(changes_for_reader_.begin(), end, seq_num, change_less_than_sequence); return (!exact) ? it : it == end ? it : it->getSequenceNumber() == seq_num ? it : end; } ReaderProxy::ChangeConstIterator ReaderProxy::find_change(const SequenceNumber_t& seq_num) const { ReaderProxy::ChangeConstIterator it; ReaderProxy::ChangeConstIterator end = changes_for_reader_.end(); it = std::lower_bound(changes_for_reader_.begin(), end, seq_num, change_less_than_sequence); return it == end ? it : it->getSequenceNumber() == seq_num ? it : end; } bool ReaderProxy::are_there_gaps() { return (0 < changes_for_reader_.size() && changes_low_mark_ + uint32_t(changes_for_reader_.size()) != changes_for_reader_.rbegin()->getSequenceNumber()); } } // namespace rtps } // namespace fastrtps } // namespace eprosima
1
16804
Add a TODO explaining why this is being done
eProsima-Fast-DDS
cpp
@@ -26,7 +26,8 @@
 
 goog.provide('Blockly.VariableMap');
 
-goog.require('Blockly.VariableModel');
+goog.require('Blockly.Events.VarDelete');
+goog.require('Blockly.Events.VarRename');
 
 /**
  * Class for a variable map. This contains a dictionary data structure with
1
/** * @license * Visual Blocks Editor * * Copyright 2017 Google Inc. * https://developers.google.com/blockly/ * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @fileoverview Object representing a map of variables and their types. * @author [email protected] (Marisa Leung) */ 'use strict'; goog.provide('Blockly.VariableMap'); goog.require('Blockly.VariableModel'); /** * Class for a variable map. This contains a dictionary data structure with * variable types as keys and lists of variables as values. The list of * variables are the type indicated by the key. * @param {!Blockly.Workspace} workspace The workspace this map belongs to. * @constructor */ Blockly.VariableMap = function(workspace) { /** * A map from variable type to list of variable names. The lists contain all * of the named variables in the workspace, including variables * that are not currently in use. * @type {!Object.<string, !Array.<Blockly.VariableModel>>} * @private */ this.variableMap_ = {}; /** * The workspace this map belongs to. * @type {!Blockly.Workspace} */ this.workspace = workspace; }; /** * Clear the variable map. */ Blockly.VariableMap.prototype.clear = function() { this.variableMap_ = new Object(null); }; /* Begin functions for renaming variables. */ /** * Rename the given variable by updating its name in the variable map. * @param {!Blockly.VariableModel} variable Variable to rename. * @param {string} newName New variable name. * @package */ Blockly.VariableMap.prototype.renameVariable = function(variable, newName) { var type = variable.type; var conflictVar = this.getVariable(newName, type); var blocks = this.workspace.getAllBlocks(); Blockly.Events.setGroup(true); try { if (!conflictVar) { this.renameVariableAndUses_(variable, newName, blocks); } else { // We don't want to rename the variable if one with the exact new name // already exists. console.warn('Unexpected conflict when attempting to rename ' + 'variable with name: ' + variable.name + ' and id: ' + variable.getId() + ' to new name: ' + newName + '. A variable with the new name already exists' + ' and has id: ' + conflictVar.getId()); } } finally { Blockly.Events.setGroup(false); } }; /** * Rename a variable by updating its name in the variable map. Identify the * variable to rename with the given ID. * @param {string} id ID of the variable to rename. * @param {string} newName New variable name. */ Blockly.VariableMap.prototype.renameVariableById = function(id, newName) { var variable = this.getVariableById(id); if (!variable) { throw new Error('Tried to rename a variable that didn\'t exist. ID: ' + id); } this.renameVariable(variable, newName); }; /** * Update the name of the given variable and refresh all references to it. * The new name must not conflict with any existing variable names. * @param {!Blockly.VariableModel} variable Variable to rename. * @param {string} newName New variable name. * @param {!Array.<!Blockly.Block>} blocks The list of all blocks in the * workspace. 
* @private */ Blockly.VariableMap.prototype.renameVariableAndUses_ = function(variable, newName, blocks) { Blockly.Events.fire(new Blockly.Events.VarRename(variable, newName)); variable.name = newName; for (var i = 0; i < blocks.length; i++) { blocks[i].updateVarName(variable); } }; /** * Update the name of the given variable to the same name as an existing * variable. The two variables are coalesced into a single variable with the ID * of the existing variable that was already using newName. * Refresh all references to the variable. * @param {!Blockly.VariableModel} variable Variable to rename. * @param {string} newName New variable name. * @param {!Blockly.VariableModel} conflictVar The variable that was already * using newName. * @param {!Array.<!Blockly.Block>} blocks The list of all blocks in the * workspace. * @private */ Blockly.VariableMap.prototype.renameVariableWithConflict_ = function(variable, newName, conflictVar, blocks) { var type = variable.type; var oldCase = conflictVar.name; if (newName != oldCase) { // Simple rename to change the case and update references. this.renameVariableAndUses_(conflictVar, newName, blocks); } // These blocks now refer to a different variable. // These will fire change events. for (var i = 0; i < blocks.length; i++) { blocks[i].renameVarById(variable.getId(), conflictVar.getId()); } // Finally delete the original variable, which is now unreferenced. Blockly.Events.fire(new Blockly.Events.VarDelete(variable)); // And remove it from the list. var variableList = this.getVariablesOfType(type); var variableIndex = variableList.indexOf(variable); this.variableMap_[type].splice(variableIndex, 1); }; /* End functions for renaming variabless. */ /** * Create a variable with a given name, optional type, and optional id. * @param {!string} name The name of the variable. This must be unique across * each variable type. * @param {?string} opt_type The type of the variable like 'int' or 'string'. * Does not need to be unique. Field_variable can filter variables based on * their type. This will default to '' which is a specific type. * @param {string=} opt_id The unique ID of the variable. This will default to * a UUID. * @return {?Blockly.VariableModel} The newly created variable. */ Blockly.VariableMap.prototype.createVariable = function(name, opt_type, opt_id) { var variable = this.getVariable(name, opt_type); if (variable) { if (opt_id && variable.getId() != opt_id) { throw Error('Variable "' + name + '" is already in use and its id is "' + variable.getId() + '" which conflicts with the passed in ' + 'id, "' + opt_id + '".'); } // The variable already exists and has the same ID. return variable; } if (opt_id && this.getVariableById(opt_id)) { throw Error('Variable id, "' + opt_id + '", is already in use.'); } opt_id = opt_id || Blockly.utils.genUid(); opt_type = opt_type || ''; variable = new Blockly.VariableModel(this.workspace, name, opt_type, opt_id); // If opt_type is not a key, create a new list. if (!this.variableMap_[opt_type]) { this.variableMap_[opt_type] = [variable]; } else { // Else append the variable to the preexisting list. this.variableMap_[opt_type].push(variable); } return variable; }; /* Begin functions for variable deletion. */ /** * Delete a variable. * @param {Blockly.VariableModel} variable Variable to delete. 
*/ Blockly.VariableMap.prototype.deleteVariable = function(variable) { var variableList = this.variableMap_[variable.type]; for (var i = 0, tempVar; tempVar = variableList[i]; i++) { if (tempVar.getId() == variable.getId()) { variableList.splice(i, 1); Blockly.Events.fire(new Blockly.Events.VarDelete(variable)); return; } } }; /** * Delete a variable and all of its uses from this workspace by the passed * in ID. May prompt the user for confirmation. * @param {string} id ID of variable to delete. */ Blockly.VariableMap.prototype.deleteVariableById = function(id) { var variable = this.getVariableById(id); if (variable) { // Check whether this variable is a function parameter before deleting. var variableName = variable.name; var uses = this.getVariableUsesById(id); for (var i = 0, block; block = uses[i]; i++) { if (block.type == Blockly.PROCEDURES_DEFINITION_BLOCK_TYPE || block.type == 'procedures_defreturn') { var procedureName = block.getFieldValue('NAME'); var deleteText = Blockly.Msg.CANNOT_DELETE_VARIABLE_PROCEDURE. replace('%1', variableName). replace('%2', procedureName); Blockly.alert(deleteText); return; } } var map = this; if (uses.length > 1) { // Confirm before deleting multiple blocks. var confirmText = Blockly.Msg.DELETE_VARIABLE_CONFIRMATION. replace('%1', String(uses.length)). replace('%2', variableName); Blockly.confirm(confirmText, function(ok) { if (ok) { map.deleteVariableInternal_(variable, uses); } }); } else { // No confirmation necessary for a single block. map.deleteVariableInternal_(variable, uses); } } else { console.warn("Can't delete non-existent variable: " + id); } }; /** * Deletes a variable and all of its uses from this workspace without asking the * user for confirmation. * @param {!Blockly.VariableModel} variable Variable to delete. * @param {!Array.<!Blockly.Block>} uses An array of uses of the variable. * @private */ Blockly.VariableMap.prototype.deleteVariableInternal_ = function(variable, uses) { var existingGroup = Blockly.Events.getGroup(); if (!existingGroup) { Blockly.Events.setGroup(true); } try { for (var i = 0; i < uses.length; i++) { uses[i].dispose(true, false); } this.deleteVariable(variable); } finally { if (!existingGroup) { Blockly.Events.setGroup(false); } } }; /* End functions for variable deletion. */ /** * Find the variable by the given name and type and return it. Return null if * it is not found. * @param {string} name The name to check for. * @param {string=} opt_type The type of the variable. If not provided it * defaults to the empty string, which is a specific type. * @return {Blockly.VariableModel} The variable with the given name, or null if * it was not found. */ Blockly.VariableMap.prototype.getVariable = function(name, opt_type) { var type = opt_type || ''; var list = this.variableMap_[type]; if (list) { for (var j = 0, variable; variable = list[j]; j++) { if (variable.name == name) { return variable; } } } return null; }; /** * Find the variable by the given ID and return it. Return null if it is not * found. * @param {!string} id The id to check for. * @return {?Blockly.VariableModel} The variable with the given id. */ Blockly.VariableMap.prototype.getVariableById = function(id) { var keys = Object.keys(this.variableMap_); for (var i = 0; i < keys.length; i++ ) { var key = keys[i]; for (var j = 0, variable; variable = this.variableMap_[key][j]; j++) { if (variable.getId() == id) { return variable; } } } return null; }; /** * Get a list containing all of the variables of a specified type. 
If type is * null, return list of variables with empty string type. * @param {?string} type Type of the variables to find. * @return {!Array.<!Blockly.VariableModel>} The sought after variables of the * passed in type. An empty array if none are found. */ Blockly.VariableMap.prototype.getVariablesOfType = function(type) { type = type || ''; var variable_list = this.variableMap_[type]; if (variable_list) { return variable_list.slice(); } return []; }; /** * Return all variable types. This list always contains the empty string. * @return {!Array.<string>} List of variable types. * @package */ Blockly.VariableMap.prototype.getVariableTypes = function() { var types = Object.keys(this.variableMap_); var hasEmpty = false; for (var i = 0; i < types.length; i++) { if (types[i] == '') { hasEmpty = true; } } if (!hasEmpty) { types.push(''); } return types; }; /** * Return all variables of all types. * @return {!Array.<!Blockly.VariableModel>} List of variable models. */ Blockly.VariableMap.prototype.getAllVariables = function() { var all_variables = []; var keys = Object.keys(this.variableMap_); for (var i = 0; i < keys.length; i++ ) { all_variables = all_variables.concat(this.variableMap_[keys[i]]); } return all_variables; }; /** * Find all the uses of a named variable. * @param {string} id ID of the variable to find. * @return {!Array.<!Blockly.Block>} Array of block usages. */ Blockly.VariableMap.prototype.getVariableUsesById = function(id) { var uses = []; var blocks = this.workspace.getAllBlocks(); // Iterate through every block and check the name. for (var i = 0; i < blocks.length; i++) { var blockVariables = blocks[i].getVarModels(); if (blockVariables) { for (var j = 0; j < blockVariables.length; j++) { if (blockVariables[j].getId() == id) { uses.push(blocks[i]); } } } } return uses; };
1
9,307
This file still uses Blockly.VariableModel...
LLK-scratch-blocks
js
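The review note for this record points out that variable_map.js still references Blockly.VariableModel (createVariable calls `new Blockly.VariableModel(...)`), so dropping that goog.require while adding the event requires risks a missing dependency in uncompiled mode. A minimal sketch, assuming the reviewer's suggestion is simply to keep the existing require alongside the new ones:

goog.provide('Blockly.VariableMap');

// Keep the model dependency: createVariable() still constructs
// `new Blockly.VariableModel(...)` further down in this file.
goog.require('Blockly.VariableModel');
// New event classes fired by renameVariableAndUses_ and deleteVariable.
goog.require('Blockly.Events.VarDelete');
goog.require('Blockly.Events.VarRename');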
@@ -129,8 +129,10 @@ type ManagedRemoteAccess struct { SSHKeyName *string `json:"sshKeyName,omitempty"` // SourceSecurityGroups specifies which security groups are allowed access - // An empty array opens port 22 to the public internet SourceSecurityGroups []string `json:"sourceSecurityGroups,omitempty"` + + // Public specifies whether to open port 22 to the public internet + Public bool `json:"public,omitempty"` } // AWSManagedMachinePoolStatus defines the observed state of AWSManagedMachinePool
1
/* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha3 import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1alpha3" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3" "sigs.k8s.io/cluster-api/errors" ) const ( // ManagedMachinePoolFinalizer allows the controller to clean up resources on delete ManagedMachinePoolFinalizer = "awsmanagedmachinepools.infrastructure.cluster.x-k8s.io" ) // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool type ManagedMachineAMIType string const ( // Al2x86_64 is the default AMI type Al2x86_64 ManagedMachineAMIType = "AL2_x86_64" // Al2x86_64GPU is the x86-64 GPU AMI type Al2x86_64GPU ManagedMachineAMIType = "AL2_x86_64_GPU" // Al2Arm64 is the Arm AMI type Al2Arm64 ManagedMachineAMIType = "AL2_ARM_64" ) var ( // DefaultEKSNodegroupRole is the name of the default IAM role to use for EKS nodegroups // if no other role is supplied in the spec and if iam role creation is not enabled. The default // can be created using clusterawsadm or created manually DefaultEKSNodegroupRole = fmt.Sprintf("eks-nodegroup%s", infrav1.DefaultNameSuffix) ) // AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool type AWSManagedMachinePoolSpec struct { // EKSNodegroupName specifies the name of the nodegroup in AWS // corresponding to this MachinePool. If you don't specify a name // then a default name will be created based on the namespace and // name of the managed machine pool. // +optional EKSNodegroupName string `json:"eksNodegroupName,omitempty"` // SubnetIDs specifies which subnets are used for the // auto scaling group of this nodegroup // +optional SubnetIDs []string `json:"subnetIDs,omitempty"` // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the // ones added by default. // +optional AdditionalTags infrav1.Tags `json:"additionalTags,omitempty"` // RoleName specifies the name of IAM role for the node group. // If the role is pre-existing we will treat it as unmanaged // and not delete it on deletion. If the EKSEnableIAM feature // flag is true and no name is supplied then a role is created. // +optional RoleName string `json:"roleName,omitempty"` // AMIVersion defines the desired AMI release version. 
If no version number // is supplied then the latest version for the Kubernetes version // will be used // +kubebuilder:validation:MinLength:=2 // +optional AMIVersion *string `json:"amiVersion,omitempty"` // AMIType defines the AMI type // +kubebuilder:validation:Enum:=AL2_x86_64;AL2_x86_64_GPU;AL2_ARM_64 // +kubebuilder:default:=AL2_x86_64 // +optional AMIType *ManagedMachineAMIType `json:"amiType,omitempty"` // Labels specifies labels for the Kubernetes node objects // +optional Labels map[string]string `json:"labels,omitempty"` // DiskSize specifies the root disk size // +optional DiskSize *int32 `json:"diskSize,omitempty"` // InstanceType specifies the AWS instance type // +optional InstanceType *string `json:"instanceType,omitempty"` // Scaling specifies scaling for the ASG behind this pool // +optional Scaling *ManagedMachinePoolScaling `json:"scaling,omitempty"` // RemoteAccess specifies how machines can be accessed remotely // +optional RemoteAccess *ManagedRemoteAccess `json:"remoteAccess,omitempty"` // ProviderIDList are the provider IDs of instances in the // autoscaling group corresponding to the nodegroup represented by this // machine pool // +optional ProviderIDList []string `json:"providerIDList,omitempty"` } // ManagedMachinePoolScaling specifies scaling options type ManagedMachinePoolScaling struct { MinSize *int32 `json:"minSize,omitempty"` MaxSize *int32 `json:"maxSize,omitempty"` } // ManagedRemoteAccess specifies remote access settings for EC2 instances type ManagedRemoteAccess struct { // SSHKeyName specifies which EC2 SSH key can be used to access machines. // If left empty, the key from the control plane is used. SSHKeyName *string `json:"sshKeyName,omitempty"` // SourceSecurityGroups specifies which security groups are allowed access // An empty array opens port 22 to the public internet SourceSecurityGroups []string `json:"sourceSecurityGroups,omitempty"` } // AWSManagedMachinePoolStatus defines the observed state of AWSManagedMachinePool type AWSManagedMachinePoolStatus struct { // Ready denotes that the AWSManagedMachinePool nodegroup has joined // the cluster // +kubebuilder:default=false Ready bool `json:"ready"` // Replicas is the most recently observed number of replicas. // +optional Replicas int32 `json:"replicas"` // FailureReason will be set in the event that there is a terminal problem // reconciling the MachinePool and will contain a succinct value suitable // for machine interpretation. // // This field should not be set for transitive errors that a controller // faces that are expected to be fixed automatically over // time (like service outages), but instead indicate that something is // fundamentally wrong with the Machine's spec or the configuration of // the controller, and that manual intervention is required. Examples // of terminal errors would be invalid combinations of settings in the // spec, values that are unsupported by the controller, or the // responsible controller itself being critically misconfigured. // // Any transient errors that occur during the reconciliation of MachinePools // can be added as events to the MachinePool object and/or logged in the // controller's output. // +optional FailureReason *errors.MachineStatusError `json:"failureReason,omitempty"` // FailureMessage will be set in the event that there is a terminal problem // reconciling the MachinePool and will contain a more verbose string suitable // for logging and human consumption. 
// // This field should not be set for transitive errors that a controller // faces that are expected to be fixed automatically over // time (like service outages), but instead indicate that something is // fundamentally wrong with the MachinePool's spec or the configuration of // the controller, and that manual intervention is required. Examples // of terminal errors would be invalid combinations of settings in the // spec, values that are unsupported by the controller, or the // responsible controller itself being critically misconfigured. // // Any transient errors that occur during the reconciliation of MachinePools // can be added as events to the MachinePool object and/or logged in the // controller's output. // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions defines current service state of the managed machine pool // +optional Conditions clusterv1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true // +kubebuilder:resource:path=awsmanagedmachinepools,scope=Namespaced,categories=cluster-api // +kubebuilder:storageversion // +kubebuilder:subresource:status // +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready",description="MachinePool ready status" // +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".status.replicas",description="Number of replicas" // AWSManagedMachinePool is the Schema for the awsmanagedmachinepools API type AWSManagedMachinePool struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec AWSManagedMachinePoolSpec `json:"spec,omitempty"` Status AWSManagedMachinePoolStatus `json:"status,omitempty"` } func (r *AWSManagedMachinePool) GetConditions() clusterv1.Conditions { return r.Status.Conditions } func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1.Conditions) { r.Status.Conditions = conditions } // +kubebuilder:object:root=true // AWSManagedMachinePoolList contains a list of AWSManagedMachinePools type AWSManagedMachinePoolList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` Items []AWSManagedMachinePool `json:"items"` } func init() { SchemeBuilder.Register(&AWSManagedMachinePool{}, &AWSManagedMachinePoolList{}) }
1
17,910
Small nit: in the PR description it's `publicAccess`, but here it's `public`. Guessing the preferred naming is `public`?
kubernetes-sigs-cluster-api-provider-aws
go
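The nit for this record is only about the JSON field name ("publicAccess" in the PR description versus "public" in the type). A hedged sketch of the struct with the alternative spelling spelled out in a comment; this is illustrative of the naming question, not the project's final choice:

// ManagedRemoteAccess specifies remote access settings for EC2 instances.
type ManagedRemoteAccess struct {
	// SSHKeyName specifies which EC2 SSH key can be used to access machines.
	// If left empty, the key from the control plane is used.
	SSHKeyName *string `json:"sshKeyName,omitempty"`

	// SourceSecurityGroups specifies which security groups are allowed access.
	SourceSecurityGroups []string `json:"sourceSecurityGroups,omitempty"`

	// Public specifies whether to open port 22 to the public internet.
	// If the PR description's naming were preferred instead, the field could be
	// PublicAccess with the tag `json:"publicAccess,omitempty"`.
	Public bool `json:"public,omitempty"`
}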
@@ -106,10 +106,11 @@ func (c *Variable) Close() error { // Decode is a function type for unmarshaling/decoding bytes into given object. type Decode func([]byte, interface{}) error -// Decoder is a helper for decoding bytes into a particular Go type object. The Variable objects -// produced by a particular driver.Watcher should always contain the same type for Variable.Value -// field. A driver.Watcher can use/construct a Decoder object with an associated type (Type) and -// decoding function (Func) for decoding retrieved bytes into Variable.Value. +// Decoder is a helper for decoding bytes into a particular Go type object. The +// Variable objects produced by a particular driver.Watcher should always +// contain the same type for Variable.Value field. A driver.Watcher can +// use/construct a Decoder object with an associated type (Type) and decoding +// function (Func) for decoding retrieved bytes into Variable.Value. type Decoder struct { Type reflect.Type // Func is a Decode function.
1
// Copyright 2018 The Go Cloud Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limtations under the License. // Package runtimevar provides an interface for reading runtime variables and // ability to detect changes and get updates on those variables. package runtimevar import ( "bytes" "context" "encoding/gob" "encoding/json" "fmt" "reflect" "time" "github.com/google/go-cloud/runtimevar/driver" ) // Snapshot contains a variable and metadata about it. type Snapshot struct { // Value is an object containing a runtime variable The type of // this object is set by the driver and it should always be the same type for the same Variable // object. A driver implementation can provide the ability to configure the object type and a // decoding scheme where variables are stored as bytes in the backend service. Clients // should not mutate this object as it can be accessed by multiple goroutines. Value interface{} // UpdateTime is the time when the last changed was detected. UpdateTime time.Time } // Variable provides the ability to read runtime variables with its blocking Watch method. type Variable struct { watcher driver.Watcher nextCall time.Time prev driver.State } // New constructs a Variable object given a driver.Watcher implementation. func New(w driver.Watcher) *Variable { return &Variable{watcher: w} } // Watch blocks until there are variable changes, the Context's Done channel // closes or a new error occurs. // // If the variable changes, the method returns a Snapshot object containing the // updated value. // // If method returns an error, the returned Snapshot object is a zero value and cannot be used. // // The first call to this method should return the current variable unless there are errors in // retrieving the value. // // Users should not call this method from multiple goroutines as implementations may not guarantee // safety in data access. It is typical to use only one goroutine calling this method in a loop. // // To stop this function from blocking, caller can passed in Context object constructed via // WithCancel and call the cancel function. func (c *Variable) Watch(ctx context.Context) (Snapshot, error) { for { wait := c.nextCall.Sub(time.Now()) if wait > 0 { select { case <-ctx.Done(): return Snapshot{}, ctx.Err() case <-time.After(wait): // Continue. } } cur, wait := c.watcher.WatchVariable(ctx, c.prev) c.nextCall = time.Now().Add(wait) if cur == nil { // No change. continue } // Something new to return! c.prev = cur v, err := cur.Value() if err != nil { // Mask underlying errors. return Snapshot{}, fmt.Errorf("Variable.Watch: %v", err) } return Snapshot{Value: v, UpdateTime: cur.UpdateTime()}, nil } } // Close cleans up any resources used by the Variable object. func (c *Variable) Close() error { return c.watcher.Close() } // Decode is a function type for unmarshaling/decoding bytes into given object. type Decode func([]byte, interface{}) error // Decoder is a helper for decoding bytes into a particular Go type object. 
The Variable objects // produced by a particular driver.Watcher should always contain the same type for Variable.Value // field. A driver.Watcher can use/construct a Decoder object with an associated type (Type) and // decoding function (Func) for decoding retrieved bytes into Variable.Value. type Decoder struct { Type reflect.Type // Func is a Decode function. Func Decode } // NewDecoder constructs a Decoder for given object that uses the given Decode function. func NewDecoder(obj interface{}, fn Decode) *Decoder { return &Decoder{ Type: reflect.TypeOf(obj), Func: fn, } } // Decode decodes given bytes into an object of type Type using Func. func (d *Decoder) Decode(b []byte) (interface{}, error) { nv := reflect.New(d.Type).Interface() if err := d.Func(b, nv); err != nil { return nil, err } ptr := reflect.ValueOf(nv) return ptr.Elem().Interface(), nil } // Simple Decoder objects. var ( StringDecoder = &Decoder{ Type: reflect.TypeOf(""), Func: stringDecode, } BytesDecoder = &Decoder{ Type: reflect.TypeOf([]byte{}), Func: bytesDecode, } ) // Decode functions. var ( JSONDecode = json.Unmarshal ) // GobDecode gob decodes bytes into given object. func GobDecode(data []byte, obj interface{}) error { return gob.NewDecoder(bytes.NewBuffer(data)).Decode(obj) } func stringDecode(b []byte, obj interface{}) error { // obj is a pointer to a string. v := reflect.ValueOf(obj).Elem() v.SetString(string(b)) return nil } func bytesDecode(b []byte, obj interface{}) error { // obj is a pointer to []byte. v := reflect.ValueOf(obj).Elem() v.SetBytes(b) return nil }
1
11,500
Maybe `driver.Watcher` -> provider (2x)? This is the concrete type; the user doesn't really know anything about the driver.
google-go-cloud
go
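The comment for this record suggests saying "provider" instead of "driver.Watcher" in the user-facing doc comment, since Decoder users see the concrete Variable type and never touch the driver package. A sketch of how the rewrapped comment from the diff might read with that substitution; the exact wording is an assumption, not the merged text:

// Decoder is a helper for decoding bytes into a particular Go type object. The
// Variable objects produced by a particular provider should always contain the
// same type for the Variable.Value field. A provider can use/construct a
// Decoder object with an associated type (Type) and decoding function (Func)
// for decoding retrieved bytes into Variable.Value.
type Decoder struct {
	Type reflect.Type
	// Func is a Decode function.
	Func Decode
}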
@@ -21,12 +21,10 @@ import java.io.IOException; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.SortField; -abstract class DocValuesWriter { - abstract void finish(int numDoc); +abstract class DocValuesWriter<T> { abstract void flush(SegmentWriteState state, Sorter.DocMap sortMap, DocValuesConsumer consumer) throws IOException; - abstract Sorter.DocComparator getDocComparator(int numDoc, SortField sortField) throws IOException; + abstract T getDocValues(); abstract DocIdSetIterator getDocIdSet(); }
1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.index; import java.io.IOException; import org.apache.lucene.codecs.DocValuesConsumer; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.SortField; abstract class DocValuesWriter { abstract void finish(int numDoc); abstract void flush(SegmentWriteState state, Sorter.DocMap sortMap, DocValuesConsumer consumer) throws IOException; abstract Sorter.DocComparator getDocComparator(int numDoc, SortField sortField) throws IOException; abstract DocIdSetIterator getDocIdSet(); }
1
33,857
Can we remove this, since `getDocValues` already returns an iterator? (We might need to do `T extends DocIdSetIterator` above.)
apache-lucene-solr
java
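The review for this record asks whether getDocIdSet() can be dropped from the generic rewrite by bounding the type parameter, so that the value returned by getDocValues() is itself iterable. A sketch of what that bounded declaration could look like, assuming only the methods visible in the diff (not the final Lucene API):

import java.io.IOException;

import org.apache.lucene.codecs.DocValuesConsumer;
import org.apache.lucene.search.DocIdSetIterator;

abstract class DocValuesWriter<T extends DocIdSetIterator> {
  abstract void flush(SegmentWriteState state, Sorter.DocMap sortMap,
      DocValuesConsumer consumer) throws IOException;

  // With T bounded to DocIdSetIterator, callers that previously needed
  // getDocIdSet() can iterate the returned doc values directly.
  abstract T getDocValues();
}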
@@ -0,0 +1,8 @@ +class String + def fix_encoding_if_invalid! + unless self.valid_encoding? + self.encode!('utf-8', 'binary', invalid: :replace, undef: :replace) + end + self + end +end
1
1
6,783
Don't check this in. This is already implemented in core_extensions/ruby/string.rb
blackducksoftware-ohloh-ui
rb
@@ -277,7 +277,7 @@ Block.prototype._getHash = function() { var idProperty = { configurable: false, - writeable: false, + enumerable: true, /** * @returns {string} - The big endian hash buffer of the header */
1
'use strict'; var _ = require('lodash'); var BlockHeader = require('./blockheader'); var BN = require('../crypto/bn'); var BufferUtil = require('../util/buffer'); var BufferReader = require('../encoding/bufferreader'); var BufferWriter = require('../encoding/bufferwriter'); var Hash = require('../crypto/hash'); var JSUtil = require('../util/js'); var Transaction = require('../transaction'); var $ = require('../util/preconditions'); /** * Instantiate a Block from a Buffer, JSON object, or Object with * the properties of the Block * * @param {*} - A Buffer, JSON string, or Object * @returns {Block} * @constructor */ function Block(arg) { if (!(this instanceof Block)) { return new Block(arg); } _.extend(this, Block._from(arg)); return this; } // https://github.com/bitcoin/bitcoin/blob/b5fa132329f0377d787a4a21c1686609c2bfaece/src/primitives/block.h#L14 Block.MAX_BLOCK_SIZE = 1000000; /** * @param {*} - A Buffer, JSON string or Object * @returns {Object} - An object representing block data * @throws {TypeError} - If the argument was not recognized * @private */ Block._from = function _from(arg) { var info = {}; if (BufferUtil.isBuffer(arg)) { info = Block._fromBufferReader(BufferReader(arg)); } else if (JSUtil.isValidJSON(arg)) { info = Block._fromJSON(arg); } else if (_.isObject(arg)) { info = Block._fromObject(arg); } else { throw new TypeError('Unrecognized argument for Block'); } return info; }; /** * @param {String} - A JSON string * @returns {Object} - An object representing block data * @private */ Block._fromJSON = function _fromJSON(data) { $.checkArgument(JSUtil.isValidJSON(data), 'data must be valid JSON'); data = JSON.parse(data); return Block._fromObject(data); }; /** * @param {Object} - A plain javascript object * @returns {Object} - An object representing block data * @private */ Block._fromObject = function _fromObject(data) { var transactions = []; data.transactions.forEach(function(tx) { transactions.push(Transaction().fromJSON(tx)); }); var info = { header: BlockHeader.fromObject(data.header), transactions: transactions }; return info; }; /** * @param {String} - A JSON string * @returns {Block} - An instance of block */ Block.fromJSON = function fromJSON(json) { var info = Block._fromJSON(json); return new Block(info); }; /** * @param {Object} - A plain javascript object * @returns {Block} - An instance of block */ Block.fromObject = function fromObject(obj) { var info = Block._fromObject(obj); return new Block(info); }; /** * @param {BufferReader} - Block data * @returns {Object} - An object representing the block data * @private */ Block._fromBufferReader = function _fromBufferReader(br) { var info = {}; $.checkState(!br.finished(), 'No block data received'); info.header = BlockHeader.fromBufferReader(br); var transactions = br.readVarintNum(); info.transactions = []; for (var i = 0; i < transactions; i++) { info.transactions.push(Transaction().fromBufferReader(br)); } return info; }; /** * @param {BufferReader} - A buffer reader of the block * @returns {Block} - An instance of block */ Block.fromBufferReader = function fromBufferReader(br) { $.checkArgument(br, 'br is required'); var info = Block._fromBufferReader(br); return new Block(info); }; /** * @param {Buffer} - A buffer of the block * @returns {Block} - An instance of block */ Block.fromBuffer = function fromBuffer(buf) { return Block.fromBufferReader(new BufferReader(buf)); }; /** * @param {string} - str - A hex encoded string of the block * @returns {Block} - A hex encoded string of the block */ Block.fromString 
= function fromString(str) { var buf = new Buffer(str, 'hex'); return Block.fromBuffer(buf); }; /** * @param {Binary} - Raw block binary data or buffer * @returns {Block} - An instance of block */ Block.fromRawBlock = function fromRawBlock(data) { if (!BufferUtil.isBuffer(data)) { data = new Buffer(data, 'binary'); } var br = BufferReader(data); br.pos = Block.Values.START_OF_BLOCK; var info = Block._fromBufferReader(br); return new Block(info); }; /** * @returns {Object} - A plain object with the block properties */ Block.prototype.toObject = function toObject() { var transactions = []; this.transactions.forEach(function(tx) { transactions.push(tx.toObject()); }); return { header: this.header.toObject(), transactions: transactions }; }; /** * @returns {string} - A JSON string */ Block.prototype.toJSON = function toJSON() { return JSON.stringify(this.toObject()); }; /** * @returns {Buffer} - A buffer of the block */ Block.prototype.toBuffer = function toBuffer() { return this.toBufferWriter().concat(); }; /** * @returns {string} - A hex encoded string of the block */ Block.prototype.toString = function toString() { return this.toBuffer().toString('hex'); }; /** * @param {BufferWriter} - An existing instance of BufferWriter * @returns {BufferWriter} - An instance of BufferWriter representation of the Block */ Block.prototype.toBufferWriter = function toBufferWriter(bw) { if (!bw) { bw = new BufferWriter(); } bw.write(this.header.toBuffer()); bw.writeVarintNum(this.transactions.length); for (var i = 0; i < this.transactions.length; i++) { this.transactions[i].toBufferWriter(bw); } return bw; }; /** * Will iterate through each transaction and return an array of hashes * @returns {Array} - An array with transaction hashes */ Block.prototype.getTransactionHashes = function getTransactionHashes() { var hashes = []; if (this.transactions.length === 0) { return [Block.Values.NULL_HASH]; } for (var t = 0; t < this.transactions.length; t++) { hashes.push(this.transactions[t]._getHash()); } return hashes; }; /** * Will build a merkle tree of all the transactions, ultimately arriving at * a single point, the merkle root. * @link https://en.bitcoin.it/wiki/Protocol_specification#Merkle_Trees * @returns {Array} - An array with each level of the tree after the other. */ Block.prototype.getMerkleTree = function getMerkleTree() { var tree = this.getTransactionHashes(); var j = 0; for (var size = this.transactions.length; size > 1; size = Math.floor((size + 1) / 2)) { for (var i = 0; i < size; i += 2) { var i2 = Math.min(i + 1, size - 1); var buf = Buffer.concat([tree[j + i], tree[j + i2]]); tree.push(Hash.sha256sha256(buf)); } j += size; } return tree; }; /** * Calculates the merkleRoot from the transactions. 
* @returns {Buffer} - A buffer of the merkle root hash */ Block.prototype.getMerkleRoot = function getMerkleRoot() { var tree = this.getMerkleTree(); return tree[tree.length - 1]; }; /** * Verifies that the transactions in the block match the header merkle root * @returns {Boolean} - If the merkle roots match */ Block.prototype.validMerkleRoot = function validMerkleRoot() { var h = new BN(this.header.merkleRoot.toString('hex'), 'hex'); var c = new BN(this.getMerkleRoot().toString('hex'), 'hex'); if (h.cmp(c) !== 0) { return false; } return true; }; /** * @returns {Buffer} - The little endian hash buffer of the header */ Block.prototype._getHash = function() { return this.header._getHash(); }; var idProperty = { configurable: false, writeable: false, /** * @returns {string} - The big endian hash buffer of the header */ get: function() { if (!this._id) { this._id = this.header.id; } return this._id; }, set: _.noop }; Object.defineProperty(Block.prototype, 'id', idProperty); Object.defineProperty(Block.prototype, 'hash', idProperty); /** * @returns {string} - A string formated for the console */ Block.prototype.inspect = function inspect() { return '<Block ' + this.id + '>'; }; Block.Values = { START_OF_BLOCK: 8, // Start of block in raw block data NULL_HASH: new Buffer('0000000000000000000000000000000000000000000000000000000000000000', 'hex') }; module.exports = Block;
1
14,268
This is used for both `id` and `hash`; we may not want both of these to be enumerable.
bitpay-bitcore
js
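The concern for this record is that a single shared descriptor now makes both `id` and `hash` enumerable. One possible way to address it, keeping the shared lazy getter but exposing only `id` during enumeration; a sketch only, the project may prefer a different split:

var idProperty = {
  configurable: false,
  /**
   * @returns {string} - The big endian hash buffer of the header
   */
  get: function() {
    if (!this._id) {
      this._id = this.header.id;
    }
    return this._id;
  },
  set: _.noop
};
// Only `id` shows up in enumeration; `hash` reuses the same lazy getter but
// stays non-enumerable (descriptors default to enumerable: false).
Object.defineProperty(Block.prototype, 'id', _.extend({enumerable: true}, idProperty));
Object.defineProperty(Block.prototype, 'hash', idProperty);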
@@ -641,9 +641,16 @@ module RSpec formatter_loader.default_formatter = value end - # @private + # @api public + # + # Returns a duplicate of the formatters currently loaded in + # the `FormatterLoader` for introspection. + # + # Note as this is a duplicate, any mutations will be disregarded. + # + # @return [Array] the formatters currently loaded def formatters - formatter_loader.formatters + formatter_loader.formatters.dup end # @private
1
require 'fileutils' RSpec::Support.require_rspec_core "backtrace_formatter" RSpec::Support.require_rspec_core "ruby_project" RSpec::Support.require_rspec_core "formatters/deprecation_formatter" module RSpec module Core # Stores runtime configuration information. # # Configuration options are loaded from `~/.rspec`, `.rspec`, # `.rspec-local`, command line switches, and the `SPEC_OPTS` environment # variable (listed in lowest to highest precedence; for example, an option # in `~/.rspec` can be overridden by an option in `.rspec-local`). # # @example Standard settings # RSpec.configure do |c| # c.drb = true # c.drb_port = 1234 # c.default_path = 'behavior' # end # # @example Hooks # RSpec.configure do |c| # c.before(:suite) { establish_connection } # c.before(:example) { log_in_as :authorized } # c.around(:example) { |ex| Database.transaction(&ex) } # end # # @see RSpec.configure # @see Hooks class Configuration include RSpec::Core::Hooks # @private class MustBeConfiguredBeforeExampleGroupsError < StandardError; end # @private def self.define_reader(name) define_method(name) do variable = instance_variable_defined?("@#{name}") ? instance_variable_get("@#{name}") : nil value_for(name, variable) end end # @private def self.define_aliases(name, alias_name) alias_method alias_name, name alias_method "#{alias_name}=", "#{name}=" define_predicate_for alias_name end # @private def self.define_predicate_for(*names) names.each {|name| alias_method "#{name}?", name} end # @private # # Invoked by the `add_setting` instance method. Use that method on a # `Configuration` instance rather than this class method. def self.add_setting(name, opts={}) raise "Use the instance add_setting method if you want to set a default" if opts.has_key?(:default) attr_writer name add_read_only_setting name Array(opts[:alias_with]).each do |alias_name| define_aliases(name, alias_name) end end # @private # # As `add_setting` but only add the reader def self.add_read_only_setting(name, opts={}) raise "Use the instance add_setting method if you want to set a default" if opts.has_key?(:default) define_reader name define_predicate_for name end # @macro [attach] add_setting # @!attribute [rw] $1 # @!method $1=(value) # # @macro [attach] define_reader # @!attribute [r] $1 # @macro add_setting # Path to use if no path is provided to the `rspec` command (default: # `"spec"`). Allows you to just type `rspec` instead of `rspec spec` to # run all the examples in the `spec` directory. add_setting :default_path # @macro add_setting # Run examples over DRb (default: `false`). RSpec doesn't supply the DRb # server, but you can use tools like spork. add_setting :drb # @macro add_setting # The drb_port (default: nil). add_setting :drb_port # @macro add_setting # Default: `$stderr`. add_setting :error_stream # Indicates if the DSL has been exposed off of modules and `main`. # Default: true def expose_dsl_globally? Core::DSL.exposed_globally? end # Use this to expose the core RSpec DSL via `Module` and the `main` # object. It will be set automatically but you can override it to # remove the DSL. # Default: true def expose_dsl_globally=(value) if value Core::DSL.expose_globally! Core::SharedExampleGroup::TopLevelDSL.expose_globally! else Core::DSL.remove_globally! Core::SharedExampleGroup::TopLevelDSL.remove_globally! end end # Determines where deprecation warnings are printed. # Defaults to `$stderr`. 
# @return [IO, String] IO to write to or filename to write to define_reader :deprecation_stream # Determines where deprecation warnings are printed. # @param value [IO, String] IO to write to or filename to write to def deprecation_stream=(value) if @reporter && !value.equal?(@deprecation_stream) warn "RSpec's reporter has already been initialized with " + "#{deprecation_stream.inspect} as the deprecation stream, so your change to "+ "`deprecation_stream` will be ignored. You should configure it earlier for " + "it to take effect, or use the `--deprecation-out` CLI option. " + "(Called from #{CallerFilter.first_non_rspec_line})" else @deprecation_stream = value end end # @macro add_setting # Clean up and exit after the first failure (default: `false`). add_setting :fail_fast # @macro add_setting # Prints the formatter output of your suite without running any # examples or hooks. add_setting :dry_run # @macro add_setting # The exit code to return if there are any failures (default: 1). add_setting :failure_exit_code # @macro define_reader # Indicates files configured to be required define_reader :requires # @macro define_reader # Returns dirs that have been prepended to the load path by the `-I` command line option define_reader :libs # @macro add_setting # Determines where RSpec will send its output. # Default: `$stdout`. define_reader :output_stream # Set the output stream for reporter # @attr value [IO] value for output, defaults to $stdout def output_stream=(value) if @reporter && !value.equal?(@output_stream) warn "RSpec's reporter has already been initialized with " + "#{output_stream.inspect} as the output stream, so your change to "+ "`output_stream` will be ignored. You should configure it earlier for " + "it to take effect. (Called from #{CallerFilter.first_non_rspec_line})" else @output_stream = value end end # @macro add_setting # Load files matching this pattern (default: `'**/*_spec.rb'`) add_setting :pattern # Set pattern to match files to load # @attr value [String] the filename pattern to filter spec files by def pattern= value if @spec_files_loaded RSpec.warning "Configuring `pattern` to #{value} has no effect since RSpec has already loaded the spec files." end @pattern = value end # @macro add_setting # Report the times for the slowest examples (default: `false`). # Use this to specify the number of examples to include in the profile. add_setting :profile_examples # @macro add_setting # Run all examples if none match the configured filters (default: `false`). add_setting :run_all_when_everything_filtered # @macro add_setting # Color to use to indicate success. # @param color [Symbol] defaults to `:green` but can be set to one of the # following: `[:black, :white, :red, :green, :yellow, # :blue, :magenta, :cyan]` add_setting :success_color # @macro add_setting # Color to use to print pending examples. # @param color [Symbol] defaults to `:yellow` but can be set to one of the # following: `[:black, :white, :red, :green, :yellow, # :blue, :magenta, :cyan]` add_setting :pending_color # @macro add_setting # Color to use to indicate failure. # @param color [Symbol] defaults to `:red` but can be set to one of the # following: `[:black, :white, :red, :green, :yellow, # :blue, :magenta, :cyan]` add_setting :failure_color # @macro add_setting # The default output color. 
# @param color [Symbol] defaults to `:white` but can be set to one of the # following:`[:black, :white, :red, :green, :yellow, # :blue, :magenta, :cyan]` add_setting :default_color # @macro add_setting # Color used when a pending example is fixed. # @param color [Symbol] defaults to `:blue` but can be set to one of the # following: `[:black, :white, :red, :green, :yellow, # :blue, :magenta, :cyan]` add_setting :fixed_color # @macro add_setting # Color used to print details. # @param color [Symbol] defaults to `:cyan` but can be set to one of the # following: `[:black, :white, :red, :green, :yellow, # :blue, :magenta, :cyan]` add_setting :detail_color # Deprecated. This config option was added in RSpec 2 to pave the way # for this being the default behavior in RSpec 3. Now this option is # a no-op. def treat_symbols_as_metadata_keys_with_true_values=(value) RSpec.deprecate("RSpec::Core::Configuration#treat_symbols_as_metadata_keys_with_true_values=", :message => "RSpec::Core::Configuration#treat_symbols_as_metadata_keys_with_true_values= " + "is deprecated, it is now set to true as default and setting it to false has no effect.") end # Record the start time of the spec suite to measure load time add_setting :start_time # @private add_setting :tty # @private add_setting :include_or_extend_modules # @private attr_writer :files_to_run # @private add_setting :expecting_with_rspec # @private attr_accessor :filter_manager # @private attr_reader :backtrace_formatter, :ordering_manager def initialize @start_time = $_rspec_core_load_started_at || ::RSpec::Core::Time.now @expectation_frameworks = [] @include_or_extend_modules = [] @mock_framework = nil @files_or_directories_to_run = [] @color = false @pattern = '**/*_spec.rb' @failure_exit_code = 1 @spec_files_loaded = false @backtrace_formatter = BacktraceFormatter.new @default_path = 'spec' @deprecation_stream = $stderr @output_stream = $stdout @reporter = nil @filter_manager = FilterManager.new @ordering_manager = Ordering::ConfigurationManager.new @preferred_options = {} @failure_color = :red @success_color = :green @pending_color = :yellow @default_color = :white @fixed_color = :blue @detail_color = :cyan @profile_examples = false @requires = [] @libs = [] end # @private # # Used to set higher priority option values from the command line. def force(hash) ordering_manager.force(hash) @preferred_options.merge!(hash) end # @private def reset @spec_files_loaded = false @reporter = nil @formatter_loader = nil end # @overload add_setting(name) # @overload add_setting(name, opts) # @option opts [Symbol] :default # # set a default value for the generated getter and predicate methods: # # add_setting(:foo, :default => "default value") # # @option opts [Symbol] :alias_with # # Use `:alias_with` to alias the setter, getter, and predicate to another # name, or names: # # add_setting(:foo, :alias_with => :bar) # add_setting(:foo, :alias_with => [:bar, :baz]) # # Adds a custom setting to the RSpec.configuration object. # # RSpec.configuration.add_setting :foo # # Used internally and by extension frameworks like rspec-rails, so they # can add config settings that are domain specific. For example: # # RSpec.configure do |c| # c.add_setting :use_transactional_fixtures, # :default => true, # :alias_with => :use_transactional_examples # end # # `add_setting` creates three methods on the configuration object, a # setter, a getter, and a predicate: # # RSpec.configuration.foo=(value) # RSpec.configuration.foo # RSpec.configuration.foo? 
# returns true if foo returns anything but nil or false def add_setting(name, opts={}) default = opts.delete(:default) (class << self; self; end).class_exec do add_setting(name, opts) end __send__("#{name}=", default) if default end # Returns the configured mock framework adapter module def mock_framework mock_with :rspec unless @mock_framework @mock_framework end # Delegates to mock_framework=(framework) def mock_framework=(framework) mock_with framework end # Regexps used to exclude lines from backtraces. # # Excludes lines from ruby (and jruby) source, installed gems, anything # in any "bin" directory, and any of the rspec libs (outside gem # installs) by default. # # You can modify the list via the getter, or replace it with the setter. # # To override this behaviour and display a full backtrace, use # `--backtrace`on the command line, in a `.rspec` file, or in the # `rspec_options` attribute of RSpec's rake task. def backtrace_exclusion_patterns @backtrace_formatter.exclusion_patterns end # Set regular expressions used to exclude lines in backtrace # @param patterns [Regexp] set the backtrace exlusion pattern def backtrace_exclusion_patterns=(patterns) @backtrace_formatter.exclusion_patterns = patterns end # Regexps used to include lines in backtraces. # # Defaults to [Regexp.new Dir.getwd]. # # Lines that match an exclusion _and_ an inclusion pattern # will be included. # # You can modify the list via the getter, or replace it with the setter. def backtrace_inclusion_patterns @backtrace_formatter.inclusion_patterns end # Set regular expressions used to include lines in backtrace # @attr patterns [Regexp] set backtrace_formatter inclusion_patterns def backtrace_inclusion_patterns=(patterns) @backtrace_formatter.inclusion_patterns = patterns end # @private MOCKING_ADAPTERS = { :rspec => :RSpec, :flexmock => :Flexmock, :rr => :RR, :mocha => :Mocha, :nothing => :Null } # Sets the mock framework adapter module. # # `framework` can be a Symbol or a Module. # # Given any of `:rspec`, `:mocha`, `:flexmock`, or `:rr`, configures the # named framework. # # Given `:nothing`, configures no framework. Use this if you don't use # any mocking framework to save a little bit of overhead. # # Given a Module, includes that module in every example group. The module # should adhere to RSpec's mock framework adapter API: # # setup_mocks_for_rspec # - called before each example # # verify_mocks_for_rspec # - called after each example if the example hasn't yet failed. # Framework should raise an exception when expectations fail # # teardown_mocks_for_rspec # - called after verify_mocks_for_rspec (even if there are errors) # # If the module responds to `configuration` and `mock_with` receives a block, # it will yield the configuration object to the block e.g. # # config.mock_with OtherMockFrameworkAdapter do |mod_config| # mod_config.custom_setting = true # end def mock_with(framework) framework_module = if framework.is_a?(Module) framework else const_name = MOCKING_ADAPTERS.fetch(framework) do raise ArgumentError, "Unknown mocking framework: #{framework.inspect}. " + "Pass a module or one of #{MOCKING_ADAPTERS.keys.inspect}" end RSpec::Support.require_rspec_core "mocking_adapters/#{const_name.to_s.downcase}" RSpec::Core::MockingAdapters.const_get(const_name) end new_name, old_name = [framework_module, @mock_framework].map do |mod| mod.respond_to?(:framework_name) ? mod.framework_name : :unnamed end unless new_name == old_name assert_no_example_groups_defined(:mock_framework) end if block_given? 
raise "#{framework_module} must respond to `configuration` so that mock_with can yield it." unless framework_module.respond_to?(:configuration) yield framework_module.configuration end @mock_framework = framework_module end # Returns the configured expectation framework adapter module(s) def expectation_frameworks expect_with :rspec if @expectation_frameworks.empty? @expectation_frameworks end # Delegates to expect_with(framework) def expectation_framework=(framework) expect_with(framework) end # Sets the expectation framework module(s) to be included in each example # group. # # `frameworks` can be `:rspec`, `:stdlib`, a custom module, or any # combination thereof: # # config.expect_with :rspec # config.expect_with :stdlib # config.expect_with :rspec, :stdlib # config.expect_with OtherExpectationFramework # # RSpec will translate `:rspec` and `:stdlib` into the appropriate # modules. # # ## Configuration # # If the module responds to `configuration`, `expect_with` will # yield the `configuration` object if given a block: # # config.expect_with OtherExpectationFramework do |custom_config| # custom_config.custom_setting = true # end def expect_with(*frameworks) modules = frameworks.map do |framework| case framework when Module framework when :rspec require 'rspec/expectations' self.expecting_with_rspec = true ::RSpec::Matchers when :stdlib # This require is kept here rather than in # stdlib_assertions_adapter so that we can stub it out sanely in # tests. require 'test/unit/assertions' require 'rspec/core/stdlib_assertions_adapter' ::RSpec::Core::StdlibAssertionsAdapter else raise ArgumentError, "#{framework.inspect} is not supported" end end if (modules - @expectation_frameworks).any? assert_no_example_groups_defined(:expect_with) end if block_given? raise "expect_with only accepts a block with a single argument. Call expect_with #{modules.length} times, once with each argument, instead." if modules.length > 1 raise "#{modules.first} must respond to `configuration` so that expect_with can yield it." unless modules.first.respond_to?(:configuration) yield modules.first.configuration end @expectation_frameworks.push(*modules) end # Check if full backtrace is enabled # @return [Boolean] is full backtrace enabled def full_backtrace? @backtrace_formatter.full_backtrace? end # Toggle full backtrace # @attr true_or_false [Boolean] toggle full backtrace display def full_backtrace=(true_or_false) @backtrace_formatter.full_backtrace = true_or_false end # Returns the configuration option for color, but should not # be used to check if color is supported. # # @see color_enabled? # @return [Boolean] def color value_for(:color, @color) end # Check if color is enabled for a particular output # @param output [IO] an output stream to use, defaults to the current # `output_stream` # @return [Boolean] def color_enabled?(output = output_stream) output_to_tty?(output) && color end # Toggle output color # @attr true_or_false [Boolean] toggle color enabled def color=(true_or_false) if true_or_false if RSpec.world.windows_os? and not ENV['ANSICON'] RSpec.warning "You must use ANSICON 1.31 or later (http://adoxa.3eeweb.com/ansicon/) to use colour on Windows" @color = false else @color = true end end end # @private def libs=(libs) libs.map do |lib| @libs.unshift lib $LOAD_PATH.unshift lib end end # Run examples matching on `description` in all files to run. 
# @param description [String, Regexp] the pattern to filter on def full_description=(description) filter_run :full_description => Regexp.union(*Array(description).map {|d| Regexp.new(d) }) end # @return [Array] full description filter def full_description filter.fetch :full_description, nil end # @overload add_formatter(formatter) # # Adds a formatter to the formatters collection. `formatter` can be a # string representing any of the built-in formatters (see # `built_in_formatter`), or a custom formatter class. # # ### Note # # For internal purposes, `add_formatter` also accepts the name of a class # and paths to use for output streams, but you should consider that a # private api that may change at any time without notice. def add_formatter(formatter_to_use, *paths) paths << output_stream if paths.empty? formatter_loader.add formatter_to_use, *paths end alias_method :formatter=, :add_formatter # The formatter that will be used if no formatter has been set. # Defaults to 'progress'. def default_formatter formatter_loader.default_formatter end # Sets a fallback formatter to use if none other has been set. # # @example # # RSpec.configure do |rspec| # rspec.default_formatter = 'doc' # end def default_formatter=(value) formatter_loader.default_formatter = value end # @private def formatters formatter_loader.formatters end # @private def formatter_loader @formatter_loader ||= Formatters::Loader.new(Reporter.new(self)) end # @private def reporter @reporter ||= begin formatter_loader.setup_default output_stream, deprecation_stream formatter_loader.reporter end end # @api private # # Defaults `profile_examples` to 10 examples when `@profile_examples` is `true`. # def profile_examples profile = value_for(:profile_examples, @profile_examples) if profile && !profile.is_a?(Integer) 10 else profile end end # @private def files_or_directories_to_run=(*files) files = files.flatten files << default_path if (command == 'rspec' || Runner.running_in_drb?) && default_path && files.empty? @files_or_directories_to_run = files @files_to_run = nil end # The spec files RSpec will run # @return [Array] specified files about to run def files_to_run @files_to_run ||= get_files_to_run(@files_or_directories_to_run) end # Creates a method that delegates to `example` including the submitted # `args`. Used internally to add variants of `example` like `pending`: # @param name [String] example name alias # @param args [Array<Symbol>, Hash] metadata for the generated example # # @note The specific example alias below (`pending`) is already # defined for you. # @note Use with caution. This extends the language used in your # specs, but does not add any additional documentation. We use this # in rspec to define methods like `focus` and `xit`, but we also add # docs for those methods. # # @example # RSpec.configure do |config| # config.alias_example_to :pending, :pending => true # end # # # This lets you do this: # # describe Thing do # pending "does something" do # thing = Thing.new # end # end # # # ... which is the equivalent of # # describe Thing do # it "does something", :pending => true do # thing = Thing.new # end # end def alias_example_to(name, *args) extra_options = Metadata.build_hash_from(args) RSpec::Core::ExampleGroup.define_example_method(name, extra_options) end # Creates a method that defines an example group with the provided # metadata. Can be used to define example group/metadata shortcuts. 
# # @example # RSpec.configure do |config| # config.alias_example_group_to :describe_model, :type => :model # end # # shared_context_for "model tests", :type => :model do # # define common model test helper methods, `let` declarations, etc # end # # # This lets you do this: # # RSpec.describe_model User do # end # # # ... which is the equivalent of # # RSpec.describe User, :type => :model do # end # # @note The defined aliased will also be added to the top level # (e.g. `main` and from within modules) if # `expose_dsl_globally` is set to true. # @see #alias_example_to # @see #expose_dsl_globally= def alias_example_group_to(new_name, *args) extra_options = Metadata.build_hash_from(args) RSpec::Core::ExampleGroup.define_example_group_method(new_name, extra_options) end # Define an alias for it_should_behave_like that allows different # language (like "it_has_behavior" or "it_behaves_like") to be # employed when including shared examples. # # @example # RSpec.configure do |config| # config.alias_it_behaves_like_to(:it_has_behavior, 'has behavior:') # end # # # allows the user to include a shared example group like: # # describe Entity do # it_has_behavior 'sortability' do # let(:sortable) { Entity.new } # end # end # # # which is reported in the output as: # # Entity # # has behavior: sortability # # ...sortability examples here # # @note Use with caution. This extends the language used in your # specs, but does not add any additional documentation. We use this # in rspec to define `it_should_behave_like` (for backward # compatibility), but we also add docs for that method. def alias_it_behaves_like_to(new_name, report_label = '') RSpec::Core::ExampleGroup.define_nested_shared_group_method(new_name, report_label) end alias_method :alias_it_should_behave_like_to, :alias_it_behaves_like_to # Adds key/value pairs to the `inclusion_filter`. If `args` # includes any symbols that are not part of the hash, each symbol # is treated as a key in the hash with the value `true`. # # ### Note # # Filters set using this method can be overridden from the command line # or config files (e.g. `.rspec`). # # @example # # given this declaration # describe "something", :foo => 'bar' do # # ... # end # # # any of the following will include that group # config.filter_run_including :foo => 'bar' # config.filter_run_including :foo => /^ba/ # config.filter_run_including :foo => lambda {|v| v == 'bar'} # config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'} # # # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g. # config.filter_run_including :foo => lambda {|v| v == 'bar'} # # # given a proc with an arity of 2, the lambda is passed the value related to the key, # # and the metadata itself e.g. # config.filter_run_including :foo => lambda {|v,m| m[:foo] == 'bar'} # # filter_run_including :foo # same as filter_run_including :foo => true def filter_run_including(*args) filter_manager.include_with_low_priority Metadata.build_hash_from(args) end alias_method :filter_run, :filter_run_including # Clears and reassigns the `inclusion_filter`. Set to `nil` if you don't # want any inclusion filter at all. # # ### Warning # # This overrides any inclusion filters/tags set on the command line or in # configuration files. def inclusion_filter=(filter) filter_manager.include_only Metadata.build_hash_from([filter]) end alias_method :filter=, :inclusion_filter= # Returns the `inclusion_filter`. If none has been set, returns an empty # hash. 
def inclusion_filter filter_manager.inclusions end alias_method :filter, :inclusion_filter # Adds key/value pairs to the `exclusion_filter`. If `args` # includes any symbols that are not part of the hash, each symbol # is treated as a key in the hash with the value `true`. # # ### Note # # Filters set using this method can be overridden from the command line # or config files (e.g. `.rspec`). # # @example # # given this declaration # describe "something", :foo => 'bar' do # # ... # end # # # any of the following will exclude that group # config.filter_run_excluding :foo => 'bar' # config.filter_run_excluding :foo => /^ba/ # config.filter_run_excluding :foo => lambda {|v| v == 'bar'} # config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'} # # # given a proc with an arity of 1, the lambda is passed the value related to the key, e.g. # config.filter_run_excluding :foo => lambda {|v| v == 'bar'} # # # given a proc with an arity of 2, the lambda is passed the value related to the key, # # and the metadata itself e.g. # config.filter_run_excluding :foo => lambda {|v,m| m[:foo] == 'bar'} # # filter_run_excluding :foo # same as filter_run_excluding :foo => true def filter_run_excluding(*args) filter_manager.exclude_with_low_priority Metadata.build_hash_from(args) end # Clears and reassigns the `exclusion_filter`. Set to `nil` if you don't # want any exclusion filter at all. # # ### Warning # # This overrides any exclusion filters/tags set on the command line or in # configuration files. def exclusion_filter=(filter) filter_manager.exclude_only Metadata.build_hash_from([filter]) end # Returns the `exclusion_filter`. If none has been set, returns an empty # hash. def exclusion_filter filter_manager.exclusions end # Tells RSpec to include `mod` in example groups. Methods defined in # `mod` are exposed to examples (not example groups). Use `filters` to # constrain the groups in which to include the module. # # @example # # module AuthenticationHelpers # def login_as(user) # # ... # end # end # # module UserHelpers # def users(username) # # ... # end # end # # RSpec.configure do |config| # config.include(UserHelpers) # included in all modules # config.include(AuthenticationHelpers, :type => :request) # end # # describe "edit profile", :type => :request do # it "can be viewed by owning user" do # login_as users(:jdoe) # get "/profiles/jdoe" # assert_select ".username", :text => 'jdoe' # end # end # # @see #extend def include(mod, *filters) include_or_extend_modules << [:include, mod, Metadata.build_hash_from(filters)] end # Tells RSpec to extend example groups with `mod`. Methods defined in # `mod` are exposed to example groups (not examples). Use `filters` to # constrain the groups to extend. # # Similar to `include`, but behavior is added to example groups, which # are classes, rather than the examples, which are instances of those # classes. # # @example # # module UiHelpers # def run_in_browser # # ... # end # end # # RSpec.configure do |config| # config.extend(UiHelpers, :type => :request) # end # # describe "edit profile", :type => :request do # run_in_browser # # it "does stuff in the client" do # # ... # end # end # # @see #include def extend(mod, *filters) include_or_extend_modules << [:extend, mod, Metadata.build_hash_from(filters)] end # @private # # Used internally to extend a group with modules using `include` and/or # `extend`. def configure_group(group) include_or_extend_modules.each do |include_or_extend, mod, filters| next unless filters.empty? 
|| group.any_apply?(filters) __send__("safe_#{include_or_extend}", mod, group) end end # @private def safe_include(mod, host) host.__send__(:include, mod) unless host < mod end # @private def requires=(paths) directories = ['lib', default_path].select { |p| File.directory? p } RSpec::Core::RubyProject.add_to_load_path(*directories) paths.each {|path| require path} @requires += paths end # @private if RUBY_VERSION.to_f >= 1.9 # @private def safe_extend(mod, host) host.extend(mod) unless host.singleton_class < mod end else # @private def safe_extend(mod, host) host.extend(mod) unless (class << host; self; end).included_modules.include?(mod) end end # @private def configure_mock_framework RSpec::Core::ExampleGroup.__send__(:include, mock_framework) conditionally_disable_mocks_monkey_patching end # @private def configure_expectation_framework expectation_frameworks.each do |framework| RSpec::Core::ExampleGroup.__send__(:include, framework) end conditionally_disable_expectations_monkey_patching end # @private def load_spec_files files_to_run.uniq.each {|f| load File.expand_path(f) } @spec_files_loaded = true end # @private DEFAULT_FORMATTER = lambda { |string| string } # Formats the docstring output using the block provided. # # @example # # This will strip the descriptions of both examples and example groups. # RSpec.configure do |config| # config.format_docstrings { |s| s.strip } # end def format_docstrings(&block) @format_docstrings_block = block_given? ? block : DEFAULT_FORMATTER end # @private def format_docstrings_block @format_docstrings_block ||= DEFAULT_FORMATTER end # @private def self.delegate_to_ordering_manager(*methods) methods.each do |method| define_method method do |*args, &block| ordering_manager.__send__(method, *args, &block) end end end # @macro delegate_to_ordering_manager # # Sets the seed value and sets the default global ordering to random. delegate_to_ordering_manager :seed= # @macro delegate_to_ordering_manager # Seed for random ordering (default: generated randomly each run). # # When you run specs with `--order random`, RSpec generates a random seed # for the randomization and prints it to the `output_stream` (assuming # you're using RSpec's built-in formatters). If you discover an ordering # dependency (i.e. examples fail intermittently depending on order), set # this (on Configuration or on the command line with `--seed`) to run # using the same seed while you debug the issue. # # We recommend, actually, that you use the command line approach so you # don't accidentally leave the seed encoded. delegate_to_ordering_manager :seed # @macro delegate_to_ordering_manager # # Sets the default global order and, if order is `'rand:<seed>'`, also sets the seed. delegate_to_ordering_manager :order= # @macro delegate_to_ordering_manager # Registers a named ordering strategy that can later be # used to order an example group's subgroups by adding # `:order => <name>` metadata to the example group. # # @param name [Symbol] The name of the ordering. # @yield Block that will order the given examples or example groups # @yieldparam list [Array<RSpec::Core::Example>, Array<RSpec::Core::ExampleGroup>] The examples or groups to order # @yieldreturn [Array<RSpec::Core::Example>, Array<RSpec::Core::ExampleGroup>] The re-ordered examples or groups # # @example # RSpec.configure do |rspec| # rspec.register_ordering :reverse do |list| # list.reverse # end # end # # describe MyClass, :order => :reverse do # # ... 
# end # # @note Pass the symbol `:global` to set the ordering strategy that # will be used to order the top-level example groups and any example # groups that do not have declared `:order` metadata. delegate_to_ordering_manager :register_ordering # @private delegate_to_ordering_manager :seed_used?, :ordering_registry # Set Ruby warnings on or off def warnings=(value) $VERBOSE = !!value end # @return [Boolean] Whether or not ruby warnings are enabled. def warnings? $VERBOSE end # Exposes the current running example via the named # helper method. RSpec 2.x exposed this via `example`, # but in RSpec 3.0, the example is instead exposed via # an arg yielded to `it`, `before`, `let`, etc. However, # some extension gems (such as Capybara) depend on the # RSpec 2.x's `example` method, so this config option # can be used to maintain compatibility. # # @param method_name [Symbol] the name of the helper method # # @example # # RSpec.configure do |rspec| # rspec.expose_current_running_example_as :example # end # # describe MyClass do # before do # # `example` can be used here because of the above config. # do_something if example.metadata[:type] == "foo" # end # end def expose_current_running_example_as(method_name) ExposeCurrentExample.module_exec do extend RSpec::SharedContext let(method_name) { |ex| ex } end include ExposeCurrentExample end # @private module ExposeCurrentExample; end # Turns deprecation warnings into errors, in order to surface # the full backtrace of the call site. This can be useful when # you need more context to address a deprecation than the # single-line call site normally provided. # # @example # # RSpec.configure do |rspec| # rspec.raise_errors_for_deprecations! # end def raise_errors_for_deprecations! self.deprecation_stream = Formatters::DeprecationFormatter::RaiseErrorStream.new end # Enables zero monkey patching mode for RSpec. It removes monkey # patching of the top-level DSL methods (`describe`, # `shared_examples_for`, etc) onto `main` and `Module`, instead # requiring you to prefix these methods with `RSpec.`. It enables # expect-only syntax for rspec-mocks and rspec-expectations. It # simply disables monkey patching on whatever pieces of rspec # the user is using. # # @note It configures rspec-mocks and rspec-expectations only # if the user is using those (either explicitly or implicitly # by not setting `mock_with` or `expect_with` to anything else). # # @note If the user uses this options with `mock_with :mocha` # (or similiar) they will still have monkey patching active # in their test environment from mocha. # # @example # # # It disables all monkey patching # RSpec.configure do |config| # config.disable_monkey_patching! # end # # # Is an equivalent to # RSpec.configure do |config| # config.expose_dsl_globally = false # # config.mock_with :rspec do |mocks| # mocks.syntax = :expect # mocks.patch_marshal_to_support_partial_doubles = false # end # # config.mock_with :rspec do |expectations| # expectations.syntax = :expect # end # end def disable_monkey_patching! self.expose_dsl_globally = false self.disable_monkey_patching = true conditionally_disable_mocks_monkey_patching conditionally_disable_expectations_monkey_patching end # @private attr_accessor :disable_monkey_patching private def get_files_to_run(paths) FlatMap.flat_map(paths) do |path| path = path.gsub(File::ALT_SEPARATOR, File::SEPARATOR) if File::ALT_SEPARATOR File.directory?(path) ? 
gather_directories(path) : extract_location(path) end.sort end def gather_directories(path) stripped = "{#{pattern.gsub(/\s*,\s*/, ',')}}" files = pattern =~ /^#{Regexp.escape path}/ ? Dir[stripped] : Dir["#{path}/#{stripped}"] files.sort end def extract_location(path) if path =~ /^(.*?)((?:\:\d+)+)$/ path, lines = $1, $2[1..-1].split(":").map{|n| n.to_i} filter_manager.add_location path, lines end path end def command $0.split(File::SEPARATOR).last end def value_for(key, default=nil) @preferred_options.has_key?(key) ? @preferred_options[key] : default end def assert_no_example_groups_defined(config_option) if RSpec.world.example_groups.any? raise MustBeConfiguredBeforeExampleGroupsError.new( "RSpec's #{config_option} configuration option must be configured before " + "any example groups are defined, but you have already defined a group." ) end end def output_to_tty?(output=output_stream) tty? || (output.respond_to?(:tty?) && output.tty?) end def conditionally_disable_mocks_monkey_patching return unless disable_monkey_patching && rspec_mocks_loaded? RSpec::Mocks.configuration.tap do |config| config.syntax = :expect config.patch_marshal_to_support_partial_doubles = false end end def conditionally_disable_expectations_monkey_patching return unless disable_monkey_patching && rspec_expectations_loaded? RSpec::Expectations.configuration.syntax = :expect end def rspec_mocks_loaded? defined?(RSpec::Mocks.configuration) end def rspec_expectations_loaded? defined?(RSpec::Expectations.configuration) end end end end
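As a quick illustration of the configuration APIs defined in the file above, here is a minimal usage sketch (not part of the dumped record; the :smoke tag and the focus_on alias are made-up names used only for this example):

RSpec.configure do |config|
  # `filter_run` is the alias of `filter_run_including` defined above;
  # passing a bare symbol filters on :smoke => true.
  config.filter_run :smoke

  # Fallback formatter, used only when no other formatter has been set.
  config.default_formatter = 'doc'

  # Defines `focus_on` as a variant of `it` that also applies the :smoke tag,
  # via Configuration#alias_example_to.
  config.alias_example_to :focus_on, :smoke => true
end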
1
12696
Is this necessary? I would expect YARD to treat it as public anyway... (Don't hold off merging on this...I'm mostly just curious).
rspec-rspec-core
rb
@@ -578,6 +578,7 @@ func (e *MutableStateImpl) GetActivityScheduledEvent( e.executionState.RunId, ai.ScheduledEventBatchId, ai.ScheduleId, + ai.Version, currentBranchToken, ) if err != nil {
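For context, the call site after applying this hunk would presumably read as sketched below; the surrounding arguments are reconstructed from the hunk together with the old GetActivityScheduledEvent body dumped further down, and only the ai.Version line is new:

	scheduledEvent, err := e.eventsCache.GetEvent(
		e.executionInfo.NamespaceId,
		e.executionInfo.WorkflowId,
		e.executionState.RunId,
		ai.ScheduledEventBatchId,
		ai.ScheduleId,
		ai.Version, // added by this patch: passes the activity's recorded version into the events-cache lookup
		currentBranchToken,
	)
	// error handling is unchanged from the original file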
1
// The MIT License // // Copyright (c) 2020 Temporal Technologies Inc. All rights reserved. // // Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package workflow import ( "fmt" "math/rand" "time" "github.com/gogo/protobuf/proto" "github.com/pborman/uuid" commandpb "go.temporal.io/api/command/v1" commonpb "go.temporal.io/api/common/v1" enumspb "go.temporal.io/api/enums/v1" failurepb "go.temporal.io/api/failure/v1" historypb "go.temporal.io/api/history/v1" "go.temporal.io/api/serviceerror" taskqueuepb "go.temporal.io/api/taskqueue/v1" workflowpb "go.temporal.io/api/workflow/v1" "go.temporal.io/api/workflowservice/v1" enumsspb "go.temporal.io/server/api/enums/v1" historyspb "go.temporal.io/server/api/history/v1" "go.temporal.io/server/api/historyservice/v1" persistencespb "go.temporal.io/server/api/persistence/v1" workflowspb "go.temporal.io/server/api/workflow/v1" "go.temporal.io/server/common" "go.temporal.io/server/common/backoff" "go.temporal.io/server/common/cache" "go.temporal.io/server/common/clock" "go.temporal.io/server/common/cluster" "go.temporal.io/server/common/convert" "go.temporal.io/server/common/definition" "go.temporal.io/server/common/enums" "go.temporal.io/server/common/log" "go.temporal.io/server/common/log/tag" "go.temporal.io/server/common/metrics" "go.temporal.io/server/common/migration" "go.temporal.io/server/common/persistence" "go.temporal.io/server/common/persistence/versionhistory" "go.temporal.io/server/common/primitives/timestamp" "go.temporal.io/server/common/searchattribute" "go.temporal.io/server/service/history/configs" "go.temporal.io/server/service/history/consts" "go.temporal.io/server/service/history/events" "go.temporal.io/server/service/history/shard" ) const ( emptyUUID = "emptyUuid" mutableStateInvalidHistoryActionMsg = "invalid history builder state for action" mutableStateInvalidHistoryActionMsgTemplate = mutableStateInvalidHistoryActionMsg + ": %v" ) var ( // ErrWorkflowFinished indicates trying to mutate mutable state after workflow finished ErrWorkflowFinished = serviceerror.NewInternal("invalid mutable state action: mutation after finish") // ErrMissingTimerInfo indicates missing timer info ErrMissingTimerInfo = serviceerror.NewInternal("unable to get timer info") // ErrMissingActivityInfo indicates missing activity info ErrMissingActivityInfo = serviceerror.NewInternal("unable to get activity info") // ErrMissingChildWorkflowInfo indicates missing child workflow info 
ErrMissingChildWorkflowInfo = serviceerror.NewInternal("unable to get child workflow info") // ErrMissingRequestCancelInfo indicates missing request cancel info ErrMissingRequestCancelInfo = serviceerror.NewInternal("unable to get request cancel info") // ErrMissingSignalInfo indicates missing signal external ErrMissingSignalInfo = serviceerror.NewInternal("unable to get signal info") // ErrMissingWorkflowStartEvent indicates missing workflow start event ErrMissingWorkflowStartEvent = serviceerror.NewInternal("unable to get workflow start event") // ErrMissingWorkflowCompletionEvent indicates missing workflow completion event ErrMissingWorkflowCompletionEvent = serviceerror.NewInternal("unable to get workflow completion event") // ErrMissingActivityScheduledEvent indicates missing workflow activity scheduled event ErrMissingActivityScheduledEvent = serviceerror.NewInternal("unable to get activity scheduled event") // ErrMissingChildWorkflowInitiatedEvent indicates missing child workflow initiated event ErrMissingChildWorkflowInitiatedEvent = serviceerror.NewInternal("unable to get child workflow initiated event") ) type ( MutableStateImpl struct { pendingActivityTimerHeartbeats map[int64]time.Time // Schedule Event ID -> LastHeartbeatTimeoutVisibilityInSeconds. pendingActivityInfoIDs map[int64]*persistencespb.ActivityInfo // Schedule Event ID -> Activity Info. pendingActivityIDToEventID map[string]int64 // Activity ID -> Schedule Event ID of the activity. updateActivityInfos map[int64]*persistencespb.ActivityInfo // Modified activities from last update. deleteActivityInfos map[int64]struct{} // Deleted activities from last update. syncActivityTasks map[int64]struct{} // Activity to be sync to remote pendingTimerInfoIDs map[string]*persistencespb.TimerInfo // User Timer ID -> Timer Info. pendingTimerEventIDToID map[int64]string // User Timer Start Event ID -> User Timer ID. updateTimerInfos map[string]*persistencespb.TimerInfo // Modified timers from last update. deleteTimerInfos map[string]struct{} // Deleted timers from last update. pendingChildExecutionInfoIDs map[int64]*persistencespb.ChildExecutionInfo // Initiated Event ID -> Child Execution Info updateChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo // Modified ChildExecution Infos since last update deleteChildExecutionInfos map[int64]struct{} // Deleted ChildExecution Info since last update pendingRequestCancelInfoIDs map[int64]*persistencespb.RequestCancelInfo // Initiated Event ID -> RequestCancelInfo updateRequestCancelInfos map[int64]*persistencespb.RequestCancelInfo // Modified RequestCancel Infos since last update, for persistence update deleteRequestCancelInfos map[int64]struct{} // Deleted RequestCancel Info since last update, for persistence update pendingSignalInfoIDs map[int64]*persistencespb.SignalInfo // Initiated Event ID -> SignalInfo updateSignalInfos map[int64]*persistencespb.SignalInfo // Modified SignalInfo since last update deleteSignalInfos map[int64]struct{} // Deleted SignalInfo since last update pendingSignalRequestedIDs map[string]struct{} // Set of signaled requestIds updateSignalRequestedIDs map[string]struct{} // Set of signaled requestIds since last update deleteSignalRequestedIDs map[string]struct{} // Deleted signaled requestId executionInfo *persistencespb.WorkflowExecutionInfo // Workflow mutable state info. 
executionState *persistencespb.WorkflowExecutionState hBuilder *HistoryBuilder // in memory only attributes // indicate the current version currentVersion int64 // buffer events from DB bufferEventsInDB []*historypb.HistoryEvent // indicates the workflow state in DB, can be used to calculate // whether this workflow is pointed by current workflow record stateInDB enumsspb.WorkflowExecutionState // TODO deprecate nextEventIDInDB in favor of dbRecordVersion // indicates the next event ID in DB, for conditional update nextEventIDInDB int64 // indicates the DB record version, for conditional update dbRecordVersion int64 // namespace entry contains a snapshot of namespace // NOTE: do not use the failover version inside, use currentVersion above namespaceEntry *cache.NamespaceCacheEntry // record if a event has been applied to mutable state // TODO: persist this to db appliedEvents map[string]struct{} InsertTransferTasks []persistence.Task InsertTimerTasks []persistence.Task InsertReplicationTasks []persistence.Task InsertVisibilityTasks []persistence.Task // do not rely on this, this is only updated on // Load() and closeTransactionXXX methods. So when // a transaction is in progress, this value will be // wrong. This exist primarily for visibility via CLI checksum *persistencespb.Checksum taskGenerator TaskGenerator workflowTaskManager *workflowTaskStateMachine QueryRegistry QueryRegistry shard shard.Context clusterMetadata cluster.Metadata eventsCache events.Cache config *configs.Config timeSource clock.TimeSource logger log.Logger metricsClient metrics.Client } ) var _ MutableState = (*MutableStateImpl)(nil) func NewMutableState( shard shard.Context, eventsCache events.Cache, logger log.Logger, namespaceEntry *cache.NamespaceCacheEntry, startTime time.Time, ) *MutableStateImpl { s := &MutableStateImpl{ updateActivityInfos: make(map[int64]*persistencespb.ActivityInfo), pendingActivityTimerHeartbeats: make(map[int64]time.Time), pendingActivityInfoIDs: make(map[int64]*persistencespb.ActivityInfo), pendingActivityIDToEventID: make(map[string]int64), deleteActivityInfos: make(map[int64]struct{}), syncActivityTasks: make(map[int64]struct{}), pendingTimerInfoIDs: make(map[string]*persistencespb.TimerInfo), pendingTimerEventIDToID: make(map[int64]string), updateTimerInfos: make(map[string]*persistencespb.TimerInfo), deleteTimerInfos: make(map[string]struct{}), updateChildExecutionInfos: make(map[int64]*persistencespb.ChildExecutionInfo), pendingChildExecutionInfoIDs: make(map[int64]*persistencespb.ChildExecutionInfo), deleteChildExecutionInfos: make(map[int64]struct{}), updateRequestCancelInfos: make(map[int64]*persistencespb.RequestCancelInfo), pendingRequestCancelInfoIDs: make(map[int64]*persistencespb.RequestCancelInfo), deleteRequestCancelInfos: make(map[int64]struct{}), updateSignalInfos: make(map[int64]*persistencespb.SignalInfo), pendingSignalInfoIDs: make(map[int64]*persistencespb.SignalInfo), deleteSignalInfos: make(map[int64]struct{}), updateSignalRequestedIDs: make(map[string]struct{}), pendingSignalRequestedIDs: make(map[string]struct{}), deleteSignalRequestedIDs: make(map[string]struct{}), currentVersion: namespaceEntry.GetFailoverVersion(), bufferEventsInDB: nil, stateInDB: enumsspb.WORKFLOW_EXECUTION_STATE_VOID, nextEventIDInDB: common.FirstEventID, dbRecordVersion: 0, namespaceEntry: namespaceEntry, appliedEvents: make(map[string]struct{}), QueryRegistry: NewQueryRegistry(), shard: shard, clusterMetadata: shard.GetClusterMetadata(), eventsCache: eventsCache, config: 
shard.GetConfig(), timeSource: shard.GetTimeSource(), logger: logger, metricsClient: shard.GetMetricsClient(), } // making new workflow to use db record version for CAS instead of next event ID if migration.IsDBVersionEnabled() { s.dbRecordVersion = 1 } s.executionInfo = &persistencespb.WorkflowExecutionInfo{ WorkflowTaskVersion: common.EmptyVersion, WorkflowTaskScheduleId: common.EmptyEventID, WorkflowTaskStartedId: common.EmptyEventID, WorkflowTaskRequestId: emptyUUID, WorkflowTaskTimeout: timestamp.DurationFromSeconds(0), WorkflowTaskAttempt: 1, LastWorkflowTaskStartId: common.EmptyEventID, StartTime: timestamp.TimePtr(startTime), VersionHistories: versionhistory.NewVersionHistories(&historyspb.VersionHistory{}), } s.executionState = &persistencespb.WorkflowExecutionState{State: enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, Status: enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING} s.hBuilder = NewMutableHistoryBuilder( s.timeSource, s.shard.GenerateTransferTaskIDs, s.currentVersion, common.FirstEventID, s.bufferEventsInDB, ) s.taskGenerator = NewTaskGenerator(shard.GetNamespaceCache(), s.logger, s) s.workflowTaskManager = newWorkflowTaskStateMachine(s) return s } func newMutableStateBuilderFromDB( shard shard.Context, eventsCache events.Cache, logger log.Logger, namespaceEntry *cache.NamespaceCacheEntry, dbRecord *persistencespb.WorkflowMutableState, dbRecordVersion int64, ) (*MutableStateImpl, error) { // startTime will be overridden by DB record startTime := time.Time{} mutableState := NewMutableState(shard, eventsCache, logger, namespaceEntry, startTime) if dbRecord.ActivityInfos != nil { mutableState.pendingActivityInfoIDs = dbRecord.ActivityInfos } for _, activityInfo := range dbRecord.ActivityInfos { mutableState.pendingActivityIDToEventID[activityInfo.ActivityId] = activityInfo.ScheduleId if (activityInfo.TimerTaskStatus & TimerTaskStatusCreatedHeartbeat) > 0 { // Sets last pending timer heartbeat to year 2000. // This ensures at least one heartbeat task will be processed for the pending activity. mutableState.pendingActivityTimerHeartbeats[activityInfo.ScheduleId] = time.Unix(946684800, 0) } } if dbRecord.TimerInfos != nil { mutableState.pendingTimerInfoIDs = dbRecord.TimerInfos } for _, timerInfo := range dbRecord.TimerInfos { mutableState.pendingTimerEventIDToID[timerInfo.GetStartedId()] = timerInfo.GetTimerId() } if dbRecord.ChildExecutionInfos != nil { mutableState.pendingChildExecutionInfoIDs = dbRecord.ChildExecutionInfos } if dbRecord.RequestCancelInfos != nil { mutableState.pendingRequestCancelInfoIDs = dbRecord.RequestCancelInfos } if dbRecord.SignalInfos != nil { mutableState.pendingSignalInfoIDs = dbRecord.SignalInfos } mutableState.pendingSignalRequestedIDs = convert.StringSliceToSet(dbRecord.SignalRequestedIds) mutableState.executionState = dbRecord.ExecutionState mutableState.executionInfo = dbRecord.ExecutionInfo // Workflows created before 1.11 doesn't have ExecutionTime and it must be computed for backwards compatibility. // Remove this "if" block when it is ok to rely on executionInfo.ExecutionTime only (added 6/9/21). 
if mutableState.executionInfo.ExecutionTime == nil { startEvent, err := mutableState.GetStartEvent() if err != nil { return nil, err } backoffDuration := timestamp.DurationValue(startEvent.GetWorkflowExecutionStartedEventAttributes().GetFirstWorkflowTaskBackoff()) mutableState.executionInfo.ExecutionTime = timestamp.TimePtr(timestamp.TimeValue(mutableState.executionInfo.GetStartTime()).Add(backoffDuration)) } mutableState.hBuilder = NewMutableHistoryBuilder( mutableState.timeSource, mutableState.shard.GenerateTransferTaskIDs, common.EmptyVersion, dbRecord.NextEventId, dbRecord.BufferedEvents, ) mutableState.currentVersion = common.EmptyVersion mutableState.bufferEventsInDB = dbRecord.BufferedEvents mutableState.stateInDB = dbRecord.ExecutionState.State mutableState.nextEventIDInDB = dbRecord.NextEventId mutableState.dbRecordVersion = dbRecordVersion mutableState.checksum = dbRecord.Checksum if len(dbRecord.Checksum.GetValue()) > 0 { switch { case mutableState.shouldInvalidateCheckum(): mutableState.checksum = nil mutableState.metricsClient.IncCounter(metrics.WorkflowContextScope, metrics.MutableStateChecksumInvalidated) case mutableState.shouldVerifyChecksum(): if err := verifyMutableStateChecksum(mutableState, dbRecord.Checksum); err != nil { // we ignore checksum verification errors for now until this // feature is tested and/or we have mechanisms in place to deal // with these types of errors mutableState.metricsClient.IncCounter(metrics.WorkflowContextScope, metrics.MutableStateChecksumMismatch) mutableState.logError("mutable state checksum mismatch", tag.Error(err)) } } } return mutableState, nil } func (e *MutableStateImpl) CloneToProto() *persistencespb.WorkflowMutableState { ms := &persistencespb.WorkflowMutableState{ ActivityInfos: e.pendingActivityInfoIDs, TimerInfos: e.pendingTimerInfoIDs, ChildExecutionInfos: e.pendingChildExecutionInfoIDs, RequestCancelInfos: e.pendingRequestCancelInfoIDs, SignalInfos: e.pendingSignalInfoIDs, SignalRequestedIds: convert.StringSetToSlice(e.pendingSignalRequestedIDs), ExecutionInfo: e.executionInfo, ExecutionState: e.executionState, NextEventId: e.hBuilder.NextEventID(), BufferedEvents: e.bufferEventsInDB, Checksum: e.checksum, } return proto.Clone(ms).(*persistencespb.WorkflowMutableState) } func (e *MutableStateImpl) GetCurrentBranchToken() ([]byte, error) { currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(e.executionInfo.VersionHistories) if err != nil { return nil, err } return currentVersionHistory.GetBranchToken(), nil } // SetHistoryTree set treeID/historyBranches func (e *MutableStateImpl) SetHistoryTree( treeID string, ) error { initialBranchToken, err := persistence.NewHistoryBranchToken(treeID) if err != nil { return err } return e.SetCurrentBranchToken(initialBranchToken) } func (e *MutableStateImpl) SetCurrentBranchToken( branchToken []byte, ) error { currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(e.executionInfo.VersionHistories) if err != nil { return err } versionhistory.SetVersionHistoryBranchToken(currentVersionHistory, branchToken) return nil } func (e *MutableStateImpl) SetHistoryBuilder(hBuilder *HistoryBuilder) { e.hBuilder = hBuilder } func (e *MutableStateImpl) GetExecutionInfo() *persistencespb.WorkflowExecutionInfo { return e.executionInfo } func (e *MutableStateImpl) GetExecutionState() *persistencespb.WorkflowExecutionState { return e.executionState } func (e *MutableStateImpl) FlushBufferedEvents() { if e.HasInFlightWorkflowTask() { return } 
e.updatePendingEventIDs(e.hBuilder.FlushBufferToCurrentBatch()) } func (e *MutableStateImpl) UpdateCurrentVersion( version int64, forceUpdate bool, ) error { if state, _ := e.GetWorkflowStateStatus(); state == enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { // do not update current version only when workflow is completed return nil } versionHistory, err := versionhistory.GetCurrentVersionHistory(e.executionInfo.VersionHistories) if err != nil { return err } if !versionhistory.IsEmptyVersionHistory(versionHistory) { // this make sure current version >= last write version versionHistoryItem, err := versionhistory.GetLastVersionHistoryItem(versionHistory) if err != nil { return err } e.currentVersion = versionHistoryItem.GetVersion() } if version > e.currentVersion || forceUpdate { e.currentVersion = version } e.hBuilder = NewMutableHistoryBuilder( e.timeSource, e.shard.GenerateTransferTaskIDs, e.currentVersion, e.nextEventIDInDB, e.bufferEventsInDB, ) return nil } func (e *MutableStateImpl) GetCurrentVersion() int64 { if e.executionInfo.VersionHistories != nil { return e.currentVersion } return common.EmptyVersion } func (e *MutableStateImpl) GetStartVersion() (int64, error) { if e.executionInfo.VersionHistories != nil { versionHistory, err := versionhistory.GetCurrentVersionHistory(e.executionInfo.VersionHistories) if err != nil { return 0, err } firstItem, err := versionhistory.GetFirstVersionHistoryItem(versionHistory) if err != nil { return 0, err } return firstItem.GetVersion(), nil } return common.EmptyVersion, nil } func (e *MutableStateImpl) GetLastWriteVersion() (int64, error) { if e.executionInfo.VersionHistories != nil { versionHistory, err := versionhistory.GetCurrentVersionHistory(e.executionInfo.VersionHistories) if err != nil { return 0, err } lastItem, err := versionhistory.GetLastVersionHistoryItem(versionHistory) if err != nil { return 0, err } return lastItem.GetVersion(), nil } return common.EmptyVersion, nil } func (e *MutableStateImpl) IsCurrentWorkflowGuaranteed() bool { // stateInDB is used like a bloom filter: // // 1. stateInDB being created / running meaning that this workflow must be the current // workflow (assuming there is no rebuild of mutable state). // 2. stateInDB being completed does not guarantee this workflow being the current workflow // 3. stateInDB being zombie guarantees this workflow not being the current workflow // 4. 
stateInDB cannot be void, void is only possible when mutable state is just initialized switch e.stateInDB { case enumsspb.WORKFLOW_EXECUTION_STATE_VOID: return false case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: return true case enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: return true case enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: return false case enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: return false case enumsspb.WORKFLOW_EXECUTION_STATE_CORRUPTED: return false default: panic(fmt.Sprintf("unknown workflow state: %v", e.executionState.State)) } } func (e *MutableStateImpl) GetNamespaceEntry() *cache.NamespaceCacheEntry { return e.namespaceEntry } func (e *MutableStateImpl) IsStickyTaskQueueEnabled() bool { return e.executionInfo.StickyTaskQueue != "" } func (e *MutableStateImpl) GetWorkflowType() *commonpb.WorkflowType { wType := &commonpb.WorkflowType{} wType.Name = e.executionInfo.WorkflowTypeName return wType } func (e *MutableStateImpl) GetQueryRegistry() QueryRegistry { return e.QueryRegistry } func (e *MutableStateImpl) GetActivityScheduledEvent( scheduleEventID int64, ) (*historypb.HistoryEvent, error) { ai, ok := e.pendingActivityInfoIDs[scheduleEventID] if !ok { return nil, ErrMissingActivityInfo } currentBranchToken, err := e.GetCurrentBranchToken() if err != nil { return nil, err } scheduledEvent, err := e.eventsCache.GetEvent( e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionState.RunId, ai.ScheduledEventBatchId, ai.ScheduleId, currentBranchToken, ) if err != nil { // do not return the original error // since original error can be of type entity not exists // which can cause task processing side to fail silently return nil, ErrMissingActivityScheduledEvent } return scheduledEvent, nil } // GetActivityInfo gives details about an activity that is currently in progress. func (e *MutableStateImpl) GetActivityInfo( scheduleEventID int64, ) (*persistencespb.ActivityInfo, bool) { ai, ok := e.pendingActivityInfoIDs[scheduleEventID] return ai, ok } // GetActivityInfoWithTimerHeartbeat gives details about an activity that is currently in progress. func (e *MutableStateImpl) GetActivityInfoWithTimerHeartbeat( scheduleEventID int64, ) (*persistencespb.ActivityInfo, time.Time, bool) { ai, ok := e.pendingActivityInfoIDs[scheduleEventID] timerVis, ok := e.pendingActivityTimerHeartbeats[scheduleEventID] return ai, timerVis, ok } // GetActivityByActivityID gives details about an activity that is currently in progress. func (e *MutableStateImpl) GetActivityByActivityID( activityID string, ) (*persistencespb.ActivityInfo, bool) { eventID, ok := e.pendingActivityIDToEventID[activityID] if !ok { return nil, false } return e.GetActivityInfo(eventID) } // GetChildExecutionInfo gives details about a child execution that is currently in progress. 
func (e *MutableStateImpl) GetChildExecutionInfo( initiatedEventID int64, ) (*persistencespb.ChildExecutionInfo, bool) { ci, ok := e.pendingChildExecutionInfoIDs[initiatedEventID] return ci, ok } // GetChildExecutionInitiatedEvent reads out the ChildExecutionInitiatedEvent from mutable state for in-progress child // executions func (e *MutableStateImpl) GetChildExecutionInitiatedEvent( initiatedEventID int64, ) (*historypb.HistoryEvent, error) { ci, ok := e.pendingChildExecutionInfoIDs[initiatedEventID] if !ok { return nil, ErrMissingChildWorkflowInfo } currentBranchToken, err := e.GetCurrentBranchToken() if err != nil { return nil, err } initiatedEvent, err := e.eventsCache.GetEvent( e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionState.RunId, ci.InitiatedEventBatchId, ci.InitiatedId, currentBranchToken, ) if err != nil { // do not return the original error // since original error can be of type entity not exists // which can cause task processing side to fail silently return nil, ErrMissingChildWorkflowInitiatedEvent } return initiatedEvent, nil } // GetRequestCancelInfo gives details about a request cancellation that is currently in progress. func (e *MutableStateImpl) GetRequestCancelInfo( initiatedEventID int64, ) (*persistencespb.RequestCancelInfo, bool) { ri, ok := e.pendingRequestCancelInfoIDs[initiatedEventID] return ri, ok } func (e *MutableStateImpl) GetRetryBackoffDuration( failure *failurepb.Failure, ) (time.Duration, enumspb.RetryState) { info := e.executionInfo if !info.HasRetryPolicy { return backoff.NoBackoff, enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET } return getBackoffInterval( e.timeSource.Now(), info.Attempt, info.RetryMaximumAttempts, info.RetryInitialInterval, info.RetryMaximumInterval, info.WorkflowExecutionExpirationTime, info.RetryBackoffCoefficient, failure, info.RetryNonRetryableErrorTypes, ) } func (e *MutableStateImpl) GetCronBackoffDuration() (time.Duration, error) { if e.executionInfo.CronSchedule == "" { return backoff.NoBackoff, nil } executionTime := timestamp.TimeValue(e.GetExecutionInfo().GetExecutionTime()) return backoff.GetBackoffForNextSchedule(e.executionInfo.CronSchedule, executionTime, e.timeSource.Now()), nil } // GetSignalInfo get details about a signal request that is currently in progress. 
func (e *MutableStateImpl) GetSignalInfo( initiatedEventID int64, ) (*persistencespb.SignalInfo, bool) { ri, ok := e.pendingSignalInfoIDs[initiatedEventID] return ri, ok } // GetCompletionEvent retrieves the workflow completion event from mutable state func (e *MutableStateImpl) GetCompletionEvent() (*historypb.HistoryEvent, error) { if e.executionState.State != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { return nil, ErrMissingWorkflowCompletionEvent } currentBranchToken, err := e.GetCurrentBranchToken() if err != nil { return nil, err } // Completion EventID is always one less than NextEventID after workflow is completed completionEventID := e.hBuilder.NextEventID() - 1 firstEventID := e.executionInfo.CompletionEventBatchId completionEvent, err := e.eventsCache.GetEvent( e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionState.RunId, firstEventID, completionEventID, currentBranchToken, ) if err != nil { // do not return the original error // since original error can be of type entity not exists // which can cause task processing side to fail silently return nil, ErrMissingWorkflowCompletionEvent } return completionEvent, nil } // GetStartEvent retrieves the workflow start event from mutable state func (e *MutableStateImpl) GetStartEvent() (*historypb.HistoryEvent, error) { currentBranchToken, err := e.GetCurrentBranchToken() if err != nil { return nil, err } startEvent, err := e.eventsCache.GetEvent( e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionState.RunId, common.FirstEventID, common.FirstEventID, currentBranchToken, ) if err != nil { // do not return the original error // since original error can be of type entity not exists // which can cause task processing side to fail silently return nil, ErrMissingWorkflowStartEvent } return startEvent, nil } // DeletePendingChildExecution deletes details about a ChildExecutionInfo. func (e *MutableStateImpl) DeletePendingChildExecution( initiatedEventID int64, ) error { if _, ok := e.pendingChildExecutionInfoIDs[initiatedEventID]; ok { delete(e.pendingChildExecutionInfoIDs, initiatedEventID) } else { e.logError( fmt.Sprintf("unable to find child workflow event ID: %v in mutable state", initiatedEventID), tag.ErrorTypeInvalidMutableStateAction, ) // log data inconsistency instead of returning an error e.logDataInconsistency() } delete(e.updateChildExecutionInfos, initiatedEventID) e.deleteChildExecutionInfos[initiatedEventID] = struct{}{} return nil } // DeletePendingRequestCancel deletes details about a RequestCancelInfo. 
func (e *MutableStateImpl) DeletePendingRequestCancel( initiatedEventID int64, ) error { if _, ok := e.pendingRequestCancelInfoIDs[initiatedEventID]; ok { delete(e.pendingRequestCancelInfoIDs, initiatedEventID) } else { e.logError( fmt.Sprintf("unable to find request cancel external workflow event ID: %v in mutable state", initiatedEventID), tag.ErrorTypeInvalidMutableStateAction, ) // log data inconsistency instead of returning an error e.logDataInconsistency() } delete(e.updateRequestCancelInfos, initiatedEventID) e.deleteRequestCancelInfos[initiatedEventID] = struct{}{} return nil } // DeletePendingSignal deletes details about a SignalInfo func (e *MutableStateImpl) DeletePendingSignal( initiatedEventID int64, ) error { if _, ok := e.pendingSignalInfoIDs[initiatedEventID]; ok { delete(e.pendingSignalInfoIDs, initiatedEventID) } else { e.logError( fmt.Sprintf("unable to find signal external workflow event ID: %v in mutable state", initiatedEventID), tag.ErrorTypeInvalidMutableStateAction, ) // log data inconsistency instead of returning an error e.logDataInconsistency() } delete(e.updateSignalInfos, initiatedEventID) e.deleteSignalInfos[initiatedEventID] = struct{}{} return nil } func (e *MutableStateImpl) writeEventToCache( event *historypb.HistoryEvent, ) { // For start event: store it within events cache so the recordWorkflowStarted transfer task doesn't need to // load it from database // For completion event: store it within events cache so we can communicate the result to parent execution // during the processing of DeleteTransferTask without loading this event from database e.eventsCache.PutEvent( e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionState.RunId, event.GetEventId(), event, ) } func (e *MutableStateImpl) HasParentExecution() bool { return e.executionInfo.ParentNamespaceId != "" && e.executionInfo.ParentWorkflowId != "" } func (e *MutableStateImpl) UpdateActivityProgress( ai *persistencespb.ActivityInfo, request *workflowservice.RecordActivityTaskHeartbeatRequest, ) { ai.Version = e.GetCurrentVersion() ai.LastHeartbeatDetails = request.Details now := e.timeSource.Now() ai.LastHeartbeatUpdateTime = &now e.updateActivityInfos[ai.ScheduleId] = ai e.syncActivityTasks[ai.ScheduleId] = struct{}{} } // ReplicateActivityInfo replicate the necessary activity information func (e *MutableStateImpl) ReplicateActivityInfo( request *historyservice.SyncActivityRequest, resetActivityTimerTaskStatus bool, ) error { ai, ok := e.pendingActivityInfoIDs[request.GetScheduledId()] if !ok { e.logError( fmt.Sprintf("unable to find activity event ID: %v in mutable state", request.GetScheduledId()), tag.ErrorTypeInvalidMutableStateAction, ) return ErrMissingActivityInfo } ai.Version = request.GetVersion() ai.ScheduledTime = request.GetScheduledTime() ai.StartedId = request.GetStartedId() ai.LastHeartbeatUpdateTime = request.GetLastHeartbeatTime() if ai.StartedId == common.EmptyEventID { ai.StartedTime = timestamp.TimePtr(time.Time{}) } else { ai.StartedTime = request.GetStartedTime() } ai.LastHeartbeatDetails = request.GetDetails() ai.Attempt = request.GetAttempt() ai.RetryLastWorkerIdentity = request.GetLastWorkerIdentity() ai.RetryLastFailure = request.GetLastFailure() if resetActivityTimerTaskStatus { ai.TimerTaskStatus = TimerTaskStatusNone } e.updateActivityInfos[ai.ScheduleId] = ai return nil } // UpdateActivity updates an activity func (e *MutableStateImpl) UpdateActivity( ai *persistencespb.ActivityInfo, ) error { if _, ok := e.pendingActivityInfoIDs[ai.ScheduleId]; !ok 
{ e.logError( fmt.Sprintf("unable to find activity ID: %v in mutable state", ai.ActivityId), tag.ErrorTypeInvalidMutableStateAction, ) return ErrMissingActivityInfo } e.pendingActivityInfoIDs[ai.ScheduleId] = ai e.updateActivityInfos[ai.ScheduleId] = ai return nil } // UpdateActivityWithTimerHeartbeat updates an activity func (e *MutableStateImpl) UpdateActivityWithTimerHeartbeat( ai *persistencespb.ActivityInfo, timerTimeoutVisibility time.Time, ) error { err := e.UpdateActivity(ai) if err != nil { return err } e.pendingActivityTimerHeartbeats[ai.ScheduleId] = timerTimeoutVisibility return nil } // DeleteActivity deletes details about an activity. func (e *MutableStateImpl) DeleteActivity( scheduleEventID int64, ) error { if activityInfo, ok := e.pendingActivityInfoIDs[scheduleEventID]; ok { delete(e.pendingActivityInfoIDs, scheduleEventID) delete(e.pendingActivityTimerHeartbeats, scheduleEventID) if _, ok = e.pendingActivityIDToEventID[activityInfo.ActivityId]; ok { delete(e.pendingActivityIDToEventID, activityInfo.ActivityId) } else { e.logError( fmt.Sprintf("unable to find activity ID: %v in mutable state", activityInfo.ActivityId), tag.ErrorTypeInvalidMutableStateAction, ) // log data inconsistency instead of returning an error e.logDataInconsistency() } } else { e.logError( fmt.Sprintf("unable to find activity event id: %v in mutable state", scheduleEventID), tag.ErrorTypeInvalidMutableStateAction, ) // log data inconsistency instead of returning an error e.logDataInconsistency() } delete(e.updateActivityInfos, scheduleEventID) e.deleteActivityInfos[scheduleEventID] = struct{}{} return nil } // GetUserTimerInfo gives details about a user timer. func (e *MutableStateImpl) GetUserTimerInfo( timerID string, ) (*persistencespb.TimerInfo, bool) { timerInfo, ok := e.pendingTimerInfoIDs[timerID] return timerInfo, ok } // GetUserTimerInfoByEventID gives details about a user timer. func (e *MutableStateImpl) GetUserTimerInfoByEventID( startEventID int64, ) (*persistencespb.TimerInfo, bool) { timerID, ok := e.pendingTimerEventIDToID[startEventID] if !ok { return nil, false } return e.GetUserTimerInfo(timerID) } // UpdateUserTimer updates the user timer in progress. func (e *MutableStateImpl) UpdateUserTimer( ti *persistencespb.TimerInfo, ) error { timerID, ok := e.pendingTimerEventIDToID[ti.GetStartedId()] if !ok { e.logError( fmt.Sprintf("unable to find timer event ID: %v in mutable state", ti.GetStartedId()), tag.ErrorTypeInvalidMutableStateAction, ) return ErrMissingTimerInfo } if _, ok := e.pendingTimerInfoIDs[timerID]; !ok { e.logError( fmt.Sprintf("unable to find timer ID: %v in mutable state", timerID), tag.ErrorTypeInvalidMutableStateAction, ) return ErrMissingTimerInfo } e.pendingTimerInfoIDs[ti.TimerId] = ti e.updateTimerInfos[ti.TimerId] = ti return nil } // DeleteUserTimer deletes an user timer. 
func (e *MutableStateImpl) DeleteUserTimer( timerID string, ) error { if timerInfo, ok := e.pendingTimerInfoIDs[timerID]; ok { delete(e.pendingTimerInfoIDs, timerID) if _, ok = e.pendingTimerEventIDToID[timerInfo.GetStartedId()]; ok { delete(e.pendingTimerEventIDToID, timerInfo.GetStartedId()) } else { e.logError( fmt.Sprintf("unable to find timer event ID: %v in mutable state", timerID), tag.ErrorTypeInvalidMutableStateAction, ) // log data inconsistency instead of returning an error e.logDataInconsistency() } } else { e.logError( fmt.Sprintf("unable to find timer ID: %v in mutable state", timerID), tag.ErrorTypeInvalidMutableStateAction, ) // log data inconsistency instead of returning an error e.logDataInconsistency() } delete(e.updateTimerInfos, timerID) e.deleteTimerInfos[timerID] = struct{}{} return nil } // nolint:unused func (e *MutableStateImpl) getWorkflowTaskInfo() *WorkflowTaskInfo { taskQueue := &taskqueuepb.TaskQueue{} if e.IsStickyTaskQueueEnabled() { taskQueue.Name = e.executionInfo.StickyTaskQueue taskQueue.Kind = enumspb.TASK_QUEUE_KIND_STICKY } else { taskQueue.Name = e.executionInfo.TaskQueue taskQueue.Kind = enumspb.TASK_QUEUE_KIND_NORMAL } return &WorkflowTaskInfo{ Version: e.executionInfo.WorkflowTaskVersion, ScheduleID: e.executionInfo.WorkflowTaskScheduleId, StartedID: e.executionInfo.WorkflowTaskStartedId, RequestID: e.executionInfo.WorkflowTaskRequestId, WorkflowTaskTimeout: e.executionInfo.WorkflowTaskTimeout, Attempt: e.executionInfo.WorkflowTaskAttempt, StartedTime: e.executionInfo.WorkflowTaskStartedTime, ScheduledTime: e.executionInfo.WorkflowTaskScheduledTime, TaskQueue: taskQueue, OriginalScheduledTime: e.executionInfo.WorkflowTaskOriginalScheduledTime, } } // GetWorkflowTaskInfo returns details about the in-progress workflow task func (e *MutableStateImpl) GetWorkflowTaskInfo( scheduleEventID int64, ) (*WorkflowTaskInfo, bool) { return e.workflowTaskManager.GetWorkflowTaskInfo(scheduleEventID) } func (e *MutableStateImpl) GetPendingActivityInfos() map[int64]*persistencespb.ActivityInfo { return e.pendingActivityInfoIDs } func (e *MutableStateImpl) GetPendingTimerInfos() map[string]*persistencespb.TimerInfo { return e.pendingTimerInfoIDs } func (e *MutableStateImpl) GetPendingChildExecutionInfos() map[int64]*persistencespb.ChildExecutionInfo { return e.pendingChildExecutionInfoIDs } func (e *MutableStateImpl) GetPendingRequestCancelExternalInfos() map[int64]*persistencespb.RequestCancelInfo { return e.pendingRequestCancelInfoIDs } func (e *MutableStateImpl) GetPendingSignalExternalInfos() map[int64]*persistencespb.SignalInfo { return e.pendingSignalInfoIDs } func (e *MutableStateImpl) HasProcessedOrPendingWorkflowTask() bool { return e.workflowTaskManager.HasProcessedOrPendingWorkflowTask() } func (e *MutableStateImpl) HasPendingWorkflowTask() bool { return e.workflowTaskManager.HasPendingWorkflowTask() } func (e *MutableStateImpl) GetPendingWorkflowTask() (*WorkflowTaskInfo, bool) { return e.workflowTaskManager.GetPendingWorkflowTask() } func (e *MutableStateImpl) HasInFlightWorkflowTask() bool { return e.workflowTaskManager.HasInFlightWorkflowTask() } func (e *MutableStateImpl) GetInFlightWorkflowTask() (*WorkflowTaskInfo, bool) { return e.workflowTaskManager.GetInFlightWorkflowTask() } func (e *MutableStateImpl) HasBufferedEvents() bool { return e.hBuilder.HasBufferEvents() } // UpdateWorkflowTask updates a workflow task. 
func (e *MutableStateImpl) UpdateWorkflowTask( workflowTask *WorkflowTaskInfo, ) { e.workflowTaskManager.UpdateWorkflowTask(workflowTask) } // DeleteWorkflowTask deletes a workflow task. func (e *MutableStateImpl) DeleteWorkflowTask() { e.workflowTaskManager.DeleteWorkflowTask() } func (e *MutableStateImpl) ClearStickyness() { e.executionInfo.StickyTaskQueue = "" e.executionInfo.StickyScheduleToStartTimeout = timestamp.DurationFromSeconds(0) } // GetLastFirstEventIDTxnID returns last first event ID and corresponding transaction ID // first event ID is the ID of a batch of events in a single history events record func (e *MutableStateImpl) GetLastFirstEventIDTxnID() (int64, int64) { return e.executionInfo.LastFirstEventId, e.executionInfo.LastFirstEventTxnId } // GetNextEventID returns next event ID func (e *MutableStateImpl) GetNextEventID() int64 { return e.hBuilder.NextEventID() } // GetPreviousStartedEventID returns last started workflow task event ID func (e *MutableStateImpl) GetPreviousStartedEventID() int64 { return e.executionInfo.LastWorkflowTaskStartId } func (e *MutableStateImpl) IsWorkflowExecutionRunning() bool { switch e.executionState.State { case enumsspb.WORKFLOW_EXECUTION_STATE_CREATED: return true case enumsspb.WORKFLOW_EXECUTION_STATE_RUNNING: return true case enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED: return false case enumsspb.WORKFLOW_EXECUTION_STATE_ZOMBIE: return false case enumsspb.WORKFLOW_EXECUTION_STATE_CORRUPTED: return false default: panic(fmt.Sprintf("unknown workflow state: %v", e.executionState.State)) } } func (e *MutableStateImpl) IsCancelRequested() bool { return e.executionInfo.CancelRequested } func (e *MutableStateImpl) IsSignalRequested( requestID string, ) bool { if _, ok := e.pendingSignalRequestedIDs[requestID]; ok { return true } return false } func (e *MutableStateImpl) AddSignalRequested( requestID string, ) { if e.pendingSignalRequestedIDs == nil { e.pendingSignalRequestedIDs = make(map[string]struct{}) } if e.updateSignalRequestedIDs == nil { e.updateSignalRequestedIDs = make(map[string]struct{}) } e.pendingSignalRequestedIDs[requestID] = struct{}{} // add requestID to set e.updateSignalRequestedIDs[requestID] = struct{}{} } func (e *MutableStateImpl) DeleteSignalRequested( requestID string, ) { delete(e.pendingSignalRequestedIDs, requestID) delete(e.updateSignalRequestedIDs, requestID) e.deleteSignalRequestedIDs[requestID] = struct{}{} } func (e *MutableStateImpl) addWorkflowExecutionStartedEventForContinueAsNew( parentExecutionInfo *workflowspb.ParentExecutionInfo, execution commonpb.WorkflowExecution, previousExecutionState MutableState, command *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes, firstRunID string, ) (*historypb.HistoryEvent, error) { previousExecutionInfo := previousExecutionState.GetExecutionInfo() taskQueue := previousExecutionInfo.TaskQueue if command.TaskQueue != nil { taskQueue = command.TaskQueue.GetName() } tq := &taskqueuepb.TaskQueue{ Name: taskQueue, Kind: enumspb.TASK_QUEUE_KIND_NORMAL, } workflowType := previousExecutionInfo.WorkflowTypeName if command.WorkflowType != nil { workflowType = command.WorkflowType.GetName() } wType := &commonpb.WorkflowType{} wType.Name = workflowType var taskTimeout *time.Duration if timestamp.DurationValue(command.GetWorkflowTaskTimeout()) == 0 { taskTimeout = previousExecutionInfo.DefaultWorkflowTaskTimeout } else { taskTimeout = command.GetWorkflowTaskTimeout() } // Workflow runTimeout is already set to the correct value in // 
validateContinueAsNewWorkflowExecutionAttributes runTimeout := command.GetWorkflowRunTimeout() createRequest := &workflowservice.StartWorkflowExecutionRequest{ RequestId: uuid.New(), Namespace: e.namespaceEntry.GetInfo().Name, WorkflowId: execution.WorkflowId, TaskQueue: tq, WorkflowType: wType, WorkflowExecutionTimeout: previousExecutionState.GetExecutionInfo().WorkflowExecutionTimeout, WorkflowRunTimeout: runTimeout, WorkflowTaskTimeout: taskTimeout, Input: command.Input, Header: command.Header, RetryPolicy: command.RetryPolicy, CronSchedule: command.CronSchedule, Memo: command.Memo, SearchAttributes: command.SearchAttributes, } enums.SetDefaultContinueAsNewInitiator(&command.Initiator) req := &historyservice.StartWorkflowExecutionRequest{ NamespaceId: e.namespaceEntry.GetInfo().Id, StartRequest: createRequest, ParentExecutionInfo: parentExecutionInfo, LastCompletionResult: command.LastCompletionResult, ContinuedFailure: command.GetFailure(), ContinueAsNewInitiator: command.Initiator, FirstWorkflowTaskBackoff: command.BackoffStartInterval, } if command.GetInitiator() == enumspb.CONTINUE_AS_NEW_INITIATOR_RETRY { req.Attempt = previousExecutionState.GetExecutionInfo().Attempt + 1 } else { req.Attempt = 1 } workflowTimeoutTime := timestamp.TimeValue(previousExecutionState.GetExecutionInfo().WorkflowExecutionExpirationTime) if !workflowTimeoutTime.IsZero() { req.WorkflowExecutionExpirationTime = &workflowTimeoutTime } // History event only has namespace so namespaceID has to be passed in explicitly to update the mutable state var parentNamespaceID string if parentExecutionInfo != nil { parentNamespaceID = parentExecutionInfo.GetNamespaceId() } event := e.hBuilder.AddWorkflowExecutionStartedEvent( *e.executionInfo.StartTime, req, previousExecutionInfo.AutoResetPoints, previousExecutionState.GetExecutionState().GetRunId(), firstRunID, execution.GetRunId(), ) if err := e.ReplicateWorkflowExecutionStartedEvent( parentNamespaceID, execution, createRequest.GetRequestId(), event, ); err != nil { return nil, err } if err := e.SetHistoryTree(e.GetExecutionState().GetRunId()); err != nil { return nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowStartTasks( timestamp.TimeValue(event.GetEventTime()), event, ); err != nil { return nil, err } if err := e.taskGenerator.GenerateRecordWorkflowStartedTasks( timestamp.TimeValue(event.GetEventTime()), event, ); err != nil { return nil, err } if err := e.AddFirstWorkflowTaskScheduled( event, ); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) AddWorkflowExecutionStartedEvent( execution commonpb.WorkflowExecution, startRequest *historyservice.StartWorkflowExecutionRequest, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowStarted if err := e.checkMutability(opTag); err != nil { return nil, err } request := startRequest.StartRequest eventID := e.GetNextEventID() if eventID != common.FirstEventID { e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(eventID), tag.ErrorTypeInvalidHistoryAction) return nil, e.createInternalServerError(opTag) } event := e.hBuilder.AddWorkflowExecutionStartedEvent( *e.executionInfo.StartTime, startRequest, nil, "", execution.GetRunId(), execution.GetRunId(), ) var parentNamespaceID string if startRequest.ParentExecutionInfo != nil { parentNamespaceID = startRequest.ParentExecutionInfo.GetNamespaceId() } if err := e.ReplicateWorkflowExecutionStartedEvent( parentNamespaceID, execution, request.GetRequestId(), 
event, ); err != nil { return nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowStartTasks( timestamp.TimeValue(event.GetEventTime()), event, ); err != nil { return nil, err } if err := e.taskGenerator.GenerateRecordWorkflowStartedTasks( timestamp.TimeValue(event.GetEventTime()), event, ); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateWorkflowExecutionStartedEvent( parentNamespaceID string, execution commonpb.WorkflowExecution, requestID string, startEvent *historypb.HistoryEvent, ) error { event := startEvent.GetWorkflowExecutionStartedEventAttributes() e.executionState.CreateRequestId = requestID e.executionState.RunId = execution.GetRunId() e.executionInfo.NamespaceId = e.namespaceEntry.GetInfo().Id e.executionInfo.WorkflowId = execution.GetWorkflowId() e.executionInfo.FirstExecutionRunId = event.GetFirstExecutionRunId() e.executionInfo.TaskQueue = event.TaskQueue.GetName() e.executionInfo.WorkflowTypeName = event.WorkflowType.GetName() e.executionInfo.WorkflowRunTimeout = event.GetWorkflowRunTimeout() e.executionInfo.WorkflowExecutionTimeout = event.GetWorkflowExecutionTimeout() e.executionInfo.DefaultWorkflowTaskTimeout = event.GetWorkflowTaskTimeout() if err := e.UpdateWorkflowStateStatus( enumsspb.WORKFLOW_EXECUTION_STATE_CREATED, enumspb.WORKFLOW_EXECUTION_STATUS_RUNNING, ); err != nil { return err } e.executionInfo.LastWorkflowTaskStartId = common.EmptyEventID e.executionInfo.LastFirstEventId = startEvent.GetEventId() e.executionInfo.WorkflowTaskVersion = common.EmptyVersion e.executionInfo.WorkflowTaskScheduleId = common.EmptyEventID e.executionInfo.WorkflowTaskStartedId = common.EmptyEventID e.executionInfo.WorkflowTaskRequestId = emptyUUID e.executionInfo.WorkflowTaskTimeout = timestamp.DurationFromSeconds(0) e.executionInfo.CronSchedule = event.GetCronSchedule() e.executionInfo.ParentNamespaceId = parentNamespaceID if event.ParentWorkflowExecution != nil { e.executionInfo.ParentWorkflowId = event.ParentWorkflowExecution.GetWorkflowId() e.executionInfo.ParentRunId = event.ParentWorkflowExecution.GetRunId() } if event.ParentInitiatedEventId != 0 { e.executionInfo.InitiatedId = event.GetParentInitiatedEventId() } else { e.executionInfo.InitiatedId = common.EmptyEventID } e.executionInfo.ExecutionTime = timestamp.TimePtr( e.executionInfo.StartTime.Add(timestamp.DurationValue(event.GetFirstWorkflowTaskBackoff())), ) e.executionInfo.Attempt = event.GetAttempt() if !timestamp.TimeValue(event.GetWorkflowExecutionExpirationTime()).IsZero() { e.executionInfo.WorkflowExecutionExpirationTime = event.GetWorkflowExecutionExpirationTime() } var workflowRunTimeoutTime time.Time workflowRunTimeoutDuration := timestamp.DurationValue(e.executionInfo.WorkflowRunTimeout) // if workflowRunTimeoutDuration == 0 then the workflowRunTimeoutTime will be 0 // meaning that there is not workflow run timeout if workflowRunTimeoutDuration != 0 { firstWorkflowTaskDelayDuration := timestamp.DurationValue(event.GetFirstWorkflowTaskBackoff()) workflowRunTimeoutDuration = workflowRunTimeoutDuration + firstWorkflowTaskDelayDuration workflowRunTimeoutTime = e.executionInfo.StartTime.Add(workflowRunTimeoutDuration) workflowExecutionTimeoutTime := timestamp.TimeValue(e.executionInfo.WorkflowExecutionExpirationTime) if !workflowExecutionTimeoutTime.IsZero() && workflowRunTimeoutTime.After(workflowExecutionTimeoutTime) { workflowRunTimeoutTime = workflowExecutionTimeoutTime } } e.executionInfo.WorkflowRunExpirationTime = 
timestamp.TimePtr(workflowRunTimeoutTime) if event.RetryPolicy != nil { e.executionInfo.HasRetryPolicy = true e.executionInfo.RetryBackoffCoefficient = event.RetryPolicy.GetBackoffCoefficient() e.executionInfo.RetryInitialInterval = event.RetryPolicy.GetInitialInterval() e.executionInfo.RetryMaximumAttempts = event.RetryPolicy.GetMaximumAttempts() e.executionInfo.RetryMaximumInterval = event.RetryPolicy.GetMaximumInterval() e.executionInfo.RetryNonRetryableErrorTypes = event.RetryPolicy.GetNonRetryableErrorTypes() } e.executionInfo.AutoResetPoints = rolloverAutoResetPointsWithExpiringTime( event.GetPrevAutoResetPoints(), event.GetContinuedExecutionRunId(), timestamp.TimeValue(startEvent.GetEventTime()), e.namespaceEntry.GetRetention(e.executionInfo.WorkflowId), ) if event.Memo != nil { e.executionInfo.Memo = event.Memo.GetFields() } if event.SearchAttributes != nil { e.executionInfo.SearchAttributes = event.SearchAttributes.GetIndexedFields() } e.writeEventToCache(startEvent) return nil } func (e *MutableStateImpl) AddFirstWorkflowTaskScheduled( startEvent *historypb.HistoryEvent, ) error { opTag := tag.WorkflowActionWorkflowTaskScheduled if err := e.checkMutability(opTag); err != nil { return err } return e.workflowTaskManager.AddFirstWorkflowTaskScheduled(startEvent) } func (e *MutableStateImpl) AddWorkflowTaskScheduledEvent( bypassTaskGeneration bool, ) (*WorkflowTaskInfo, error) { opTag := tag.WorkflowActionWorkflowTaskScheduled if err := e.checkMutability(opTag); err != nil { return nil, err } return e.workflowTaskManager.AddWorkflowTaskScheduledEvent(bypassTaskGeneration) } // AddWorkflowTaskScheduledEventAsHeartbeat is to record the first WorkflowTaskScheduledEvent during workflow task heartbeat. func (e *MutableStateImpl) AddWorkflowTaskScheduledEventAsHeartbeat( bypassTaskGeneration bool, originalScheduledTimestamp *time.Time, ) (*WorkflowTaskInfo, error) { opTag := tag.WorkflowActionWorkflowTaskScheduled if err := e.checkMutability(opTag); err != nil { return nil, err } return e.workflowTaskManager.AddWorkflowTaskScheduledEventAsHeartbeat(bypassTaskGeneration, originalScheduledTimestamp) } func (e *MutableStateImpl) ReplicateTransientWorkflowTaskScheduled() (*WorkflowTaskInfo, error) { return e.workflowTaskManager.ReplicateTransientWorkflowTaskScheduled() } func (e *MutableStateImpl) ReplicateWorkflowTaskScheduledEvent( version int64, scheduleID int64, taskQueue *taskqueuepb.TaskQueue, startToCloseTimeoutSeconds int32, attempt int32, scheduleTimestamp *time.Time, originalScheduledTimestamp *time.Time, ) (*WorkflowTaskInfo, error) { return e.workflowTaskManager.ReplicateWorkflowTaskScheduledEvent(version, scheduleID, taskQueue, startToCloseTimeoutSeconds, attempt, scheduleTimestamp, originalScheduledTimestamp) } func (e *MutableStateImpl) AddWorkflowTaskStartedEvent( scheduleEventID int64, requestID string, taskQueue *taskqueuepb.TaskQueue, identity string, ) (*historypb.HistoryEvent, *WorkflowTaskInfo, error) { opTag := tag.WorkflowActionWorkflowTaskStarted if err := e.checkMutability(opTag); err != nil { return nil, nil, err } return e.workflowTaskManager.AddWorkflowTaskStartedEvent(scheduleEventID, requestID, taskQueue, identity) } func (e *MutableStateImpl) ReplicateWorkflowTaskStartedEvent( workflowTask *WorkflowTaskInfo, version int64, scheduleID int64, startedID int64, requestID string, timestamp time.Time, ) (*WorkflowTaskInfo, error) { return e.workflowTaskManager.ReplicateWorkflowTaskStartedEvent(workflowTask, version, scheduleID, startedID, requestID, timestamp) } func 
(e *MutableStateImpl) CreateTransientWorkflowTaskEvents(
	workflowTask *WorkflowTaskInfo,
	identity string,
) (*historypb.HistoryEvent, *historypb.HistoryEvent) {
	return e.workflowTaskManager.CreateTransientWorkflowTaskEvents(workflowTask, identity)
}

// addBinaryCheckSumIfNotExists records an auto-reset point (the binary checksum plus the id of the first
// WorkflowTaskCompleted event produced by that binary), unless a point for this checksum already exists.
func (e *MutableStateImpl) addBinaryCheckSumIfNotExists(
	event *historypb.HistoryEvent,
	maxResetPoints int,
) error {
	binChecksum := event.GetWorkflowTaskCompletedEventAttributes().GetBinaryChecksum()
	if len(binChecksum) == 0 {
		return nil
	}
	exeInfo := e.executionInfo
	var currResetPoints []*workflowpb.ResetPointInfo
	if exeInfo.AutoResetPoints != nil && exeInfo.AutoResetPoints.Points != nil {
		currResetPoints = e.executionInfo.AutoResetPoints.Points
	} else {
		currResetPoints = make([]*workflowpb.ResetPointInfo, 0, 1)
	}

	// List of all recent binary checksums associated with the workflow.
	var recentBinaryChecksums []string

	for _, rp := range currResetPoints {
		recentBinaryChecksums = append(recentBinaryChecksums, rp.GetBinaryChecksum())
		if rp.GetBinaryChecksum() == binChecksum {
			// this checksum already exists
			return nil
		}
	}

	if len(currResetPoints) == maxResetPoints {
		// If exceeding the max limit, do rotation by taking the oldest one out.
		currResetPoints = currResetPoints[1:]
		recentBinaryChecksums = recentBinaryChecksums[1:]
	}
	// Adding current version of the binary checksum.
	recentBinaryChecksums = append(recentBinaryChecksums, binChecksum)

	resettable := true
	err := e.CheckResettable()
	if err != nil {
		resettable = false
	}
	info := &workflowpb.ResetPointInfo{
		BinaryChecksum:               binChecksum,
		RunId:                        e.executionState.GetRunId(),
		FirstWorkflowTaskCompletedId: event.GetEventId(),
		CreateTime:                   timestamp.TimePtr(e.timeSource.Now()),
		Resettable:                   resettable,
	}
	currResetPoints = append(currResetPoints, info)
	exeInfo.AutoResetPoints = &workflowpb.ResetPoints{
		Points: currResetPoints,
	}
	checksumsPayload, err := searchattribute.EncodeValue(recentBinaryChecksums, enumspb.INDEXED_VALUE_TYPE_KEYWORD)
	if err != nil {
		return err
	}
	if exeInfo.SearchAttributes == nil {
		exeInfo.SearchAttributes = make(map[string]*commonpb.Payload, 1)
	}
	exeInfo.SearchAttributes[searchattribute.BinaryChecksums] = checksumsPayload
	if e.shard.GetConfig().AdvancedVisibilityWritingMode() != common.AdvancedVisibilityWritingModeOff {
		return e.taskGenerator.GenerateWorkflowSearchAttrTasks(timestamp.TimeValue(event.GetEventTime()))
	}
	return nil
}

// TODO: we will relax this restriction once the reset API supports resetting with these pending operations.
// CheckResettable checks whether the workflow can be reset.
func (e *MutableStateImpl) CheckResettable() error {
	if len(e.GetPendingChildExecutionInfos()) > 0 {
		return serviceerror.NewInvalidArgument("cannot reset workflow to a point where it has pending child workflows")
	}
	if len(e.GetPendingRequestCancelExternalInfos()) > 0 {
		return serviceerror.NewInvalidArgument("cannot reset workflow to a point where it has pending external workflow cancellation requests")
	}
	if len(e.GetPendingSignalExternalInfos()) > 0 {
		return serviceerror.NewInvalidArgument("cannot reset workflow to a point where it has pending signals to send")
	}
	return nil
}

func (e *MutableStateImpl) AddWorkflowTaskCompletedEvent(
	scheduleEventID int64,
	startedEventID int64,
	request *workflowservice.RespondWorkflowTaskCompletedRequest,
	maxResetPoints int,
) (*historypb.HistoryEvent, error) {
	opTag := tag.WorkflowActionWorkflowTaskCompleted
	if err := e.checkMutability(opTag); err != nil {
		return nil, err
	}
	return
e.workflowTaskManager.AddWorkflowTaskCompletedEvent(scheduleEventID, startedEventID, request, maxResetPoints) } func (e *MutableStateImpl) ReplicateWorkflowTaskCompletedEvent( event *historypb.HistoryEvent, ) error { return e.workflowTaskManager.ReplicateWorkflowTaskCompletedEvent(event) } func (e *MutableStateImpl) AddWorkflowTaskTimedOutEvent( scheduleEventID int64, startedEventID int64, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowTaskTimedOut if err := e.checkMutability(opTag); err != nil { return nil, err } return e.workflowTaskManager.AddWorkflowTaskTimedOutEvent(scheduleEventID, startedEventID) } func (e *MutableStateImpl) ReplicateWorkflowTaskTimedOutEvent( timeoutType enumspb.TimeoutType, ) error { return e.workflowTaskManager.ReplicateWorkflowTaskTimedOutEvent(timeoutType) } func (e *MutableStateImpl) AddWorkflowTaskScheduleToStartTimeoutEvent( scheduleEventID int64, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowTaskTimedOut if err := e.checkMutability(opTag); err != nil { return nil, err } return e.workflowTaskManager.AddWorkflowTaskScheduleToStartTimeoutEvent(scheduleEventID) } func (e *MutableStateImpl) AddWorkflowTaskFailedEvent( scheduleEventID int64, startedEventID int64, cause enumspb.WorkflowTaskFailedCause, failure *failurepb.Failure, identity string, binChecksum string, baseRunID string, newRunID string, forkEventVersion int64, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowTaskFailed if err := e.checkMutability(opTag); err != nil { return nil, err } return e.workflowTaskManager.AddWorkflowTaskFailedEvent( scheduleEventID, startedEventID, cause, failure, identity, binChecksum, baseRunID, newRunID, forkEventVersion, ) } func (e *MutableStateImpl) ReplicateWorkflowTaskFailedEvent() error { return e.workflowTaskManager.ReplicateWorkflowTaskFailedEvent() } func (e *MutableStateImpl) AddActivityTaskScheduledEvent( workflowTaskCompletedEventID int64, command *commandpb.ScheduleActivityTaskCommandAttributes, ) (*historypb.HistoryEvent, *persistencespb.ActivityInfo, error) { opTag := tag.WorkflowActionActivityTaskScheduled if err := e.checkMutability(opTag); err != nil { return nil, nil, err } _, ok := e.GetActivityByActivityID(command.GetActivityId()) if ok { e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction) return nil, nil, e.createCallerError(opTag) } event := e.hBuilder.AddActivityTaskScheduledEvent(workflowTaskCompletedEventID, command) // Write the event to cache only on active cluster for processing on activity started or retried e.eventsCache.PutEvent( e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionState.RunId, event.GetEventId(), event, ) ai, err := e.ReplicateActivityTaskScheduledEvent(workflowTaskCompletedEventID, event) // TODO merge active & passive task generation if err := e.taskGenerator.GenerateActivityTransferTasks( timestamp.TimeValue(event.GetEventTime()), event, ); err != nil { return nil, nil, err } return event, ai, err } func (e *MutableStateImpl) ReplicateActivityTaskScheduledEvent( firstEventID int64, event *historypb.HistoryEvent, ) (*persistencespb.ActivityInfo, error) { attributes := event.GetActivityTaskScheduledEventAttributes() targetNamespaceID := e.executionInfo.NamespaceId if attributes.GetNamespace() != "" { targetNamespaceEntry, err := e.shard.GetNamespaceCache().GetNamespace(attributes.GetNamespace()) if err != nil { return nil, err } targetNamespaceID = 
targetNamespaceEntry.GetInfo().Id } scheduleEventID := event.GetEventId() scheduleToCloseTimeout := attributes.GetScheduleToCloseTimeout() ai := &persistencespb.ActivityInfo{ Version: event.GetVersion(), ScheduleId: scheduleEventID, ScheduledEventBatchId: firstEventID, ScheduledTime: event.GetEventTime(), StartedId: common.EmptyEventID, StartedTime: timestamp.TimePtr(time.Time{}), ActivityId: attributes.ActivityId, NamespaceId: targetNamespaceID, ScheduleToStartTimeout: attributes.GetScheduleToStartTimeout(), ScheduleToCloseTimeout: scheduleToCloseTimeout, StartToCloseTimeout: attributes.GetStartToCloseTimeout(), HeartbeatTimeout: attributes.GetHeartbeatTimeout(), CancelRequested: false, CancelRequestId: common.EmptyEventID, LastHeartbeatUpdateTime: timestamp.TimePtr(time.Time{}), TimerTaskStatus: TimerTaskStatusNone, TaskQueue: attributes.TaskQueue.GetName(), HasRetryPolicy: attributes.RetryPolicy != nil, Attempt: 1, } if ai.HasRetryPolicy { ai.RetryInitialInterval = attributes.RetryPolicy.GetInitialInterval() ai.RetryBackoffCoefficient = attributes.RetryPolicy.GetBackoffCoefficient() ai.RetryMaximumInterval = attributes.RetryPolicy.GetMaximumInterval() ai.RetryMaximumAttempts = attributes.RetryPolicy.GetMaximumAttempts() ai.RetryNonRetryableErrorTypes = attributes.RetryPolicy.NonRetryableErrorTypes if timestamp.DurationValue(scheduleToCloseTimeout) > 0 { ai.RetryExpirationTime = timestamp.TimePtr( timestamp.TimeValue(ai.ScheduledTime).Add(timestamp.DurationValue(scheduleToCloseTimeout)), ) } else { ai.RetryExpirationTime = timestamp.TimePtr(time.Time{}) } } e.pendingActivityInfoIDs[ai.ScheduleId] = ai e.pendingActivityIDToEventID[ai.ActivityId] = ai.ScheduleId e.updateActivityInfos[ai.ScheduleId] = ai return ai, nil } func (e *MutableStateImpl) addTransientActivityStartedEvent( scheduleEventID int64, ) error { ai, ok := e.GetActivityInfo(scheduleEventID) if !ok || ai.StartedId != common.TransientEventID { return nil } // activity task was started (as transient event), we need to add it now. 
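	// For an activity with a retry policy the started event is written lazily: AddActivityTaskStartedEvent
	// only marks StartedId as TransientEventID, and the real ActivityTaskStarted event is appended here,
	// immediately before the terminal (completed/failed/timed-out/canceled) event that references it.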
event := e.hBuilder.AddActivityTaskStartedEvent( scheduleEventID, ai.Attempt, ai.RequestId, ai.StartedIdentity, ai.RetryLastFailure, ) if !ai.StartedTime.IsZero() { // overwrite started event time to the one recorded in ActivityInfo event.EventTime = ai.StartedTime } return e.ReplicateActivityTaskStartedEvent(event) } func (e *MutableStateImpl) AddActivityTaskStartedEvent( ai *persistencespb.ActivityInfo, scheduleEventID int64, requestID string, identity string, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionActivityTaskStarted if err := e.checkMutability(opTag); err != nil { return nil, err } if !ai.HasRetryPolicy { event := e.hBuilder.AddActivityTaskStartedEvent( scheduleEventID, ai.Attempt, requestID, identity, ai.RetryLastFailure, ) if err := e.ReplicateActivityTaskStartedEvent(event); err != nil { return nil, err } return event, nil } // we might need to retry, so do not append started event just yet, // instead update mutable state and will record started event when activity task is closed ai.Version = e.GetCurrentVersion() ai.StartedId = common.TransientEventID ai.RequestId = requestID ai.StartedTime = timestamp.TimePtr(e.timeSource.Now()) ai.LastHeartbeatUpdateTime = ai.StartedTime ai.StartedIdentity = identity if err := e.UpdateActivity(ai); err != nil { return nil, err } e.syncActivityTasks[ai.ScheduleId] = struct{}{} return nil, nil } func (e *MutableStateImpl) ReplicateActivityTaskStartedEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetActivityTaskStartedEventAttributes() scheduleID := attributes.GetScheduledEventId() ai, ok := e.GetActivityInfo(scheduleID) if !ok { e.logError( fmt.Sprintf("unable to find activity event id: %v in mutable state", scheduleID), tag.ErrorTypeInvalidMutableStateAction, ) return ErrMissingActivityInfo } ai.Version = event.GetVersion() ai.StartedId = event.GetEventId() ai.RequestId = attributes.GetRequestId() ai.StartedTime = event.GetEventTime() ai.LastHeartbeatUpdateTime = ai.StartedTime e.updateActivityInfos[ai.ScheduleId] = ai return nil } func (e *MutableStateImpl) AddActivityTaskCompletedEvent( scheduleEventID int64, startedEventID int64, request *workflowservice.RespondActivityTaskCompletedRequest, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionActivityTaskCompleted if err := e.checkMutability(opTag); err != nil { return nil, err } if ai, ok := e.GetActivityInfo(scheduleEventID); !ok || ai.StartedId != startedEventID { e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowScheduleID(scheduleEventID), tag.WorkflowStartedID(startedEventID)) return nil, e.createInternalServerError(opTag) } if err := e.addTransientActivityStartedEvent(scheduleEventID); err != nil { return nil, err } event := e.hBuilder.AddActivityTaskCompletedEvent( scheduleEventID, startedEventID, request.Identity, request.Result, ) if err := e.ReplicateActivityTaskCompletedEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateActivityTaskCompletedEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetActivityTaskCompletedEventAttributes() scheduleID := attributes.GetScheduledEventId() return e.DeleteActivity(scheduleID) } func (e *MutableStateImpl) AddActivityTaskFailedEvent( scheduleEventID int64, startedEventID int64, failure *failurepb.Failure, retryState enumspb.RetryState, identity string, ) (*historypb.HistoryEvent, error) { opTag := 
tag.WorkflowActionActivityTaskFailed if err := e.checkMutability(opTag); err != nil { return nil, err } if ai, ok := e.GetActivityInfo(scheduleEventID); !ok || ai.StartedId != startedEventID { e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowScheduleID(scheduleEventID), tag.WorkflowStartedID(startedEventID)) return nil, e.createInternalServerError(opTag) } if err := e.addTransientActivityStartedEvent(scheduleEventID); err != nil { return nil, err } event := e.hBuilder.AddActivityTaskFailedEvent( scheduleEventID, startedEventID, failure, retryState, identity, ) if err := e.ReplicateActivityTaskFailedEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateActivityTaskFailedEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetActivityTaskFailedEventAttributes() scheduleID := attributes.GetScheduledEventId() return e.DeleteActivity(scheduleID) } func (e *MutableStateImpl) AddActivityTaskTimedOutEvent( scheduleEventID int64, startedEventID int64, timeoutFailure *failurepb.Failure, retryState enumspb.RetryState, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionActivityTaskTimedOut if err := e.checkMutability(opTag); err != nil { return nil, err } timeoutType := timeoutFailure.GetTimeoutFailureInfo().GetTimeoutType() ai, ok := e.GetActivityInfo(scheduleEventID) if !ok || ai.StartedId != startedEventID || ((timeoutType == enumspb.TIMEOUT_TYPE_START_TO_CLOSE || timeoutType == enumspb.TIMEOUT_TYPE_HEARTBEAT) && ai.StartedId == common.EmptyEventID) { e.logger.Warn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowScheduleID(ai.ScheduleId), tag.WorkflowStartedID(ai.StartedId), tag.WorkflowTimeoutType(timeoutType)) return nil, e.createInternalServerError(opTag) } timeoutFailure.Cause = ai.RetryLastFailure if err := e.addTransientActivityStartedEvent(scheduleEventID); err != nil { return nil, err } event := e.hBuilder.AddActivityTaskTimedOutEvent( scheduleEventID, startedEventID, timeoutFailure, retryState, ) if err := e.ReplicateActivityTaskTimedOutEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateActivityTaskTimedOutEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetActivityTaskTimedOutEventAttributes() scheduleID := attributes.GetScheduledEventId() return e.DeleteActivity(scheduleID) } func (e *MutableStateImpl) AddActivityTaskCancelRequestedEvent( workflowTaskCompletedEventID int64, scheduleID int64, _ string, ) (*historypb.HistoryEvent, *persistencespb.ActivityInfo, error) { opTag := tag.WorkflowActionActivityTaskCancelRequested if err := e.checkMutability(opTag); err != nil { return nil, nil, err } ai, ok := e.GetActivityInfo(scheduleID) if !ok { // It is possible both started and completed events are buffered for this activity if !e.hBuilder.HasActivityFinishEvent(scheduleID) { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowScheduleID(scheduleID)) return nil, nil, e.createCallerError(opTag) } } // Check for duplicate cancellation if ok && ai.CancelRequested { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowScheduleID(scheduleID)) 
return nil, nil, e.createCallerError(opTag) } // At this point we know this is a valid activity cancellation request actCancelReqEvent := e.hBuilder.AddActivityTaskCancelRequestedEvent(workflowTaskCompletedEventID, scheduleID) if err := e.ReplicateActivityTaskCancelRequestedEvent(actCancelReqEvent); err != nil { return nil, nil, err } return actCancelReqEvent, ai, nil } func (e *MutableStateImpl) ReplicateActivityTaskCancelRequestedEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetActivityTaskCancelRequestedEventAttributes() scheduleID := attributes.GetScheduledEventId() ai, ok := e.GetActivityInfo(scheduleID) if !ok { // This will only be called on active cluster if activity info is found in mutable state // Passive side logic should always have activity info in mutable state if this is called, as the only // scenario where active side logic could have this event without activity info in mutable state is when // activity start and complete events are buffered. return nil } ai.Version = event.GetVersion() // - We have the activity dispatched to worker. // - The activity might not be heartbeat'ing, but the activity can still call RecordActivityHeartBeat() // to see cancellation while reporting progress of the activity. ai.CancelRequested = true ai.CancelRequestId = event.GetEventId() e.updateActivityInfos[ai.ScheduleId] = ai return nil } func (e *MutableStateImpl) AddActivityTaskCanceledEvent( scheduleEventID int64, startedEventID int64, latestCancelRequestedEventID int64, details *commonpb.Payloads, identity string, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionActivityTaskCanceled if err := e.checkMutability(opTag); err != nil { return nil, err } ai, ok := e.GetActivityInfo(scheduleEventID) if !ok || ai.StartedId != startedEventID { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowScheduleID(scheduleEventID)) return nil, e.createInternalServerError(opTag) } // Verify cancel request as well. 
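	// A canceled event is only valid if an ActivityTaskCancelRequested event was recorded for this
	// activity first; otherwise the call below is rejected as an invalid history action.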
if !ai.CancelRequested { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowScheduleID(scheduleEventID), tag.WorkflowActivityID(ai.ActivityId), tag.WorkflowStartedID(ai.StartedId)) return nil, e.createInternalServerError(opTag) } if err := e.addTransientActivityStartedEvent(scheduleEventID); err != nil { return nil, err } event := e.hBuilder.AddActivityTaskCanceledEvent( scheduleEventID, startedEventID, latestCancelRequestedEventID, details, identity, ) if err := e.ReplicateActivityTaskCanceledEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateActivityTaskCanceledEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetActivityTaskCanceledEventAttributes() scheduleID := attributes.GetScheduledEventId() return e.DeleteActivity(scheduleID) } func (e *MutableStateImpl) AddCompletedWorkflowEvent( workflowTaskCompletedEventID int64, command *commandpb.CompleteWorkflowExecutionCommandAttributes, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowCompleted if err := e.checkMutability(opTag); err != nil { return nil, err } event := e.hBuilder.AddCompletedWorkflowEvent(workflowTaskCompletedEventID, command) if err := e.ReplicateWorkflowExecutionCompletedEvent(workflowTaskCompletedEventID, event); err != nil { return nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowCloseTasks( timestamp.TimeValue(event.GetEventTime()), ); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateWorkflowExecutionCompletedEvent( firstEventID int64, event *historypb.HistoryEvent, ) error { if err := e.UpdateWorkflowStateStatus( enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_COMPLETED, ); err != nil { return err } e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database e.ClearStickyness() e.writeEventToCache(event) return nil } func (e *MutableStateImpl) AddFailWorkflowEvent( workflowTaskCompletedEventID int64, retryState enumspb.RetryState, command *commandpb.FailWorkflowExecutionCommandAttributes, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowFailed if err := e.checkMutability(opTag); err != nil { return nil, err } event := e.hBuilder.AddFailWorkflowEvent(workflowTaskCompletedEventID, retryState, command) if err := e.ReplicateWorkflowExecutionFailedEvent(workflowTaskCompletedEventID, event); err != nil { return nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowCloseTasks( timestamp.TimeValue(event.GetEventTime()), ); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateWorkflowExecutionFailedEvent( firstEventID int64, event *historypb.HistoryEvent, ) error { if err := e.UpdateWorkflowStateStatus( enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_FAILED, ); err != nil { return err } e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database e.ClearStickyness() e.writeEventToCache(event) return nil } func (e *MutableStateImpl) AddTimeoutWorkflowEvent( firstEventID int64, retryState enumspb.RetryState, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowTimeout if err := e.checkMutability(opTag); err != nil { return nil, err } event := 
e.hBuilder.AddTimeoutWorkflowEvent(retryState) if err := e.ReplicateWorkflowExecutionTimedoutEvent(firstEventID, event); err != nil { return nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowCloseTasks( timestamp.TimeValue(event.GetEventTime()), ); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateWorkflowExecutionTimedoutEvent( firstEventID int64, event *historypb.HistoryEvent, ) error { if err := e.UpdateWorkflowStateStatus( enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_TIMED_OUT, ); err != nil { return err } e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database e.ClearStickyness() e.writeEventToCache(event) return nil } func (e *MutableStateImpl) AddWorkflowExecutionCancelRequestedEvent( request *historyservice.RequestCancelWorkflowExecutionRequest, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowCancelRequested if err := e.checkMutability(opTag); err != nil { return nil, err } if e.executionInfo.CancelRequested { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowState(e.executionState.State), tag.Bool(e.executionInfo.CancelRequested), tag.Key(e.executionInfo.CancelRequestId), ) return nil, e.createInternalServerError(opTag) } event := e.hBuilder.AddWorkflowExecutionCancelRequestedEvent(request) if err := e.ReplicateWorkflowExecutionCancelRequestedEvent(event); err != nil { return nil, err } // Set the CancelRequestID on the active cluster. This information is not part of the history event. e.executionInfo.CancelRequestId = request.CancelRequest.GetRequestId() return event, nil } func (e *MutableStateImpl) ReplicateWorkflowExecutionCancelRequestedEvent( _ *historypb.HistoryEvent, ) error { e.executionInfo.CancelRequested = true return nil } func (e *MutableStateImpl) AddWorkflowExecutionCanceledEvent( workflowTaskCompletedEventID int64, command *commandpb.CancelWorkflowExecutionCommandAttributes, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowCanceled if err := e.checkMutability(opTag); err != nil { return nil, err } event := e.hBuilder.AddWorkflowExecutionCanceledEvent(workflowTaskCompletedEventID, command) if err := e.ReplicateWorkflowExecutionCanceledEvent(workflowTaskCompletedEventID, event); err != nil { return nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowCloseTasks( timestamp.TimeValue(event.GetEventTime()), ); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateWorkflowExecutionCanceledEvent( firstEventID int64, event *historypb.HistoryEvent, ) error { if err := e.UpdateWorkflowStateStatus( enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_CANCELED, ); err != nil { return err } e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database e.ClearStickyness() e.writeEventToCache(event) return nil } func (e *MutableStateImpl) AddRequestCancelExternalWorkflowExecutionInitiatedEvent( workflowTaskCompletedEventID int64, cancelRequestID string, command *commandpb.RequestCancelExternalWorkflowExecutionCommandAttributes, ) (*historypb.HistoryEvent, *persistencespb.RequestCancelInfo, error) { opTag := tag.WorkflowActionExternalWorkflowCancelInitiated if err := e.checkMutability(opTag); err != nil { 
return nil, nil, err } event := e.hBuilder.AddRequestCancelExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, command) rci, err := e.ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, event, cancelRequestID) if err != nil { return nil, nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateRequestCancelExternalTasks( timestamp.TimeValue(event.GetEventTime()), event, ); err != nil { return nil, nil, err } return event, rci, nil } func (e *MutableStateImpl) ReplicateRequestCancelExternalWorkflowExecutionInitiatedEvent( firstEventID int64, event *historypb.HistoryEvent, cancelRequestID string, ) (*persistencespb.RequestCancelInfo, error) { // TODO: Evaluate if we need cancelRequestID also part of history event initiatedEventID := event.GetEventId() rci := &persistencespb.RequestCancelInfo{ Version: event.GetVersion(), InitiatedEventBatchId: firstEventID, InitiatedId: initiatedEventID, CancelRequestId: cancelRequestID, } e.pendingRequestCancelInfoIDs[rci.InitiatedId] = rci e.updateRequestCancelInfos[rci.InitiatedId] = rci return rci, nil } func (e *MutableStateImpl) AddExternalWorkflowExecutionCancelRequested( initiatedID int64, namespace string, workflowID string, runID string, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionExternalWorkflowCancelRequested if err := e.checkMutability(opTag); err != nil { return nil, err } _, ok := e.GetRequestCancelInfo(initiatedID) if !ok { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowInitiatedID(initiatedID)) return nil, e.createInternalServerError(opTag) } event := e.hBuilder.AddExternalWorkflowExecutionCancelRequested( initiatedID, namespace, workflowID, runID, ) if err := e.ReplicateExternalWorkflowExecutionCancelRequested(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateExternalWorkflowExecutionCancelRequested( event *historypb.HistoryEvent, ) error { initiatedID := event.GetExternalWorkflowExecutionCancelRequestedEventAttributes().GetInitiatedEventId() return e.DeletePendingRequestCancel(initiatedID) } func (e *MutableStateImpl) AddRequestCancelExternalWorkflowExecutionFailedEvent( initiatedID int64, namespace string, workflowID string, runID string, cause enumspb.CancelExternalWorkflowExecutionFailedCause, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionExternalWorkflowCancelFailed if err := e.checkMutability(opTag); err != nil { return nil, err } _, ok := e.GetRequestCancelInfo(initiatedID) if !ok { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowInitiatedID(initiatedID)) return nil, e.createInternalServerError(opTag) } event := e.hBuilder.AddRequestCancelExternalWorkflowExecutionFailedEvent( common.EmptyEventID, // TODO this field is not used at all initiatedID, namespace, workflowID, runID, cause, ) if err := e.ReplicateRequestCancelExternalWorkflowExecutionFailedEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateRequestCancelExternalWorkflowExecutionFailedEvent( event *historypb.HistoryEvent, ) error { initiatedID := event.GetRequestCancelExternalWorkflowExecutionFailedEventAttributes().GetInitiatedEventId() return e.DeletePendingRequestCancel(initiatedID) } func (e *MutableStateImpl) AddSignalExternalWorkflowExecutionInitiatedEvent( 
workflowTaskCompletedEventID int64, signalRequestID string, command *commandpb.SignalExternalWorkflowExecutionCommandAttributes, ) (*historypb.HistoryEvent, *persistencespb.SignalInfo, error) { opTag := tag.WorkflowActionExternalWorkflowSignalInitiated if err := e.checkMutability(opTag); err != nil { return nil, nil, err } event := e.hBuilder.AddSignalExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, command) si, err := e.ReplicateSignalExternalWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, event, signalRequestID) if err != nil { return nil, nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateSignalExternalTasks( timestamp.TimeValue(event.GetEventTime()), event, ); err != nil { return nil, nil, err } return event, si, nil } func (e *MutableStateImpl) ReplicateSignalExternalWorkflowExecutionInitiatedEvent( firstEventID int64, event *historypb.HistoryEvent, signalRequestID string, ) (*persistencespb.SignalInfo, error) { // TODO: Consider also writing signalRequestID to history event initiatedEventID := event.GetEventId() attributes := event.GetSignalExternalWorkflowExecutionInitiatedEventAttributes() si := &persistencespb.SignalInfo{ Version: event.GetVersion(), InitiatedEventBatchId: firstEventID, InitiatedId: initiatedEventID, RequestId: signalRequestID, Name: attributes.GetSignalName(), Input: attributes.Input, Control: attributes.Control, } e.pendingSignalInfoIDs[si.InitiatedId] = si e.updateSignalInfos[si.InitiatedId] = si return si, nil } func (e *MutableStateImpl) AddUpsertWorkflowSearchAttributesEvent( workflowTaskCompletedEventID int64, command *commandpb.UpsertWorkflowSearchAttributesCommandAttributes, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionUpsertWorkflowSearchAttributes if err := e.checkMutability(opTag); err != nil { return nil, err } event := e.hBuilder.AddUpsertWorkflowSearchAttributesEvent(workflowTaskCompletedEventID, command) e.ReplicateUpsertWorkflowSearchAttributesEvent(event) // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowSearchAttrTasks( timestamp.TimeValue(event.GetEventTime()), ); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateUpsertWorkflowSearchAttributesEvent( event *historypb.HistoryEvent, ) { upsertSearchAttr := event.GetUpsertWorkflowSearchAttributesEventAttributes().GetSearchAttributes().GetIndexedFields() currentSearchAttr := e.GetExecutionInfo().SearchAttributes e.executionInfo.SearchAttributes = mergeMapOfPayload(currentSearchAttr, upsertSearchAttr) } func mergeMapOfPayload( current map[string]*commonpb.Payload, upsert map[string]*commonpb.Payload, ) map[string]*commonpb.Payload { if current == nil { current = make(map[string]*commonpb.Payload) } for k, v := range upsert { current[k] = v } return current } func (e *MutableStateImpl) AddExternalWorkflowExecutionSignaled( initiatedID int64, namespace string, workflowID string, runID string, control string, // TODO this field is probably deprecated ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionExternalWorkflowSignalRequested if err := e.checkMutability(opTag); err != nil { return nil, err } _, ok := e.GetSignalInfo(initiatedID) if !ok { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowInitiatedID(initiatedID)) return nil, e.createInternalServerError(opTag) } event := e.hBuilder.AddExternalWorkflowExecutionSignaled( 
initiatedID, namespace, workflowID, runID, control, // TODO this field is probably deprecated ) if err := e.ReplicateExternalWorkflowExecutionSignaled(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateExternalWorkflowExecutionSignaled( event *historypb.HistoryEvent, ) error { initiatedID := event.GetExternalWorkflowExecutionSignaledEventAttributes().GetInitiatedEventId() return e.DeletePendingSignal(initiatedID) } func (e *MutableStateImpl) AddSignalExternalWorkflowExecutionFailedEvent( initiatedID int64, namespace string, workflowID string, runID string, control string, // TODO this field is probably deprecated cause enumspb.SignalExternalWorkflowExecutionFailedCause, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionExternalWorkflowSignalFailed if err := e.checkMutability(opTag); err != nil { return nil, err } _, ok := e.GetSignalInfo(initiatedID) if !ok { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowInitiatedID(initiatedID)) return nil, e.createInternalServerError(opTag) } event := e.hBuilder.AddSignalExternalWorkflowExecutionFailedEvent( common.EmptyEventID, // TODO this field is not used at all initiatedID, namespace, workflowID, runID, control, // TODO this field is probably deprecated cause, ) if err := e.ReplicateSignalExternalWorkflowExecutionFailedEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateSignalExternalWorkflowExecutionFailedEvent( event *historypb.HistoryEvent, ) error { initiatedID := event.GetSignalExternalWorkflowExecutionFailedEventAttributes().GetInitiatedEventId() return e.DeletePendingSignal(initiatedID) } func (e *MutableStateImpl) AddTimerStartedEvent( workflowTaskCompletedEventID int64, command *commandpb.StartTimerCommandAttributes, ) (*historypb.HistoryEvent, *persistencespb.TimerInfo, error) { opTag := tag.WorkflowActionTimerStarted if err := e.checkMutability(opTag); err != nil { return nil, nil, err } timerID := command.GetTimerId() _, ok := e.GetUserTimerInfo(timerID) if ok { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowTimerID(timerID)) return nil, nil, e.createCallerError(opTag) } event := e.hBuilder.AddTimerStartedEvent(workflowTaskCompletedEventID, command) ti, err := e.ReplicateTimerStartedEvent(event) if err != nil { return nil, nil, err } return event, ti, err } func (e *MutableStateImpl) ReplicateTimerStartedEvent( event *historypb.HistoryEvent, ) (*persistencespb.TimerInfo, error) { attributes := event.GetTimerStartedEventAttributes() timerID := attributes.GetTimerId() startToFireTimeout := timestamp.DurationValue(attributes.GetStartToFireTimeout()) // TODO: Time skew need to be taken in to account. 
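	// The expiry is computed from the TimerStarted event's own timestamp (not the local clock), so
	// replaying the same event, e.g. during replication, derives the same ExpiryTime.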
expiryTime := timestamp.TimeValue(event.GetEventTime()).Add(startToFireTimeout) // should use the event time, not now ti := &persistencespb.TimerInfo{ Version: event.GetVersion(), TimerId: timerID, ExpiryTime: &expiryTime, StartedId: event.GetEventId(), TaskStatus: TimerTaskStatusNone, } e.pendingTimerInfoIDs[ti.TimerId] = ti e.pendingTimerEventIDToID[ti.StartedId] = ti.TimerId e.updateTimerInfos[ti.TimerId] = ti return ti, nil } func (e *MutableStateImpl) AddTimerFiredEvent( timerID string, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionTimerFired if err := e.checkMutability(opTag); err != nil { return nil, err } timerInfo, ok := e.GetUserTimerInfo(timerID) if !ok { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowTimerID(timerID)) return nil, e.createInternalServerError(opTag) } // Timer is running. event := e.hBuilder.AddTimerFiredEvent(timerInfo.GetStartedId(), timerInfo.TimerId) if err := e.ReplicateTimerFiredEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateTimerFiredEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetTimerFiredEventAttributes() timerID := attributes.GetTimerId() return e.DeleteUserTimer(timerID) } func (e *MutableStateImpl) AddTimerCanceledEvent( workflowTaskCompletedEventID int64, command *commandpb.CancelTimerCommandAttributes, identity string, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionTimerCanceled if err := e.checkMutability(opTag); err != nil { return nil, err } var timerStartedID int64 timerID := command.GetTimerId() ti, ok := e.GetUserTimerInfo(timerID) if !ok { // if timer is not running then check if it has fired in the mutable state. // If so clear the timer from the mutable state. We need to check both the // bufferedEvents and the history builder timerFiredEvent := e.hBuilder.GetAndRemoveTimerFireEvent(timerID) if timerFiredEvent == nil { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowTimerID(timerID)) return nil, e.createCallerError(opTag) } timerStartedID = timerFiredEvent.GetTimerFiredEventAttributes().GetStartedEventId() } else { timerStartedID = ti.GetStartedId() } // Timer is running. 
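	// At this point timerStartedID refers either to the still-running timer's started event or to the
	// started event of the fired timer that was just removed from the buffer above.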
event := e.hBuilder.AddTimerCanceledEvent( workflowTaskCompletedEventID, timerStartedID, timerID, identity, ) if ok { if err := e.ReplicateTimerCanceledEvent(event); err != nil { return nil, err } } return event, nil } func (e *MutableStateImpl) ReplicateTimerCanceledEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetTimerCanceledEventAttributes() timerID := attributes.GetTimerId() return e.DeleteUserTimer(timerID) } func (e *MutableStateImpl) AddRecordMarkerEvent( workflowTaskCompletedEventID int64, command *commandpb.RecordMarkerCommandAttributes, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowRecordMarker if err := e.checkMutability(opTag); err != nil { return nil, err } return e.hBuilder.AddMarkerRecordedEvent(workflowTaskCompletedEventID, command), nil } func (e *MutableStateImpl) AddWorkflowExecutionTerminatedEvent( firstEventID int64, reason string, details *commonpb.Payloads, identity string, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowTerminated if err := e.checkMutability(opTag); err != nil { return nil, err } event := e.hBuilder.AddWorkflowExecutionTerminatedEvent(reason, details, identity) if err := e.ReplicateWorkflowExecutionTerminatedEvent(firstEventID, event); err != nil { return nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowCloseTasks( timestamp.TimeValue(event.GetEventTime()), ); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateWorkflowExecutionTerminatedEvent( firstEventID int64, event *historypb.HistoryEvent, ) error { if err := e.UpdateWorkflowStateStatus( enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_TERMINATED, ); err != nil { return err } e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database e.ClearStickyness() e.writeEventToCache(event) return nil } func (e *MutableStateImpl) AddWorkflowExecutionSignaled( signalName string, input *commonpb.Payloads, identity string, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionWorkflowSignaled if err := e.checkMutability(opTag); err != nil { return nil, err } event := e.hBuilder.AddWorkflowExecutionSignaledEvent(signalName, input, identity) if err := e.ReplicateWorkflowExecutionSignaled(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateWorkflowExecutionSignaled( _ *historypb.HistoryEvent, ) error { // Increment signal count in mutable state for this workflow execution e.executionInfo.SignalCount++ return nil } func (e *MutableStateImpl) AddContinueAsNewEvent( firstEventID int64, workflowTaskCompletedEventID int64, parentNamespace string, command *commandpb.ContinueAsNewWorkflowExecutionCommandAttributes, ) (*historypb.HistoryEvent, MutableState, error) { opTag := tag.WorkflowActionWorkflowContinueAsNew if err := e.checkMutability(opTag); err != nil { return nil, nil, err } var err error newRunID := uuid.New() newExecution := commonpb.WorkflowExecution{ WorkflowId: e.executionInfo.WorkflowId, RunId: newRunID, } // Extract ParentExecutionInfo from current run so it can be passed down to the next var parentInfo *workflowspb.ParentExecutionInfo if e.HasParentExecution() { parentInfo = &workflowspb.ParentExecutionInfo{ NamespaceId: e.executionInfo.ParentNamespaceId, Namespace: parentNamespace, Execution: &commonpb.WorkflowExecution{ WorkflowId: e.executionInfo.ParentWorkflowId, RunId: e.executionInfo.ParentRunId, }, 
InitiatedId: e.executionInfo.InitiatedId, } } continueAsNewEvent := e.hBuilder.AddContinuedAsNewEvent( workflowTaskCompletedEventID, newRunID, command, ) firstRunID := e.executionInfo.FirstExecutionRunId // This is needed for backwards compatibility. Workflow execution create with Temporal release v0.28.0 or earlier // does not have FirstExecutionRunID stored as part of mutable state. If this is not set then load it from // workflow execution started event. if len(firstRunID) == 0 { currentStartEvent, err := e.GetStartEvent() if err != nil { return nil, nil, err } firstRunID = currentStartEvent.GetWorkflowExecutionStartedEventAttributes().GetFirstExecutionRunId() } namespaceID := e.namespaceEntry.GetInfo().Id var newStateBuilder *MutableStateImpl newStateBuilder = NewMutableState( e.shard, e.shard.GetEventsCache(), e.logger, e.namespaceEntry, timestamp.TimeValue(continueAsNewEvent.GetEventTime()), ) if _, err = newStateBuilder.addWorkflowExecutionStartedEventForContinueAsNew( parentInfo, newExecution, e, command, firstRunID, ); err != nil { return nil, nil, serviceerror.NewInternal("Failed to add workflow execution started event.") } if err = e.ReplicateWorkflowExecutionContinuedAsNewEvent( firstEventID, namespaceID, continueAsNewEvent, ); err != nil { return nil, nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateWorkflowCloseTasks( timestamp.TimeValue(continueAsNewEvent.GetEventTime()), ); err != nil { return nil, nil, err } return continueAsNewEvent, newStateBuilder, nil } func rolloverAutoResetPointsWithExpiringTime( resetPoints *workflowpb.ResetPoints, prevRunID string, now time.Time, namespaceRetention time.Duration, ) *workflowpb.ResetPoints { if resetPoints == nil || resetPoints.Points == nil { return resetPoints } newPoints := make([]*workflowpb.ResetPointInfo, 0, len(resetPoints.Points)) expireTime := now.Add(namespaceRetention) for _, rp := range resetPoints.Points { if rp.GetRunId() == prevRunID { rp.ExpireTime = &expireTime } newPoints = append(newPoints, rp) } return &workflowpb.ResetPoints{ Points: newPoints, } } func (e *MutableStateImpl) ReplicateWorkflowExecutionContinuedAsNewEvent( firstEventID int64, _ string, continueAsNewEvent *historypb.HistoryEvent, ) error { if err := e.UpdateWorkflowStateStatus( enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED, enumspb.WORKFLOW_EXECUTION_STATUS_CONTINUED_AS_NEW, ); err != nil { return err } e.executionInfo.CompletionEventBatchId = firstEventID // Used when completion event needs to be loaded from database e.ClearStickyness() e.writeEventToCache(continueAsNewEvent) return nil } func (e *MutableStateImpl) AddStartChildWorkflowExecutionInitiatedEvent( workflowTaskCompletedEventID int64, createRequestID string, command *commandpb.StartChildWorkflowExecutionCommandAttributes, ) (*historypb.HistoryEvent, *persistencespb.ChildExecutionInfo, error) { opTag := tag.WorkflowActionChildWorkflowInitiated if err := e.checkMutability(opTag); err != nil { return nil, nil, err } event := e.hBuilder.AddStartChildWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, command) // Write the event to cache only on active cluster e.eventsCache.PutEvent(e.executionInfo.NamespaceId, e.executionInfo.WorkflowId, e.executionState.RunId, event.GetEventId(), event) ci, err := e.ReplicateStartChildWorkflowExecutionInitiatedEvent(workflowTaskCompletedEventID, event, createRequestID) if err != nil { return nil, nil, err } // TODO merge active & passive task generation if err := e.taskGenerator.GenerateChildWorkflowTasks( 
timestamp.TimeValue(event.GetEventTime()), event, ); err != nil { return nil, nil, err } return event, ci, nil } func (e *MutableStateImpl) ReplicateStartChildWorkflowExecutionInitiatedEvent( firstEventID int64, event *historypb.HistoryEvent, createRequestID string, ) (*persistencespb.ChildExecutionInfo, error) { initiatedEventID := event.GetEventId() attributes := event.GetStartChildWorkflowExecutionInitiatedEventAttributes() ci := &persistencespb.ChildExecutionInfo{ Version: event.GetVersion(), InitiatedId: initiatedEventID, InitiatedEventBatchId: firstEventID, StartedId: common.EmptyEventID, StartedWorkflowId: attributes.GetWorkflowId(), CreateRequestId: createRequestID, Namespace: attributes.GetNamespace(), WorkflowTypeName: attributes.GetWorkflowType().GetName(), ParentClosePolicy: attributes.GetParentClosePolicy(), } e.pendingChildExecutionInfoIDs[ci.InitiatedId] = ci e.updateChildExecutionInfos[ci.InitiatedId] = ci return ci, nil } func (e *MutableStateImpl) AddChildWorkflowExecutionStartedEvent( namespace string, execution *commonpb.WorkflowExecution, workflowType *commonpb.WorkflowType, initiatedID int64, header *commonpb.Header, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionChildWorkflowStarted if err := e.checkMutability(opTag); err != nil { return nil, err } ci, ok := e.GetChildExecutionInfo(initiatedID) if !ok || ci.StartedId != common.EmptyEventID { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowInitiatedID(initiatedID)) return nil, e.createInternalServerError(opTag) } event := e.hBuilder.AddChildWorkflowExecutionStartedEvent( initiatedID, namespace, execution, workflowType, header, ) if err := e.ReplicateChildWorkflowExecutionStartedEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateChildWorkflowExecutionStartedEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetChildWorkflowExecutionStartedEventAttributes() initiatedID := attributes.GetInitiatedEventId() ci, _ := e.GetChildExecutionInfo(initiatedID) ci.StartedId = event.GetEventId() ci.StartedRunId = attributes.GetWorkflowExecution().GetRunId() e.updateChildExecutionInfos[ci.InitiatedId] = ci return nil } func (e *MutableStateImpl) AddStartChildWorkflowExecutionFailedEvent( initiatedID int64, cause enumspb.StartChildWorkflowExecutionFailedCause, initiatedEventAttributes *historypb.StartChildWorkflowExecutionInitiatedEventAttributes, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionChildWorkflowInitiationFailed if err := e.checkMutability(opTag); err != nil { return nil, err } ci, ok := e.GetChildExecutionInfo(initiatedID) if !ok || ci.StartedId != common.EmptyEventID { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowInitiatedID(initiatedID)) return nil, e.createInternalServerError(opTag) } event := e.hBuilder.AddStartChildWorkflowExecutionFailedEvent( common.EmptyEventID, // TODO this field is not used at all initiatedID, cause, initiatedEventAttributes.Namespace, initiatedEventAttributes.WorkflowId, initiatedEventAttributes.WorkflowType, initiatedEventAttributes.Control, // TODO this field is probably deprecated ) if err := e.ReplicateStartChildWorkflowExecutionFailedEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateStartChildWorkflowExecutionFailedEvent( 
event *historypb.HistoryEvent,
) error {
	attributes := event.GetStartChildWorkflowExecutionFailedEventAttributes()
	initiatedID := attributes.GetInitiatedEventId()

	return e.DeletePendingChildExecution(initiatedID)
}

func (e *MutableStateImpl) AddChildWorkflowExecutionCompletedEvent(
	initiatedID int64,
	childExecution *commonpb.WorkflowExecution,
	attributes *historypb.WorkflowExecutionCompletedEventAttributes,
) (*historypb.HistoryEvent, error) {
	opTag := tag.WorkflowActionChildWorkflowCompleted
	if err := e.checkMutability(opTag); err != nil {
		return nil, err
	}
	ci, ok := e.GetChildExecutionInfo(initiatedID)
	if !ok || ci.StartedId == common.EmptyEventID {
		e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
			tag.WorkflowEventID(e.GetNextEventID()),
			tag.ErrorTypeInvalidHistoryAction,
			tag.Bool(ok),
			tag.WorkflowInitiatedID(initiatedID))
		return nil, e.createInternalServerError(opTag)
	}

	workflowType := &commonpb.WorkflowType{
		Name: ci.WorkflowTypeName,
	}
	event := e.hBuilder.AddChildWorkflowExecutionCompletedEvent(
		ci.InitiatedId,
		ci.StartedId,
		ci.Namespace,
		childExecution,
		workflowType,
		attributes.Result,
	)
	if err := e.ReplicateChildWorkflowExecutionCompletedEvent(event); err != nil {
		return nil, err
	}
	return event, nil
}

func (e *MutableStateImpl) ReplicateChildWorkflowExecutionCompletedEvent(
	event *historypb.HistoryEvent,
) error {
	attributes := event.GetChildWorkflowExecutionCompletedEventAttributes()
	initiatedID := attributes.GetInitiatedEventId()

	return e.DeletePendingChildExecution(initiatedID)
}

func (e *MutableStateImpl) AddChildWorkflowExecutionFailedEvent(
	initiatedID int64,
	childExecution *commonpb.WorkflowExecution,
	attributes *historypb.WorkflowExecutionFailedEventAttributes,
) (*historypb.HistoryEvent, error) {
	opTag := tag.WorkflowActionChildWorkflowFailed
	if err := e.checkMutability(opTag); err != nil {
		return nil, err
	}
	ci, ok := e.GetChildExecutionInfo(initiatedID)
	if !ok || ci.StartedId == common.EmptyEventID {
		e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
			tag.WorkflowEventID(e.GetNextEventID()),
			tag.ErrorTypeInvalidHistoryAction,
			tag.Bool(ok),
			tag.WorkflowInitiatedID(initiatedID))
		return nil, e.createInternalServerError(opTag)
	}

	workflowType := &commonpb.WorkflowType{
		Name: ci.WorkflowTypeName,
	}
	event := e.hBuilder.AddChildWorkflowExecutionFailedEvent(
		ci.InitiatedId,
		ci.StartedId,
		ci.Namespace,
		childExecution,
		workflowType,
		attributes.Failure,
		attributes.RetryState,
	)
	if err := e.ReplicateChildWorkflowExecutionFailedEvent(event); err != nil {
		return nil, err
	}
	return event, nil
}

func (e *MutableStateImpl) ReplicateChildWorkflowExecutionFailedEvent(
	event *historypb.HistoryEvent,
) error {
	attributes := event.GetChildWorkflowExecutionFailedEventAttributes()
	initiatedID := attributes.GetInitiatedEventId()

	return e.DeletePendingChildExecution(initiatedID)
}

func (e *MutableStateImpl) AddChildWorkflowExecutionCanceledEvent(
	initiatedID int64,
	childExecution *commonpb.WorkflowExecution,
	attributes *historypb.WorkflowExecutionCanceledEventAttributes,
) (*historypb.HistoryEvent, error) {
	opTag := tag.WorkflowActionChildWorkflowCanceled
	if err := e.checkMutability(opTag); err != nil {
		return nil, err
	}
	ci, ok := e.GetChildExecutionInfo(initiatedID)
	if !ok || ci.StartedId == common.EmptyEventID {
		e.logWarn(mutableStateInvalidHistoryActionMsg, opTag,
			tag.WorkflowEventID(e.GetNextEventID()),
			tag.ErrorTypeInvalidHistoryAction,
			tag.Bool(ok),
			tag.WorkflowInitiatedID(initiatedID))
		return nil, e.createInternalServerError(opTag)
	}

	workflowType := &commonpb.WorkflowType{
		Name:
ci.WorkflowTypeName, } event := e.hBuilder.AddChildWorkflowExecutionCanceledEvent( ci.InitiatedId, ci.StartedId, ci.Namespace, childExecution, workflowType, attributes.Details, ) if err := e.ReplicateChildWorkflowExecutionCanceledEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateChildWorkflowExecutionCanceledEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetChildWorkflowExecutionCanceledEventAttributes() initiatedID := attributes.GetInitiatedEventId() return e.DeletePendingChildExecution(initiatedID) } func (e *MutableStateImpl) AddChildWorkflowExecutionTerminatedEvent( initiatedID int64, childExecution *commonpb.WorkflowExecution, _ *historypb.WorkflowExecutionTerminatedEventAttributes, // TODO this field is not used at all ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionChildWorkflowTerminated if err := e.checkMutability(opTag); err != nil { return nil, err } ci, ok := e.GetChildExecutionInfo(initiatedID) if !ok || ci.StartedId == common.EmptyEventID { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowInitiatedID(initiatedID)) return nil, e.createInternalServerError(opTag) } workflowType := &commonpb.WorkflowType{ Name: ci.WorkflowTypeName, } event := e.hBuilder.AddChildWorkflowExecutionTerminatedEvent( ci.InitiatedId, ci.StartedId, ci.Namespace, childExecution, workflowType, ) if err := e.ReplicateChildWorkflowExecutionTerminatedEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateChildWorkflowExecutionTerminatedEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetChildWorkflowExecutionTerminatedEventAttributes() initiatedID := attributes.GetInitiatedEventId() return e.DeletePendingChildExecution(initiatedID) } func (e *MutableStateImpl) AddChildWorkflowExecutionTimedOutEvent( initiatedID int64, childExecution *commonpb.WorkflowExecution, attributes *historypb.WorkflowExecutionTimedOutEventAttributes, ) (*historypb.HistoryEvent, error) { opTag := tag.WorkflowActionChildWorkflowTimedOut if err := e.checkMutability(opTag); err != nil { return nil, err } ci, ok := e.GetChildExecutionInfo(initiatedID) if !ok || ci.StartedId == common.EmptyEventID { e.logWarn(mutableStateInvalidHistoryActionMsg, opTag, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.Bool(ok), tag.WorkflowInitiatedID(initiatedID)) return nil, e.createInternalServerError(opTag) } workflowType := &commonpb.WorkflowType{ Name: ci.WorkflowTypeName, } event := e.hBuilder.AddChildWorkflowExecutionTimedOutEvent( ci.InitiatedId, ci.StartedId, ci.Namespace, childExecution, workflowType, attributes.RetryState, ) if err := e.ReplicateChildWorkflowExecutionTimedOutEvent(event); err != nil { return nil, err } return event, nil } func (e *MutableStateImpl) ReplicateChildWorkflowExecutionTimedOutEvent( event *historypb.HistoryEvent, ) error { attributes := event.GetChildWorkflowExecutionTimedOutEventAttributes() initiatedID := attributes.GetInitiatedEventId() return e.DeletePendingChildExecution(initiatedID) } func (e *MutableStateImpl) RetryActivity( ai *persistencespb.ActivityInfo, failure *failurepb.Failure, ) (enumspb.RetryState, error) { opTag := tag.WorkflowActionActivityTaskRetry if err := e.checkMutability(opTag); err != nil { return enumspb.RETRY_STATE_INTERNAL_SERVER_ERROR, err } if !ai.HasRetryPolicy { return 
enumspb.RETRY_STATE_RETRY_POLICY_NOT_SET, nil } if ai.CancelRequested { return enumspb.RETRY_STATE_CANCEL_REQUESTED, nil } now := e.timeSource.Now() backoffInterval, retryState := getBackoffInterval( now, ai.Attempt, ai.RetryMaximumAttempts, ai.RetryInitialInterval, ai.RetryMaximumInterval, ai.RetryExpirationTime, ai.RetryBackoffCoefficient, failure, ai.RetryNonRetryableErrorTypes, ) if retryState != enumspb.RETRY_STATE_IN_PROGRESS { return retryState, nil } // a retry is needed, update activity info for next retry ai.Version = e.GetCurrentVersion() ai.Attempt++ ai.ScheduledTime = timestamp.TimePtr(now.Add(backoffInterval)) // update to next schedule time ai.StartedId = common.EmptyEventID ai.RequestId = "" ai.StartedTime = timestamp.TimePtr(time.Time{}) ai.TimerTaskStatus = TimerTaskStatusNone ai.RetryLastWorkerIdentity = ai.StartedIdentity ai.RetryLastFailure = failure if err := e.taskGenerator.GenerateActivityRetryTasks( ai.ScheduleId, ); err != nil { return enumspb.RETRY_STATE_INTERNAL_SERVER_ERROR, err } e.updateActivityInfos[ai.ScheduleId] = ai e.syncActivityTasks[ai.ScheduleId] = struct{}{} return enumspb.RETRY_STATE_IN_PROGRESS, nil } // TODO mutable state should generate corresponding transfer / timer tasks according to // updates accumulated, while currently all transfer / timer tasks are managed manually // TODO convert AddTransferTasks to prepareTransferTasks // AddTransferTasks append transfer tasks func (e *MutableStateImpl) AddTransferTasks( transferTasks ...persistence.Task, ) { e.InsertTransferTasks = append(e.InsertTransferTasks, transferTasks...) } // TODO convert AddTransferTasks to prepareTimerTasks // AddTimerTasks append timer tasks func (e *MutableStateImpl) AddTimerTasks( timerTasks ...persistence.Task, ) { e.InsertTimerTasks = append(e.InsertTimerTasks, timerTasks...) } // AddVisibilityTasks append visibility tasks func (e *MutableStateImpl) AddVisibilityTasks( visibilityTasks ...persistence.Task, ) { e.InsertVisibilityTasks = append(e.InsertVisibilityTasks, visibilityTasks...) 
} func (e *MutableStateImpl) SetUpdateCondition( nextEventIDInDB int64, dbRecordVersion int64, ) { e.nextEventIDInDB = nextEventIDInDB e.dbRecordVersion = dbRecordVersion } func (e *MutableStateImpl) GetUpdateCondition() (int64, int64) { return e.nextEventIDInDB, e.dbRecordVersion } func (e *MutableStateImpl) GetWorkflowStateStatus() (enumsspb.WorkflowExecutionState, enumspb.WorkflowExecutionStatus) { return e.executionState.State, e.executionState.Status } func (e *MutableStateImpl) UpdateWorkflowStateStatus( state enumsspb.WorkflowExecutionState, status enumspb.WorkflowExecutionStatus, ) error { return setStateStatus(e.executionState, state, status) } func (e *MutableStateImpl) StartTransaction( namespaceEntry *cache.NamespaceCacheEntry, ) (bool, error) { e.namespaceEntry = namespaceEntry if err := e.UpdateCurrentVersion(namespaceEntry.GetFailoverVersion(), false); err != nil { return false, err } flushBeforeReady, err := e.startTransactionHandleWorkflowTaskFailover(false) if err != nil { return false, err } e.startTransactionHandleWorkflowTaskTTL() return flushBeforeReady, nil } func (e *MutableStateImpl) StartTransactionSkipWorkflowTaskFail( namespaceEntry *cache.NamespaceCacheEntry, ) error { e.namespaceEntry = namespaceEntry if err := e.UpdateCurrentVersion(namespaceEntry.GetFailoverVersion(), false); err != nil { return err } _, err := e.startTransactionHandleWorkflowTaskFailover(true) return err } func (e *MutableStateImpl) CloseTransactionAsMutation( now time.Time, transactionPolicy TransactionPolicy, ) (*persistence.WorkflowMutation, []*persistence.WorkflowEvents, error) { if err := e.prepareCloseTransaction( now, transactionPolicy, ); err != nil { return nil, nil, err } workflowEventsSeq, bufferEvents, clearBuffer, err := e.prepareEventsAndReplicationTasks(transactionPolicy) if err != nil { return nil, nil, err } if len(workflowEventsSeq) > 0 { lastEvents := workflowEventsSeq[len(workflowEventsSeq)-1].Events lastEvent := lastEvents[len(lastEvents)-1] if err := e.updateWithLastWriteEvent( lastEvent, transactionPolicy, ); err != nil { return nil, nil, err } } setTaskInfo(e.GetCurrentVersion(), now, e.InsertTransferTasks, e.InsertTimerTasks, e.InsertVisibilityTasks) // update last update time e.executionInfo.LastUpdateTime = &now e.executionInfo.StateTransitionCount += 1 // we generate checksum here based on the assumption that the returned // snapshot object is considered immutable. 
As of this writing, the only // code that modifies the returned object lives inside Context.resetWorkflowExecution // currently, the updates done inside Context.resetWorkflowExecution doesn't // impact the checksum calculation checksum := e.generateChecksum() if e.dbRecordVersion == 0 { // noop, existing behavior } else { e.dbRecordVersion += 1 } workflowMutation := &persistence.WorkflowMutation{ ExecutionInfo: e.executionInfo, ExecutionState: e.executionState, NextEventID: e.hBuilder.NextEventID(), UpsertActivityInfos: e.updateActivityInfos, DeleteActivityInfos: e.deleteActivityInfos, UpsertTimerInfos: e.updateTimerInfos, DeleteTimerInfos: e.deleteTimerInfos, UpsertChildExecutionInfos: e.updateChildExecutionInfos, DeleteChildExecutionInfos: e.deleteChildExecutionInfos, UpsertRequestCancelInfos: e.updateRequestCancelInfos, DeleteRequestCancelInfos: e.deleteRequestCancelInfos, UpsertSignalInfos: e.updateSignalInfos, DeleteSignalInfos: e.deleteSignalInfos, UpsertSignalRequestedIDs: e.updateSignalRequestedIDs, DeleteSignalRequestedIDs: e.deleteSignalRequestedIDs, NewBufferedEvents: bufferEvents, ClearBufferedEvents: clearBuffer, TransferTasks: e.InsertTransferTasks, ReplicationTasks: e.InsertReplicationTasks, TimerTasks: e.InsertTimerTasks, VisibilityTasks: e.InsertVisibilityTasks, Condition: e.nextEventIDInDB, DBRecordVersion: e.dbRecordVersion, Checksum: checksum, } e.checksum = checksum if err := e.cleanupTransaction(transactionPolicy); err != nil { return nil, nil, err } return workflowMutation, workflowEventsSeq, nil } func (e *MutableStateImpl) CloseTransactionAsSnapshot( now time.Time, transactionPolicy TransactionPolicy, ) (*persistence.WorkflowSnapshot, []*persistence.WorkflowEvents, error) { if err := e.prepareCloseTransaction( now, transactionPolicy, ); err != nil { return nil, nil, err } workflowEventsSeq, bufferEvents, _, err := e.prepareEventsAndReplicationTasks(transactionPolicy) if err != nil { return nil, nil, err } if len(bufferEvents) > 0 { // TODO do we need the functionality to generate snapshot with buffered events? return nil, nil, serviceerror.NewInternal("cannot generate workflow snapshot with buffered events") } if len(workflowEventsSeq) > 0 { lastEvents := workflowEventsSeq[len(workflowEventsSeq)-1].Events lastEvent := lastEvents[len(lastEvents)-1] if err := e.updateWithLastWriteEvent( lastEvent, transactionPolicy, ); err != nil { return nil, nil, err } } setTaskInfo(e.GetCurrentVersion(), now, e.InsertTransferTasks, e.InsertTimerTasks, e.InsertVisibilityTasks) // update last update time e.executionInfo.LastUpdateTime = &now e.executionInfo.StateTransitionCount += 1 // we generate checksum here based on the assumption that the returned // snapshot object is considered immutable. 
As of this writing, the only // code that modifies the returned object lives inside Context.resetWorkflowExecution // currently, the updates done inside Context.resetWorkflowExecution doesn't // impact the checksum calculation checksum := e.generateChecksum() if e.dbRecordVersion == 0 { // noop, existing behavior } else { e.dbRecordVersion += 1 } workflowSnapshot := &persistence.WorkflowSnapshot{ ExecutionInfo: e.executionInfo, ExecutionState: e.executionState, NextEventID: e.hBuilder.NextEventID(), ActivityInfos: e.pendingActivityInfoIDs, TimerInfos: e.pendingTimerInfoIDs, ChildExecutionInfos: e.pendingChildExecutionInfoIDs, RequestCancelInfos: e.pendingRequestCancelInfoIDs, SignalInfos: e.pendingSignalInfoIDs, SignalRequestedIDs: e.pendingSignalRequestedIDs, TransferTasks: e.InsertTransferTasks, ReplicationTasks: e.InsertReplicationTasks, TimerTasks: e.InsertTimerTasks, VisibilityTasks: e.InsertVisibilityTasks, Condition: e.nextEventIDInDB, DBRecordVersion: e.dbRecordVersion, Checksum: checksum, } e.checksum = checksum if err := e.cleanupTransaction(transactionPolicy); err != nil { return nil, nil, err } return workflowSnapshot, workflowEventsSeq, nil } func (e *MutableStateImpl) IsResourceDuplicated( resourceDedupKey definition.DeduplicationID, ) bool { id := definition.GenerateDeduplicationKey(resourceDedupKey) _, duplicated := e.appliedEvents[id] return duplicated } func (e *MutableStateImpl) UpdateDuplicatedResource( resourceDedupKey definition.DeduplicationID, ) { id := definition.GenerateDeduplicationKey(resourceDedupKey) e.appliedEvents[id] = struct{}{} } func (e *MutableStateImpl) prepareCloseTransaction( now time.Time, transactionPolicy TransactionPolicy, ) error { if err := e.closeTransactionWithPolicyCheck( transactionPolicy, ); err != nil { return err } if err := e.closeTransactionHandleBufferedEventsLimit( transactionPolicy, ); err != nil { return err } if err := e.closeTransactionHandleWorkflowReset( now, transactionPolicy, ); err != nil { return err } // TODO merge active & passive task generation // NOTE: this function must be the last call // since we only generate at most one activity & user timer, // regardless of how many activity & user timer created // so the calculation must be at the very end return e.closeTransactionHandleActivityUserTimerTasks( now, transactionPolicy, ) } func (e *MutableStateImpl) cleanupTransaction( _ TransactionPolicy, ) error { e.updateActivityInfos = make(map[int64]*persistencespb.ActivityInfo) e.deleteActivityInfos = make(map[int64]struct{}) e.syncActivityTasks = make(map[int64]struct{}) e.updateTimerInfos = make(map[string]*persistencespb.TimerInfo) e.deleteTimerInfos = make(map[string]struct{}) e.updateChildExecutionInfos = make(map[int64]*persistencespb.ChildExecutionInfo) e.deleteChildExecutionInfos = make(map[int64]struct{}) e.updateRequestCancelInfos = make(map[int64]*persistencespb.RequestCancelInfo) e.deleteRequestCancelInfos = make(map[int64]struct{}) e.updateSignalInfos = make(map[int64]*persistencespb.SignalInfo) e.deleteSignalInfos = make(map[int64]struct{}) e.updateSignalRequestedIDs = make(map[string]struct{}) e.deleteSignalRequestedIDs = make(map[string]struct{}) e.stateInDB = e.executionState.State e.nextEventIDInDB = e.GetNextEventID() // e.dbRecordVersion remains the same e.hBuilder = NewMutableHistoryBuilder( e.timeSource, e.shard.GenerateTransferTaskIDs, e.GetCurrentVersion(), e.nextEventIDInDB, e.bufferEventsInDB, ) e.InsertTransferTasks = nil e.InsertReplicationTasks = nil e.InsertTimerTasks = nil 
e.InsertVisibilityTasks = nil return nil } func (e *MutableStateImpl) prepareEventsAndReplicationTasks( transactionPolicy TransactionPolicy, ) ([]*persistence.WorkflowEvents, []*historypb.HistoryEvent, bool, error) { currentBranchToken, err := e.GetCurrentBranchToken() if err != nil { return nil, nil, false, err } historyMutation, err := e.hBuilder.Finish(!e.HasInFlightWorkflowTask()) if err != nil { return nil, nil, false, err } // TODO @wxing1292 need more refactoring to make the logic clean e.bufferEventsInDB = historyMutation.MemBufferBatch newBufferBatch := historyMutation.DBBufferBatch clearBuffer := historyMutation.DBClearBuffer newEventsBatches := historyMutation.DBEventsBatches e.updatePendingEventIDs(historyMutation.ScheduleIDToStartID) workflowEventsSeq := make([]*persistence.WorkflowEvents, len(newEventsBatches)) historyNodeTxnIDs, err := e.shard.GenerateTransferTaskIDs(len(newEventsBatches)) if err != nil { return nil, nil, false, err } for index, eventBatch := range newEventsBatches { workflowEventsSeq[index] = &persistence.WorkflowEvents{ NamespaceID: e.executionInfo.NamespaceId, WorkflowID: e.executionInfo.WorkflowId, RunID: e.executionState.RunId, BranchToken: currentBranchToken, PrevTxnID: e.executionInfo.LastFirstEventTxnId, TxnID: historyNodeTxnIDs[index], Events: eventBatch, } e.GetExecutionInfo().LastEventTaskId = eventBatch[len(eventBatch)-1].GetTaskId() e.executionInfo.LastFirstEventId = eventBatch[0].GetEventId() e.executionInfo.LastFirstEventTxnId = historyNodeTxnIDs[index] } if err := e.validateNoEventsAfterWorkflowFinish( transactionPolicy, workflowEventsSeq, ); err != nil { return nil, nil, false, err } for _, workflowEvents := range workflowEventsSeq { replicationTasks, err := e.eventsToReplicationTask(transactionPolicy, workflowEvents.Events) if err != nil { return nil, nil, false, err } e.InsertReplicationTasks = append( e.InsertReplicationTasks, replicationTasks..., ) } e.InsertReplicationTasks = append( e.InsertReplicationTasks, e.syncActivityToReplicationTask(transactionPolicy)..., ) if transactionPolicy == TransactionPolicyPassive && len(e.InsertReplicationTasks) > 0 { return nil, nil, false, serviceerror.NewInternal("should not generate replication task when close transaction as passive") } return workflowEventsSeq, newBufferBatch, clearBuffer, nil } func (e *MutableStateImpl) eventsToReplicationTask( transactionPolicy TransactionPolicy, events []*historypb.HistoryEvent, ) ([]persistence.Task, error) { if transactionPolicy == TransactionPolicyPassive || !e.canReplicateEvents() || len(events) == 0 { return emptyTasks, nil } firstEvent := events[0] lastEvent := events[len(events)-1] version := firstEvent.GetVersion() sourceCluster := e.clusterMetadata.ClusterNameForFailoverVersion(version) currentCluster := e.clusterMetadata.GetCurrentClusterName() if currentCluster != sourceCluster { return nil, serviceerror.NewInternal("MutableStateImpl encounter contradicting version & transaction policy") } currentBranchToken, err := e.GetCurrentBranchToken() if err != nil { return nil, err } replicationTask := &persistence.HistoryReplicationTask{ FirstEventID: firstEvent.GetEventId(), NextEventID: lastEvent.GetEventId() + 1, Version: firstEvent.GetVersion(), BranchToken: currentBranchToken, NewRunBranchToken: nil, } if e.executionInfo.GetVersionHistories() == nil { return nil, serviceerror.NewInternal("should not generate replication task when missing replication state & version history") } return []persistence.Task{replicationTask}, nil } func (e *MutableStateImpl) 
syncActivityToReplicationTask( transactionPolicy TransactionPolicy, ) []persistence.Task { if transactionPolicy == TransactionPolicyPassive || !e.canReplicateEvents() { return emptyTasks } return convertSyncActivityInfos( e.pendingActivityInfoIDs, e.syncActivityTasks, ) } func (e *MutableStateImpl) updatePendingEventIDs( scheduleIDToStartID map[int64]int64, ) { Loop: for scheduleID, startID := range scheduleIDToStartID { if activityInfo, ok := e.GetActivityInfo(scheduleID); ok { activityInfo.StartedId = startID e.updateActivityInfos[activityInfo.ScheduleId] = activityInfo continue Loop } if childInfo, ok := e.GetChildExecutionInfo(scheduleID); ok { childInfo.StartedId = startID e.updateChildExecutionInfos[childInfo.InitiatedId] = childInfo continue Loop } } } func (e *MutableStateImpl) updateWithLastWriteEvent( lastEvent *historypb.HistoryEvent, transactionPolicy TransactionPolicy, ) error { if transactionPolicy == TransactionPolicyPassive { // already handled in state builder return nil } if e.executionInfo.VersionHistories != nil { currentVersionHistory, err := versionhistory.GetCurrentVersionHistory(e.executionInfo.VersionHistories) if err != nil { return err } if err := versionhistory.AddOrUpdateVersionHistoryItem(currentVersionHistory, versionhistory.NewVersionHistoryItem( lastEvent.GetEventId(), lastEvent.GetVersion(), )); err != nil { return err } } return nil } func (e *MutableStateImpl) canReplicateEvents() bool { return e.namespaceEntry.GetReplicationPolicy() == cache.ReplicationPolicyMultiCluster } // validateNoEventsAfterWorkflowFinish perform check on history event batch // NOTE: do not apply this check on every batch, since transient // workflow task && workflow finish will be broken (the first batch) func (e *MutableStateImpl) validateNoEventsAfterWorkflowFinish( transactionPolicy TransactionPolicy, workflowEventSeq []*persistence.WorkflowEvents, ) error { if transactionPolicy == TransactionPolicyPassive || len(workflowEventSeq) == 0 { return nil } // only do check if workflow is finished if e.executionState.State != enumsspb.WORKFLOW_EXECUTION_STATE_COMPLETED { return nil } // workflow close // this will perform check on the last event of last batch // NOTE: do not apply this check on every batch, since transient // workflow task && workflow finish will be broken (the first batch) eventBatch := workflowEventSeq[len(workflowEventSeq)-1].Events lastEvent := eventBatch[len(eventBatch)-1] switch lastEvent.GetEventType() { case enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_COMPLETED, enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_FAILED, enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TIMED_OUT, enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_TERMINATED, enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CONTINUED_AS_NEW, enumspb.EVENT_TYPE_WORKFLOW_EXECUTION_CANCELED: return nil default: e.logError( "encounter case where events appears after workflow finish.", tag.WorkflowNamespaceID(e.executionInfo.NamespaceId), tag.WorkflowID(e.executionInfo.WorkflowId), tag.WorkflowRunID(e.executionState.RunId), ) return consts.ErrEventsAterWorkflowFinish } } func (e *MutableStateImpl) startTransactionHandleWorkflowTaskTTL() { if e.executionInfo.StickyTaskQueue == "" { return } ttl := e.config.StickyTTL(e.GetNamespaceEntry().GetInfo().Name) expired := e.timeSource.Now().After(timestamp.TimeValue(e.executionInfo.LastUpdateTime).Add(ttl)) if expired && !e.HasPendingWorkflowTask() { e.ClearStickyness() } } func (e *MutableStateImpl) startTransactionHandleWorkflowTaskFailover( skipWorkflowTaskFailed bool, ) (bool, error) { if 
!e.IsWorkflowExecutionRunning() || !e.canReplicateEvents() { return false, nil } // NOTE: // the main idea here is to guarantee that once there is a workflow task started // all events ending in the buffer should have the same version // Handling mutable state turn from standby to active, while having a workflow task on the fly workflowTask, ok := e.GetInFlightWorkflowTask() if !ok || workflowTask.Version >= e.GetCurrentVersion() { // no pending workflow tasks, no buffered events // or workflow task has higher / equal version return false, nil } currentVersion := e.GetCurrentVersion() lastWriteVersion, err := e.GetLastWriteVersion() if err != nil { return false, err } if lastWriteVersion != workflowTask.Version { return false, serviceerror.NewInternal(fmt.Sprintf("MutableStateImpl encounter mismatch version, workflow task: %v, last write version %v", workflowTask.Version, lastWriteVersion)) } lastWriteSourceCluster := e.clusterMetadata.ClusterNameForFailoverVersion(lastWriteVersion) currentVersionCluster := e.clusterMetadata.ClusterNameForFailoverVersion(currentVersion) currentCluster := e.clusterMetadata.GetCurrentClusterName() // there are 4 cases for version changes (based on version from namespace cache) // NOTE: namespace cache version change may occur after seeing events with higher version // meaning that the flush buffer logic in NDC branch manager should be kept. // // 1. active -> passive => fail workflow task & flush buffer using last write version // 2. active -> active => fail workflow task & flush buffer using last write version // 3. passive -> active => fail workflow task using current version, no buffered events // 4. passive -> passive => no buffered events, since always passive, nothing to be done // handle case 4 if lastWriteSourceCluster != currentCluster && currentVersionCluster != currentCluster { // do a sanity check on buffered events if e.HasBufferedEvents() { return false, serviceerror.NewInternal("MutableStateImpl encounter previous passive workflow with buffered events") } return false, nil } // handle case 1 & 2 var flushBufferVersion = lastWriteVersion // handle case 3 if lastWriteSourceCluster != currentCluster && currentVersionCluster == currentCluster { // do a sanity check on buffered events if e.HasBufferedEvents() { return false, serviceerror.NewInternal("MutableStateImpl encounter previous passive workflow with buffered events") } flushBufferVersion = currentVersion } // this workflow was previous active (whether it has buffered events or not), // the in flight workflow task must be failed to guarantee all events within same // event batch shard the same version if err := e.UpdateCurrentVersion(flushBufferVersion, true); err != nil { return false, err } if skipWorkflowTaskFailed { return false, nil } // we have a workflow task with buffered events on the fly with a lower version, fail it if err := failWorkflowTask( e, workflowTask, enumspb.WORKFLOW_TASK_FAILED_CAUSE_FAILOVER_CLOSE_COMMAND, ); err != nil { return false, err } err = ScheduleWorkflowTask(e) if err != nil { return false, err } return true, nil } func (e *MutableStateImpl) closeTransactionWithPolicyCheck( transactionPolicy TransactionPolicy, ) error { if transactionPolicy == TransactionPolicyPassive || !e.canReplicateEvents() { return nil } activeCluster := e.clusterMetadata.ClusterNameForFailoverVersion(e.GetCurrentVersion()) currentCluster := e.clusterMetadata.GetCurrentClusterName() if activeCluster != currentCluster { namespaceID := e.GetExecutionInfo().NamespaceId return 
serviceerror.NewNamespaceNotActive(namespaceID, currentCluster, activeCluster) } return nil } func (e *MutableStateImpl) closeTransactionHandleBufferedEventsLimit( transactionPolicy TransactionPolicy, ) error { if transactionPolicy == TransactionPolicyPassive || !e.IsWorkflowExecutionRunning() { return nil } if e.hBuilder.BufferEventSize() < e.config.MaximumBufferedEventsBatch() { return nil } // Handling buffered events size issue if workflowTask, ok := e.GetInFlightWorkflowTask(); ok { // we have a workflow task on the fly with a lower version, fail it if err := failWorkflowTask( e, workflowTask, enumspb.WORKFLOW_TASK_FAILED_CAUSE_FORCE_CLOSE_COMMAND, ); err != nil { return err } err := ScheduleWorkflowTask(e) if err != nil { return err } } return nil } func (e *MutableStateImpl) closeTransactionHandleWorkflowReset( now time.Time, transactionPolicy TransactionPolicy, ) error { if transactionPolicy == TransactionPolicyPassive || !e.IsWorkflowExecutionRunning() { return nil } // compare with bad client binary checksum and schedule a reset task // only schedule reset task if current doesn't have childWFs. // TODO: This will be removed once our reset allows childWFs if len(e.GetPendingChildExecutionInfos()) != 0 { return nil } namespaceEntry, err := e.shard.GetNamespaceCache().GetNamespaceByID(e.executionInfo.NamespaceId) if err != nil { return err } if _, pt := FindAutoResetPoint( e.timeSource, namespaceEntry.GetConfig().BadBinaries, e.GetExecutionInfo().AutoResetPoints, ); pt != nil { if err := e.taskGenerator.GenerateWorkflowResetTasks( e.unixNanoToTime(now.UnixNano()), ); err != nil { return err } e.logInfo("Auto-Reset task is scheduled", tag.WorkflowNamespace(namespaceEntry.GetInfo().Name), tag.WorkflowID(e.executionInfo.WorkflowId), tag.WorkflowRunID(e.executionState.RunId), tag.WorkflowResetBaseRunID(pt.GetRunId()), tag.WorkflowEventID(pt.GetFirstWorkflowTaskCompletedId()), tag.WorkflowBinaryChecksum(pt.GetBinaryChecksum()), ) } return nil } func (e *MutableStateImpl) closeTransactionHandleActivityUserTimerTasks( now time.Time, transactionPolicy TransactionPolicy, ) error { if transactionPolicy == TransactionPolicyPassive || !e.IsWorkflowExecutionRunning() { return nil } if err := e.taskGenerator.GenerateActivityTimerTasks( e.unixNanoToTime(now.UnixNano()), ); err != nil { return err } return e.taskGenerator.GenerateUserTimerTasks( e.unixNanoToTime(now.UnixNano()), ) } func (e *MutableStateImpl) checkMutability( actionTag tag.ZapTag, ) error { if !e.IsWorkflowExecutionRunning() { e.logWarn( mutableStateInvalidHistoryActionMsg, tag.WorkflowEventID(e.GetNextEventID()), tag.ErrorTypeInvalidHistoryAction, tag.WorkflowState(e.executionState.State), actionTag, ) return ErrWorkflowFinished } return nil } func (e *MutableStateImpl) generateChecksum() *persistencespb.Checksum { if !e.shouldGenerateChecksum() { return nil } csum, err := generateMutableStateChecksum(e) if err != nil { e.logWarn("error generating MutableState checksum", tag.Error(err)) return nil } return csum } func (e *MutableStateImpl) shouldGenerateChecksum() bool { if e.namespaceEntry == nil { return false } return rand.Intn(100) < e.config.MutableStateChecksumGenProbability(e.namespaceEntry.GetInfo().Name) } func (e *MutableStateImpl) shouldVerifyChecksum() bool { if e.namespaceEntry == nil { return false } return rand.Intn(100) < e.config.MutableStateChecksumVerifyProbability(e.namespaceEntry.GetInfo().Name) } func (e *MutableStateImpl) shouldInvalidateCheckum() bool { invalidateBeforeEpochSecs := 
int64(e.config.MutableStateChecksumInvalidateBefore()) if invalidateBeforeEpochSecs > 0 { invalidateBefore := time.Unix(invalidateBeforeEpochSecs, 0).UTC() return e.executionInfo.LastUpdateTime.Before(invalidateBefore) } return false } func (e *MutableStateImpl) createInternalServerError( actionTag tag.ZapTag, ) error { return serviceerror.NewInternal(actionTag.Field().String + " operation failed") } func (e *MutableStateImpl) createCallerError( actionTag tag.ZapTag, ) error { return serviceerror.NewInvalidArgument(fmt.Sprintf(mutableStateInvalidHistoryActionMsgTemplate, actionTag.Field().String)) } func (_ *MutableStateImpl) unixNanoToTime( timestampNanos int64, ) time.Time { return time.Unix(0, timestampNanos).UTC() } func (e *MutableStateImpl) logInfo(msg string, tags ...tag.Tag) { tags = append(tags, tag.WorkflowID(e.executionInfo.WorkflowId)) tags = append(tags, tag.WorkflowRunID(e.executionState.RunId)) tags = append(tags, tag.WorkflowNamespaceID(e.executionInfo.NamespaceId)) e.logger.Info(msg, tags...) } func (e *MutableStateImpl) logWarn(msg string, tags ...tag.Tag) { tags = append(tags, tag.WorkflowID(e.executionInfo.WorkflowId)) tags = append(tags, tag.WorkflowRunID(e.executionState.RunId)) tags = append(tags, tag.WorkflowNamespaceID(e.executionInfo.NamespaceId)) e.logger.Warn(msg, tags...) } func (e *MutableStateImpl) logError(msg string, tags ...tag.Tag) { tags = append(tags, tag.WorkflowID(e.executionInfo.WorkflowId)) tags = append(tags, tag.WorkflowRunID(e.executionState.RunId)) tags = append(tags, tag.WorkflowNamespaceID(e.executionInfo.NamespaceId)) e.logger.Error(msg, tags...) } func (e *MutableStateImpl) logDataInconsistency() { namespaceID := e.executionInfo.NamespaceId workflowID := e.executionInfo.WorkflowId runID := e.executionState.RunId e.logger.Error("encounter cassandra data inconsistency", tag.WorkflowNamespaceID(namespaceID), tag.WorkflowID(workflowID), tag.WorkflowRunID(runID), ) }
1
12,434
@wxing1292 to double-check: is this the right version to use?
temporalio-temporal
go
@@ -70,4 +70,12 @@ class TIntRange extends TInt { return $this->min_bound !== null && $this->min_bound > 0; } + + public function contains(int $i): bool + { + return + ($this->min_bound === null && $this->max_bound === null) || + ($this->min_bound === null && $this->max_bound >= $i) || + ($this->max_bound === null && $this->min_bound <= $i); + } }
1
<?php namespace Psalm\Type\Atomic; /** * Denotes an interval of integers between two bounds */ class TIntRange extends TInt { const BOUND_MIN = 'min'; const BOUND_MAX = 'max'; /** * @var int|null */ public $min_bound; /** * @var int|null */ public $max_bound; public function __construct(?int $min_bound, ?int $max_bound) { $this->min_bound = $min_bound; $this->max_bound = $max_bound; } public function __toString(): string { return $this->getKey(); } public function getKey(bool $include_extra = true): string { return 'int<' . ($this->min_bound ?? 'min') . ', ' . ($this->max_bound ?? 'max') . '>'; } public function canBeFullyExpressedInPhp(int $php_major_version, int $php_minor_version): bool { return false; } /** * @param array<lowercase-string, string> $aliased_classes */ public function toPhpString( ?string $namespace, array $aliased_classes, ?string $this_class, int $php_major_version, int $php_minor_version ): ?string { return $php_major_version >= 7 ? 'int' : null; } /** * @param array<lowercase-string, string> $aliased_classes */ public function toNamespacedString( ?string $namespace, array $aliased_classes, ?string $this_class, bool $use_phpdoc_format ): string { return $use_phpdoc_format ? 'int' : 'int<' . ($this->min_bound ?? 'min') . ', ' . ($this->max_bound ?? 'max') . '>'; } public function isPositive(): bool { return $this->min_bound !== null && $this->min_bound > 0; } }
1
10,917
Shouldn't there be another case for when both min and max are specified?
vimeo-psalm
php
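The question above points at the case the added `contains()` leaves out: when both `min_bound` and `max_bound` are non-null, none of its three disjuncts match, so values inside a fully bounded range such as `int<5, 10>` are reported as not contained. A minimal sketch of one way to cover all four bound combinations, restated here as a standalone function purely for illustration (hypothetical, not the maintainers' actual change):

```php
<?php

// Hypothetical restatement of TIntRange::contains() as a free function,
// checking each bound independently so the both-bounds case is covered too.
function int_range_contains(?int $min_bound, ?int $max_bound, int $i): bool
{
    return
        ($min_bound === null || $min_bound <= $i) &&
        ($max_bound === null || $max_bound >= $i);
}

// int<5, 10> contains 7 but not 12; int<min, max> contains everything.
var_dump(int_range_contains(5, 10, 7));       // bool(true)
var_dump(int_range_contains(5, 10, 12));      // bool(false)
var_dump(int_range_contains(null, null, 42)); // bool(true)
```

Whether the real fix took this shape or instead added a fourth explicit branch for the both-bounds case is not shown in this record.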
@@ -1,5 +1,5 @@ ## This file is part of Invenio. -## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN. +## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as
1
## This file is part of Invenio. ## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## Youshould have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Invenio BibRank Administrator Interface.""" __revision__ = "$Id$" import os import ConfigParser from invenio.config import \ CFG_SITE_LANG, \ CFG_SITE_URL import invenio.modules.access.engine as acce from invenio.base.i18n import language_list_long from invenio.legacy.dbquery import run_sql, wash_table_column_name from invenio.modules.ranker.registry import configuration def getnavtrail(previous = ''): navtrail = """<a class="navtrail" href="%s/help/admin">Admin Area</a> """ % (CFG_SITE_URL,) navtrail = navtrail + previous return navtrail def check_user(req, role, adminarea=2, authorized=0): (auth_code, auth_message) = is_adminuser(req, role) if not authorized and auth_code != 0: return ("false", auth_message) return ("", auth_message) def is_adminuser(req, role): """check if user is a registered administrator. """ return acce.acc_authorize_action(req, role) def perform_index(ln=CFG_SITE_LANG): """create the bibrank main area menu page.""" header = ['Code', 'Translations', 'Collections', 'Rank method'] rnk_list = get_def_name('', "rnkMETHOD") actions = [] for (rnkID, name) in rnk_list: actions.append([name]) for col in [(('Modify', 'modifytranslations'),), (('Modify', 'modifycollection'),), (('Show Details', 'showrankdetails'), ('Modify', 'modifyrank'), ('Delete', 'deleterank'))]: actions[-1].append('<a href="%s/admin/bibrank/bibrankadmin.py/%s?rnkID=%s&amp;ln=%s">%s</a>' % (CFG_SITE_URL, col[0][1], rnkID, ln, col[0][0])) for (str, function) in col[1:]: actions[-1][-1] += ' / <a href="%s/admin/bibrank/bibrankadmin.py/%s?rnkID=%s&amp;ln=%s">%s</a>' % (CFG_SITE_URL, function, rnkID, ln, str) output = """ <a href="%s/admin/bibrank/bibrankadmin.py/addrankarea?ln=%s">Add new rank method</a><br /><br /> """ % (CFG_SITE_URL, ln) output += tupletotable(header=header, tuple=actions) return addadminbox("""Overview of rank methods&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mi">?</a>]</small>""" % CFG_SITE_URL, datalist=[output, '']) def perform_modifycollection(rnkID='', ln=CFG_SITE_LANG, func='', colID='', confirm=0): """Modify which collections the rank method is visible to""" output = "" subtitle = "" if rnkID: rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1] if func in ["0", 0] and confirm in ["1", 1]: finresult = attach_col_rnk(rnkID, colID) elif func in ["1", 1] and confirm in ["1", 1]: finresult = detach_col_rnk(rnkID, colID) if colID: colNAME = get_def_name(colID, "collection")[0][1] subtitle = """Step 1 - Select collection to enable/disable rank method '%s' for""" % rnkNAME output = """ <dl> <dt>The rank method is currently enabled for these collections:</dt> <dd> """ col_list = get_rnk_col(rnkID, ln) if not col_list: output += """No collections""" 
else: for (id, name) in col_list: output += """%s, """ % name output += """</dd> </dl> """ col_list = get_def_name('', "collection") col_rnk = dict(get_rnk_col(rnkID)) col_list = filter(lambda x: x[0] not in col_rnk, col_list) if col_list: text = """ <span class="adminlabel">Enable for:</span> <select name="colID" class="admin_w200"> <option value="">- select collection -</option> """ for (id, name) in col_list: text += """<option value="%s" %s>%s</option>""" % (id, (func in ["0", 0] and confirm in ["0", 0] and colID and int(colID) == int(id)) and 'selected="selected"' or '' , name) text += """</select>""" output += createhiddenform(action="modifycollection", text=text, button="Enable", rnkID=rnkID, ln=ln, func=0, confirm=1) if confirm in ["0", 0] and func in ["0", 0] and colID: subtitle = "Step 2 - Confirm to enable rank method for the chosen collection" text = "<b><p>Please confirm to enable rank method '%s' for the collection '%s'</p></b>" % (rnkNAME, colNAME) output += createhiddenform(action="modifycollection", text=text, button="Confirm", rnkID=rnkID, ln=ln, colID=colID, func=0, confirm=1) elif confirm in ["1", 1] and func in ["0", 0] and colID: subtitle = "Step 3 - Result" output += write_outcome(finresult) elif confirm not in ["0", 0] and func in ["0", 0]: output += """<b><span class="info">Please select a collection.</span></b>""" col_list = get_rnk_col(rnkID, ln) if col_list: text = """ <span class="adminlabel">Disable for:</span> <select name="colID" class="admin_w200"> <option value="">- select collection -</option> """ for (id, name) in col_list: text += """<option value="%s" %s>%s</option>""" % (id, (func in ["1", 1] and confirm in ["0", 0] and colID and int(colID) == int(id)) and 'selected="selected"' or '' , name) text += """</select>""" output += createhiddenform(action="modifycollection", text=text, button="Disable", rnkID=rnkID, ln=ln, func=1, confirm=1) if confirm in ["1", 1] and func in ["1", 1] and colID: subtitle = "Step 3 - Result" output += write_outcome(finresult) elif confirm not in ["0", 0] and func in ["1", 1]: output += """<b><span class="info">Please select a collection.</span></b>""" body = [output] return addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mc">?</a>]</small>""" % CFG_SITE_URL, body) def perform_modifytranslations(rnkID, ln, sel_type, trans, confirm, callback='yes'): """Modify the translations of a rank method""" output = '' subtitle = '' langs = get_languages() langs.sort() if confirm in ["2", 2] and rnkID: finresult = modify_translations(rnkID, langs, sel_type, trans, "rnkMETHOD") rnk_name = get_def_name(rnkID, "rnkMETHOD")[0][1] rnk_dict = dict(get_i8n_name('', ln, get_rnk_nametypes()[0][0], "rnkMETHOD")) if rnkID and int(rnkID) in rnk_dict: rnkID = int(rnkID) subtitle = """<a name="3">3. 
Modify translations for rank method '%s'</a>""" % rnk_name if type(trans) is str: trans = [trans] if sel_type == '': sel_type = get_rnk_nametypes()[0][0] header = ['Language', 'Translation'] actions = [] text = """ <span class="adminlabel">Name type</span> <select name="sel_type" class="admin_w200"> """ types = get_rnk_nametypes() if len(types) > 1: for (key, value) in types: text += """<option value="%s" %s>%s""" % (key, key == sel_type and 'selected="selected"' or '', value) trans_names = get_name(rnkID, ln, key, "rnkMETHOD") if trans_names and trans_names[0][0]: text += ": %s" % trans_names[0][0] text += "</option>" text += """</select>""" output += createhiddenform(action="modifytranslations", text=text, button="Select", rnkID=rnkID, ln=ln, confirm=0) if confirm in [-1, "-1", 0, "0"]: trans = [] for key, value in langs: try: trans_names = get_name(rnkID, key, sel_type, "rnkMETHOD") trans.append(trans_names[0][0]) except StandardError as e: trans.append('') for nr in range(0,len(langs)): actions.append(["%s" % (langs[nr][1],)]) actions[-1].append('<input type="text" name="trans" size="30" value="%s"/>' % trans[nr]) text = tupletotable(header=header, tuple=actions) output += createhiddenform(action="modifytranslations", text=text, button="Modify", rnkID=rnkID, sel_type=sel_type, ln=ln, confirm=2) if sel_type and len(trans) and confirm in ["2", 2]: output += write_outcome(finresult) body = [output] return addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mt">?</a>]</small>""" % CFG_SITE_URL, body) def perform_addrankarea(rnkcode='', ln=CFG_SITE_LANG, template='', confirm=-1): """form to add a new rank method with these values:""" subtitle = 'Step 1 - Create new rank method' output = """ <dl> <dt>BibRank code:</dt> <dd>A unique code that identifies a rank method, is used when running the bibrank daemon and used to name the configuration file for the method. <br />The template files includes the necessary parameters for the chosen rank method, and only needs to be edited with the correct tags and paths. 
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section about adding a rank method</dd> </dl> """ % CFG_SITE_URL text = """ <span class="adminlabel">BibRank code</span> <input class="admin_wvar" type="text" name="rnkcode" value="%s" /> """ % (rnkcode) text += """<br /> <span class="adminlabel">Cfg template</span> <select name="template" class="admin_w200"> <option value="">No template</option> """ templates = get_templates() for templ in templates: text += """<option value="%s" %s>%s</option>""" % (templ, template == templ and 'selected="selected"' or '', templ[9:len(templ)-4]) text += """</select>""" output += createhiddenform(action="addrankarea", text=text, button="Add rank method", ln=ln, confirm=1) if rnkcode: if confirm in ["0", 0]: subtitle = 'Step 2 - Confirm addition of rank method' text = """<b>Add rank method with BibRank code: '%s'.</b>""" % (rnkcode) if template: text += """<br /><b>Using configuration template: '%s'.</b>""" % (template) else: text += """<br /><b>Create empty configuration file.</b>""" output += createhiddenform(action="addrankarea", text=text, rnkcode=rnkcode, button="Confirm", template=template, confirm=1) elif confirm in ["1", 1]: rnkID = add_rnk(rnkcode) subtitle = "Step 3 - Result" if rnkID[0] == 1: rnkID = rnkID[1] text = """<b><span class="info">Added new rank method with BibRank code '%s'</span></b>""" % rnkcode try: if template: infile = open(configuration.get(template, ''), 'r') indata = infile.readlines() infile.close() else: indata = () file = open(configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), 'w') for line in indata: file.write(line) file.close() if template: text += """<b><span class="info"><br />Configuration file created using '%s' as template.</span></b>""" % template else: text += """<b><span class="info"><br />Empty configuration file created.</span></b>""" except StandardError as e: text += """<b><span class="info"><br />Sorry, could not create configuration file: '%s.cfg', either because it already exists, or not enough rights to create file. <br />Please create the file in the path given.</span></b>""" % (configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), ) else: text = """<b><span class="info">Sorry, could not add rank method, rank method with the same BibRank code probably exists.</span></b>""" output += text elif not rnkcode and confirm not in [-1, "-1"]: output += """<b><span class="info">Sorry, could not add rank method, not enough data submitted.</span></b>""" body = [output] return addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#ar">?</a>]</small>""" % CFG_SITE_URL, body) def perform_modifyrank(rnkID, rnkcode='', ln=CFG_SITE_LANG, template='', cfgfile='', confirm=0): """form to modify a rank method rnkID - id of the rank method """ if not rnkID: return "No ranking method selected." if not get_rnk_code(rnkID): return "Ranking method %s does not seem to exist." % str(rnkID) subtitle = 'Step 1 - Please modify the wanted values below' if not rnkcode: oldcode = get_rnk_code(rnkID)[0] else: oldcode = rnkcode output = """ <dl> <dd>When changing the BibRank code of a rank method, you must also change any scheduled tasks using the old value. 
<br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section about modifying a rank method's BibRank code.</dd> </dl> """ % CFG_SITE_URL text = """ <span class="adminlabel">BibRank code</span> <input class="admin_wvar" type="text" name="rnkcode" value="%s" /> <br /> """ % (oldcode) try: text += """<span class="adminlabel">Cfg file</span>""" textarea = "" if cfgfile: textarea +=cfgfile else: file = open(configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', '')) for line in file.readlines(): textarea += line text += """<textarea class="admin_wvar" name="cfgfile" rows="15" cols="70">""" + textarea + """</textarea>""" except StandardError as e: text += """<b><span class="info">Cannot load file, either it does not exist, or not enough rights to read it: '%s.cfg'<br />Please create the file in the path given.</span></b>""" % (configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), ) output += createhiddenform(action="modifyrank", text=text, rnkID=rnkID, button="Modify", confirm=1) if rnkcode and confirm in ["1", 1] and get_rnk_code(rnkID)[0][0] != rnkcode: oldcode = get_rnk_code(rnkID)[0][0] result = modify_rnk(rnkID, rnkcode) subtitle = "Step 3 - Result" if result: text = """<b><span class="info">Rank method modified.</span></b>""" try: file = open(configuration.get(oldcode + '.cfg', ''), 'r') file2 = open(configuration.get(rnkcode + '.cfg', ''), 'w') lines = file.readlines() for line in lines: file2.write(line) file.close() file2.close() os.remove(configuration.get(oldcode + '.cfg', '')) except StandardError as e: text = """<b><span class="info">Sorry, could not change name of cfg file, must be done manually: '%s.cfg'</span></b>""" % (configuration.get(oldcode + '.cfg', ''), ) else: text = """<b><span class="info">Sorry, could not modify rank method.</span></b>""" output += text if cfgfile and confirm in ["1", 1]: try: file = open(configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), 'w') file.write(cfgfile) file.close() text = """<b><span class="info"><br />Configuration file modified: '%s/bibrank/%s.cfg'</span></b>""" % (configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), ) except StandardError as e: text = """<b><span class="info"><br />Sorry, could not modify configuration file, please check for rights to do so: '%s.cfg'<br />Please modify the file manually.</span></b>""" % (configuration.get(get_rnk_code(rnkID)[0][0] + '.cfg', ''), ) output += text finoutput = addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#mr">?</a>]</small>""" % CFG_SITE_URL, [output]) output = "" text = """ <span class="adminlabel">Select</span> <select name="template" class="admin_w200"> <option value="">- select template -</option> """ templates = get_templates() for templ in templates: text += """<option value="%s" %s>%s</option>""" % (templ, template == templ and 'selected="selected"' or '', templ[9:len(templ)-4]) text += """</select><br />""" output += createhiddenform(action="modifyrank", text=text, rnkID=rnkID, button="Show template", confirm=0) try: if template: textarea = "" text = """<span class="adminlabel">Content:</span>""" file = open(configuration.get(template, ''), 'r') lines = file.readlines() for line in lines: textarea += line file.close() text += """<textarea class="admin_wvar" readonly="true" rows="15" cols="70">""" + textarea + """</textarea>""" output += text except StandardError as e: output += """Cannot load file, either it does not 
exist, or not enough rights to read it: '%s'""" % (configuration.get(template, ''), ) finoutput += addadminbox("View templates", [output]) return finoutput def perform_deleterank(rnkID, ln=CFG_SITE_LANG, confirm=0): """form to delete a rank method """ subtitle ='' output = """ <span class="warning"> <dl> <dt><strong>WARNING:</strong></dt> <dd><strong>When deleting a rank method, you also deletes all data related to the rank method, like translations, which collections it was attached to and the data necessary to rank the searchresults. Any scheduled tasks using the deleted rank method will also stop working. <br /><br />For more information, please go to the <a title="See guide" href="%s/help/admin/bibrank-admin-guide">BibRank guide</a> and read the section regarding deleting a rank method.</strong></dd> </dl> </span> """ % CFG_SITE_URL if rnkID: if confirm in ["0", 0]: rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1] subtitle = 'Step 1 - Confirm deletion' text = """Delete rank method '%s'.""" % (rnkNAME) output += createhiddenform(action="deleterank", text=text, button="Confirm", rnkID=rnkID, confirm=1) elif confirm in ["1", 1]: try: rnkNAME = get_def_name(rnkID, "rnkMETHOD")[0][1] rnkcode = get_rnk_code(rnkID)[0][0] table = "" try: config = ConfigParser.ConfigParser() config.readfp(open(configuration.get( rnkcode + ".cfg"), 'r')) table = config.get(config.get('rank_method', "function"), "table") except Exception: pass result = delete_rnk(rnkID, table) subtitle = "Step 2 - Result" if result: text = """<b><span class="info">Rank method deleted</span></b>""" try: os.remove(configuration.get( rnkcode + ".cfg")) text += """<br /><b><span class="info">Configuration file deleted: '%s.cfg'.</span></b>""" % (configuration.get( rnkcode + ".cfg"), ) except StandardError as e: text += """<br /><b><span class="info">Sorry, could not delete configuration file: '%s/bibrank/%s.cfg'.</span><br />Please delete the file manually.</span></b>""" % (configuration.get( rnkcode + ".cfg"), ) else: text = """<b><span class="info">Sorry, could not delete rank method</span></b>""" except StandardError as e: text = """<b><span class="info">Sorry, could not delete rank method, most likely already deleted</span></b>""" output = text body = [output] return addadminbox(subtitle + """&nbsp;&nbsp;&nbsp;<small>[<a title="See guide" href="%s/help/admin/bibrank-admin-guide#dr">?</a>]</small>""" % CFG_SITE_URL, body) def perform_showrankdetails(rnkID, ln=CFG_SITE_LANG): """Returns details about the rank method given by rnkID""" if not rnkID: return "No ranking method selected." if not get_rnk_code(rnkID): return "Ranking method %s does not seem to exist." 
% str(rnkID) subtitle = """Overview <a href="%s/admin/bibrank/bibrankadmin.py/modifyrank?rnkID=%s&amp;ln=%s">[Modify]</a>""" % (CFG_SITE_URL, rnkID, ln) text = """ BibRank code: %s<br /> Last updated by BibRank: """ % (get_rnk_code(rnkID)[0][0]) if get_rnk(rnkID)[0][2]: text += "%s<br />" % get_rnk(rnkID)[0][2] else: text += "Not yet run.<br />" output = addadminbox(subtitle, [text]) subtitle = """Rank method statistics""" text = "" try: text = "Not yet implemented" except StandardError as e: text = "BibRank not yet run, cannot show statistics for method" output += addadminbox(subtitle, [text]) subtitle = """Attached to collections <a href="%s/admin/bibrank/bibrankadmin.py/modifycollection?rnkID=%s&amp;ln=%s">[Modify]</a>""" % (CFG_SITE_URL, rnkID, ln) text = "" col = get_rnk_col(rnkID, ln) for key, value in col: text+= "%s<br />" % value if not col: text +="No collections" output += addadminbox(subtitle, [text]) subtitle = """Translations <a href="%s/admin/bibrank/bibrankadmin.py/modifytranslations?rnkID=%s&amp;ln=%s">[Modify]</a>""" % (CFG_SITE_URL, rnkID, ln) prev_lang = '' trans = get_translations(rnkID) types = get_rnk_nametypes() types = dict(map(lambda x: (x[0], x[1]), types)) text = "" languages = dict(get_languages()) if trans: for lang, type, name in trans: if lang and lang in languages and type and name: if prev_lang != lang: prev_lang = lang text += """%s: <br />""" % (languages[lang]) if type in types: text+= """<span style="margin-left: 10px">'%s'</span><span class="note">(%s)</span><br />""" % (name, types[type]) else: text = """No translations exists""" output += addadminbox(subtitle, [text]) subtitle = """Configuration file: '%s/bibrank/%s.cfg' <a href="%s/admin/bibrank/bibrankadmin.py/modifyrank?rnkID=%s&amp;ln=%s">[Modify]</a>""" % (CFG_ETCDIR, get_rnk_code(rnkID)[0][0], CFG_SITE_URL, rnkID, ln) text = "" try: file = open(configuration.get(get_rnk_code(rnkID)[0][0] + ".cfg", '')) text += """<pre>""" for line in file.readlines(): text += line text += """</pre>""" except StandardError as e: text = """Cannot load file, either it does not exist, or not enough rights to read it.""" output += addadminbox(subtitle, [text]) return output def compare_on_val(second, first): return cmp(second[1], first[1]) def get_rnk_code(rnkID): """Returns the name from rnkMETHOD based on argument rnkID - id from rnkMETHOD""" try: res = run_sql("SELECT name FROM rnkMETHOD where id=%s" % (rnkID)) return res except StandardError as e: return () def get_rnk(rnkID=''): """Return one or all rank methods rnkID - return the rank method given, or all if not given""" try: if rnkID: res = run_sql("SELECT id,name,DATE_FORMAT(last_updated, '%%Y-%%m-%%d %%H:%%i:%%s') from rnkMETHOD WHERE id=%s" % rnkID) else: res = run_sql("SELECT id,name,DATE_FORMAT(last_updated, '%%Y-%%m-%%d %%H:%%i:%%s') from rnkMETHOD") return res except StandardError as e: return () def get_translations(rnkID): """Returns the translations in rnkMETHODNAME for a rankmethod rnkID - the id of the rankmethod from rnkMETHOD """ try: res = run_sql("SELECT ln, type, value FROM rnkMETHODNAME where id_rnkMETHOD=%s ORDER BY ln,type" % (rnkID)) return res except StandardError as e: return () def get_rnk_nametypes(): """Return a list of the various translationnames for the rank methods""" type = [] type.append(('ln', 'Long name')) #type.append(('sn', 'Short name')) return type def get_col_nametypes(): """Return a list of the various translationnames for the rank methods""" type = [] type.append(('ln', 'Long name')) return type def get_rnk_col(rnkID, 
ln=CFG_SITE_LANG): """ Returns a list of the collections the given rank method is attached to rnkID - id from rnkMETHOD""" try: res1 = dict(run_sql("SELECT id_collection, '' FROM collection_rnkMETHOD WHERE id_rnkMETHOD=%s" % rnkID)) res2 = get_def_name('', "collection") result = filter(lambda x: x[0] in res1, res2) return result except StandardError as e: return () def get_templates(): """Read CFG_ETCDIR/bibrank and returns a list of all files with 'template' """ templates = [] files = configuration.itervalues() for file in files: if str.find(file,"template_") != -1: templates.append(file) return templates def attach_col_rnk(rnkID, colID): """attach rank method to collection rnkID - id from rnkMETHOD table colID - id of collection, as in collection table """ try: res = run_sql("INSERT INTO collection_rnkMETHOD(id_collection, id_rnkMETHOD) values (%s,%s)" % (colID, rnkID)) return (1, "") except StandardError as e: return (0, e) def detach_col_rnk(rnkID, colID): """detach rank method from collection rnkID - id from rnkMETHOD table colID - id of collection, as in collection table """ try: res = run_sql("DELETE FROM collection_rnkMETHOD WHERE id_collection=%s AND id_rnkMETHOD=%s" % (colID, rnkID)) return (1, "") except StandardError as e: return (0, e) def delete_rnk(rnkID, table=""): """Deletes all data for the given rank method rnkID - delete all data in the tables associated with ranking and this id """ try: res = run_sql("DELETE FROM rnkMETHOD WHERE id=%s" % rnkID) res = run_sql("DELETE FROM rnkMETHODNAME WHERE id_rnkMETHOD=%s" % rnkID) res = run_sql("DELETE FROM collection_rnkMETHOD WHERE id_rnkMETHOD=%s" % rnkID) res = run_sql("DELETE FROM rnkMETHODDATA WHERE id_rnkMETHOD=%s" % rnkID) if table: res = run_sql("truncate %s" % table) res = run_sql("truncate %sR" % table[:-1]) return (1, "") except StandardError as e: return (0, e) def modify_rnk(rnkID, rnkcode): """change the code for the rank method given rnkID - change in rnkMETHOD where id is like this rnkcode - new value for field 'name' in rnkMETHOD """ try: res = run_sql("UPDATE rnkMETHOD set name=%s WHERE id=%s", (rnkcode, rnkID)) return (1, "") except StandardError as e: return (0, e) def add_rnk(rnkcode): """Adds a new rank method to rnkMETHOD rnkcode - the "code" for the rank method, to be used by bibrank daemon """ try: res = run_sql("INSERT INTO rnkMETHOD (name) VALUES (%s)", (rnkcode,)) res = run_sql("SELECT id FROM rnkMETHOD WHERE name=%s", (rnkcode,)) if res: return (1, res[0][0]) else: raise StandardError except StandardError as e: return (0, e) def addadminbox(header='', datalist=[], cls="admin_wvar"): """used to create table around main data on a page, row based. 
header - header on top of the table datalist - list of the data to be added row by row cls - possible to select wich css-class to format the look of the table.""" if len(datalist) == 1: per = '100' else: per = '75' output = '<table class="%s" ' % (cls, ) + 'width="95%">\n' output += """ <thead> <tr> <th class="adminheaderleft" colspan="%s">%s</th> </tr> </thead> <tbody> """ % (len(datalist), header) output += ' <tr>\n' output += """ <td style="vertical-align: top; margin-top: 5px; width: %s;"> %s </td> """ % (per+'%', datalist[0]) if len(datalist) > 1: output += """ <td style="vertical-align: top; margin-top: 5px; width: %s;"> %s </td> """ % ('25%', datalist[1]) output += ' </tr>\n' output += """ </tbody> </table> """ return output def tupletotable(header=[], tuple=[], start='', end='', extracolumn='', highlight_rows_p=False, alternate_row_colors_p=False): """create html table for a tuple. header - optional header for the columns tuple - create table of this start - text to be added in the beginning, most likely beginning of a form end - text to be added in the end, mot likely end of a form. extracolumn - mainly used to put in a button. highlight_rows_p - if the cursor hovering a row should highlight the full row or not alternate_row_colors_p - if alternate background colours should be used for the rows """ # study first row in tuple for alignment align = [] try: firstrow = tuple[0] if type(firstrow) in [int, long]: align = ['admintdright'] elif type(firstrow) in [str, dict]: align = ['admintdleft'] else: for item in firstrow: if type(item) is int: align.append('admintdright') else: align.append('admintdleft') except IndexError: firstrow = [] tblstr = '' for h in header + ['']: tblstr += ' <th class="adminheader">%s</th>\n' % (h, ) if tblstr: tblstr = ' <tr>\n%s\n </tr>\n' % (tblstr, ) tblstr = start + '<table class="admin_wvar_nomargin">\n' + tblstr # extra column try: extra = '<tr class="%s">' % (highlight_rows_p and 'admin_row_highlight' or '') if type(firstrow) not in [int, long, str, dict]: # for data in firstrow: extra += '<td class="%s">%s</td>\n' % ('admintd', data) for i in range(len(firstrow)): extra += '<td class="%s">%s</td>\n' % (align[i], firstrow[i]) else: extra += ' <td class="%s">%s</td>\n' % (align[0], firstrow) extra += '<td class="extracolumn" rowspan="%s" style="vertical-align: top;">\n%s\n</td>\n</tr>\n' % (len(tuple), extracolumn) except IndexError: extra = '' tblstr += extra # for i in range(1, len(tuple)): j = 0 for row in tuple[1:]: j += 1 tblstr += ' <tr class="%s %s">\n' % (highlight_rows_p and 'admin_row_highlight' or '', (j % 2 and alternate_row_colors_p) and 'admin_row_color' or '') # row = tuple[i] if type(row) not in [int, long, str, dict]: # for data in row: tblstr += '<td class="admintd">%s</td>\n' % (data,) for i in range(len(row)): tblstr += '<td class="%s">%s</td>\n' % (align[i], row[i]) else: tblstr += ' <td class="%s">%s</td>\n' % (align[0], row) tblstr += ' </tr> \n' tblstr += '</table> \n ' tblstr += end return tblstr def tupletotable_onlyselected(header=[], tuple=[], selected=[], start='', end='', extracolumn=''): """create html table for a tuple. 
header - optional header for the columns tuple - create table of this selected - indexes of selected rows in the tuple start - put this in the beginning end - put this in the beginning extracolumn - mainly used to put in a button""" tuple2 = [] for index in selected: tuple2.append(tuple[int(index)-1]) return tupletotable(header=header, tuple=tuple2, start=start, end=end, extracolumn=extracolumn) def addcheckboxes(datalist=[], name='authids', startindex=1, checked=[]): """adds checkboxes in front of the listdata. datalist - add checkboxes in front of this list name - name of all the checkboxes, values will be associated with this name startindex - usually 1 because of the header checked - values of checkboxes to be pre-checked """ if not type(checked) is list: checked = [checked] for row in datalist: if 1 or row[0] not in [-1, "-1", 0, "0"]: # always box, check another place chkstr = str(startindex) in checked and 'checked="checked"' or '' row.insert(0, '<input type="checkbox" name="%s" value="%s" %s />' % (name, startindex, chkstr)) else: row.insert(0, '') startindex += 1 return datalist def createhiddenform(action="", text="", button="confirm", cnfrm='', **hidden): """create select with hidden values and submit button action - name of the action to perform on submit text - additional text, can also be used to add non hidden input button - value/caption on the submit button cnfrm - if given, must check checkbox to confirm **hidden - dictionary with name=value pairs for hidden input """ output = '<form action="%s" method="post">\n' % (action, ) output += '<table>\n<tr><td style="vertical-align: top">' output += text.decode('utf-8') if cnfrm: output += ' <input type="checkbox" name="confirm" value="1"/>' for key in hidden.keys(): if type(hidden[key]) is list: for value in hidden[key]: output += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, value) else: output += ' <input type="hidden" name="%s" value="%s"/>\n' % (key, hidden[key]) output += '</td><td style="vertical-align: bottom">' output += ' <input class="adminbutton" type="submit" value="%s"/>\n' % (button, ) output += '</td></tr></table>' output += '</form>\n' return output def get_languages(): languages = [] for (lang, lang_namelong) in language_list_long(): languages.append((lang, lang_namelong)) languages.sort() return languages def get_def_name(ID, table): """Returns a list of the names, either with the name in the current language, the default language, or just the name from the given table ln - a language supported by Invenio type - the type of value wanted, like 'ln', 'sn'""" name = "name" if table[-1:].isupper(): name = "NAME" try: if ID: res = run_sql("SELECT id,name FROM %s where id=%s" % (table, ID)) else: res = run_sql("SELECT id,name FROM %s" % table) res = list(res) res.sort(compare_on_val) return res except StandardError as e: return [] def get_i8n_name(ID, ln, rtype, table): """Returns a list of the names, either with the name in the current language, the default language, or just the name from the given table ln - a language supported by Invenio type - the type of value wanted, like 'ln', 'sn'""" name = "name" if table[-1:].isupper(): name = "NAME" try: res = "" if ID: res = run_sql("SELECT id_%s,value FROM %s%s where type='%s' and ln='%s' and id_%s=%s" % (table, table, name, rtype,ln, table, ID)) else: res = run_sql("SELECT id_%s,value FROM %s%s where type='%s' and ln='%s'" % (table, table, name, rtype,ln)) if ln != CFG_SITE_LANG: if ID: res1 = run_sql("SELECT id_%s,value FROM %s%s WHERE ln='%s' and 
type='%s' and id_%s=%s" % (table, table, name, CFG_SITE_LANG, rtype, table, ID)) else: res1 = run_sql("SELECT id_%s,value FROM %s%s WHERE ln='%s' and type='%s'" % (table, table, name, CFG_SITE_LANG, rtype)) res2 = dict(res) result = filter(lambda x: x[0] not in res2, res1) res = res + result if ID: res1 = run_sql("SELECT id,name FROM %s where id=%s" % (table, ID)) else: res1 = run_sql("SELECT id,name FROM %s" % table) res2 = dict(res) result = filter(lambda x: x[0] not in res2, res1) res = res + result res = list(res) res.sort(compare_on_val) return res except StandardError as e: raise StandardError def get_name(ID, ln, rtype, table, id_column=None): """Returns the value from the table name based on arguments ID - id ln - a language supported by Invenio type - the type of value wanted, like 'ln', 'sn' table - tablename id_column - name of the column with identifier. If None, expect column to be named 'id_%s' % table """ name = "name" if table[-1:].isupper(): name = "NAME" if id_column: id_column = wash_table_column_name(id_column) try: res = run_sql("SELECT value FROM %s%s WHERE type='%s' and ln='%s' and %s=%s" % (table, name, rtype, ln, (id_column or 'id_%s' % wash_table_column_name(table)), ID)) return res except StandardError as e: return () def modify_translations(ID, langs, sel_type, trans, table, id_column=None): """add or modify translations in tables given by table frmID - the id of the format from the format table sel_type - the name type langs - the languages trans - the translations, in same order as in langs table - the table id_column - name of the column with identifier. If None, expect column to be named 'id_%s' % table """ name = "name" if table[-1:].isupper(): name = "NAME" id_column = id_column or 'id_%s' % table if id_column: id_column = wash_table_column_name(id_column) try: for nr in range(0,len(langs)): res = run_sql("SELECT value FROM %s%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column), (ID, sel_type, langs[nr][0])) if res: if trans[nr]: res = run_sql("UPDATE %s%s SET value=%%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column), (trans[nr], ID, sel_type, langs[nr][0])) else: res = run_sql("DELETE FROM %s%s WHERE %s=%%s AND type=%%s AND ln=%%s" % (table, name, id_column), (ID, sel_type, langs[nr][0])) else: if trans[nr]: res = run_sql("INSERT INTO %s%s (%s, type, ln, value) VALUES (%%s,%%s,%%s,%%s)" % (table, name, id_column), (ID, sel_type, langs[nr][0], trans[nr])) return (1, "") except StandardError as e: return (0, e) def write_outcome(res): """ Write the outcome of an update of some settings. Parameter 'res' is a tuple (int, str), where 'int' is 0 when there is an error to display, and 1 when everything went fine. 'str' is a message displayed when there is an error. """ if res and res[0] == 1: return """<b><span class="info">Operation successfully completed.</span></b>""" elif res: return """<b><span class="info">Operation failed. Reason:</span></b><br />%s""" % res[1]
1
12295
Don't miss this one `2: I102 copyright year is outdated, expected 2014 but got 2013`. Thanks
inveniosoftware-invenio
py
@@ -112,8 +112,9 @@ size_t Cord::appendTo(std::string& str) const { } // Last block - str.append(tail_, blockPt_); - + if (tail_) { + str.append(tail_, blockPt_); + } return len_; }
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Cord.h" #include "base/Logging.h" namespace nebula { Cord::Cord(int32_t blockSize) : blockSize_(blockSize) , blockContentSize_(blockSize_ - sizeof(char*)) , blockPt_(blockContentSize_) { } Cord::~Cord() { clear(); } void Cord::allocateBlock() { DCHECK_EQ(blockPt_, blockContentSize_); char* blk = reinterpret_cast<char*>(malloc(blockSize_ * sizeof(char))); CHECK(blk) << "Out of memory"; if (tail_) { // Link the tail to the new block memcpy(tail_ + blockPt_, reinterpret_cast<char*>(&blk), sizeof(char*)); } tail_ = blk; blockPt_ = 0; if (!head_) { head_ = blk; } } size_t Cord::size() const noexcept { return len_; } bool Cord::empty() const noexcept { return len_ == 0; } void Cord::clear() { if (head_) { DCHECK(tail_); // Need to release all blocks char* p = head_; while (p != tail_) { char* next; memcpy(reinterpret_cast<char*>(&next), p + blockContentSize_, sizeof(char*)); free(p); p = next; } // Free the last block free(p); } blockPt_ = blockContentSize_; len_ = 0; head_ = nullptr; tail_ = nullptr; } bool Cord::applyTo(std::function<bool(const char*, int32_t)> visitor) const { if (empty()) { return true; } char* next = head_; while (next != tail_) { if (!visitor(next, blockContentSize_)) { // stop visiting further return false; } // Get the pointer to the next block memcpy(reinterpret_cast<char*>(&next), next + blockContentSize_, sizeof(char*)); } // Last block return visitor(tail_, blockPt_); } size_t Cord::appendTo(std::string& str) const { if (empty()) { return 0; } char* next = head_; while (next != tail_) { str.append(next, blockContentSize_); // Get the pointer to the next block memcpy(reinterpret_cast<char*>(&next), next + blockContentSize_, sizeof(char*)); } // Last block str.append(tail_, blockPt_); return len_; } std::string Cord::str() const { std::string buf; buf.reserve(len_); appendTo(buf); return buf; } Cord& Cord::write(const char* value, size_t len) { if (len == 0) { return *this; } size_t bytesToWrite = std::min(len, static_cast<size_t>(blockContentSize_ - blockPt_)); if (bytesToWrite == 0) { allocateBlock(); bytesToWrite = std::min(len, static_cast<size_t>(blockContentSize_)); } memcpy(tail_ + blockPt_, value, bytesToWrite); blockPt_ += bytesToWrite; len_ += bytesToWrite; if (bytesToWrite < len) { return write(value + bytesToWrite, len - bytesToWrite); } else { return *this; } } /********************** * * Stream operator * *********************/ Cord& Cord::operator<<(int8_t value) { return write(reinterpret_cast<char*>(&value), sizeof(int8_t)); } Cord& Cord::operator<<(uint8_t value) { return write(reinterpret_cast<char*>(&value), sizeof(uint8_t)); } Cord& Cord::operator<<(int16_t value) { return write(reinterpret_cast<char*>(&value), sizeof(int16_t)); } Cord& Cord::operator<<(uint16_t value) { return write(reinterpret_cast<char*>(&value), sizeof(uint16_t)); } Cord& Cord::operator<<(int32_t value) { return write(reinterpret_cast<char*>(&value), sizeof(int32_t)); } Cord& Cord::operator<<(uint32_t value) { return write(reinterpret_cast<char*>(&value), sizeof(uint32_t)); } Cord& Cord::operator<<(int64_t value) { return write(reinterpret_cast<char*>(&value), sizeof(int64_t)); } Cord& Cord::operator<<(uint64_t value) { return write(reinterpret_cast<char*>(&value), sizeof(uint64_t)); } Cord& Cord::operator<<(char value) { return write(&value, sizeof(char)); } Cord& 
Cord::operator<<(bool value) { return write(reinterpret_cast<char*>(&value), sizeof(bool)); } Cord& Cord::operator<<(float value) { return write(reinterpret_cast<char*>(&value), sizeof(float)); } Cord& Cord::operator<<(double value) { return write(reinterpret_cast<char*>(&value), sizeof(double)); } Cord& Cord::operator<<(const std::string& value) { return write(value.data(), value.size()); } Cord& Cord::operator<<(const char* value) { return write(value, strlen(value)); } Cord& Cord::operator<<(const Cord& rhs) { char* next = rhs.head_; while (next != rhs.tail_) { write(next, blockContentSize_); // Get the pointer to the next block memcpy(reinterpret_cast<char*>(&next), next + blockContentSize_, sizeof(char*)); } // Last block write(rhs.tail_, rhs.blockPt_); return *this; } } // namespace nebula
1
27284
the tail_ maybe nullptr?
vesoft-inc-nebula
cpp
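The review comment above asks whether `tail_` can still be a null pointer when `appendTo` reaches the last block, and the patch answers by guarding the final `str.append(tail_, blockPt_)`. As a language-neutral illustration of that guard-before-use shape, here is a tiny Go sketch; the types and names are invented for this example and are not the project's code — the actual fix is the `if (tail_)` block shown in the diff.

```go
package main

import "fmt"

// block is a toy stand-in for the Cord's tail buffer; nil means no block was
// ever allocated, e.g. nothing has been written yet.
type block struct {
	data []byte
	used int
}

// appendTo copies the tail block into dst only when it exists, mirroring the
// guarded `if (tail_) { str.append(tail_, blockPt_); }` shape of the patch.
func appendTo(dst []byte, tail *block) []byte {
	if tail != nil {
		dst = append(dst, tail.data[:tail.used]...)
	}
	return dst
}

func main() {
	tail := &block{data: []byte("tail"), used: 4}
	fmt.Println(string(appendTo([]byte("head-"), tail))) // "head-tail"
	fmt.Println(string(appendTo([]byte("head-"), nil)))  // "head-" — no crash when the tail was never allocated
}
```

The unguarded version would dereference the missing tail; with the guard, an empty structure simply contributes nothing to the output.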
@@ -92,7 +92,7 @@ func (c *Controller) reviewAdmission(ctx context.Context, req *admv1beta1.Admiss if err := json.Unmarshal(req.Object.Raw, pod); err != nil { return errs.New("unable to unmarshal %s/%s object: %v", req.Kind.Version, req.Kind.Kind, err) } - return c.createPodEntry(ctx, pod) + return c.syncPodEntry(ctx, pod) case admv1beta1.Delete: return c.deletePodEntry(ctx, req.Namespace, req.Name) default:
1
package main import ( "bytes" "context" "encoding/json" "fmt" "net/url" "path" "github.com/sirupsen/logrus" "github.com/spiffe/spire/pkg/common/idutil" "github.com/spiffe/spire/proto/spire/api/registration" "github.com/spiffe/spire/proto/spire/common" "github.com/zeebo/errs" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" admv1beta1 "k8s.io/api/admission/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type ControllerConfig struct { Log logrus.FieldLogger R registration.RegistrationClient TrustDomain string Cluster string PodLabel string PodAnnotation string } type Controller struct { c ControllerConfig } func NewController(config ControllerConfig) *Controller { return &Controller{ c: config, } } func (c *Controller) Initialize(ctx context.Context) error { // ensure there is a node registration entry for PSAT nodes in the cluster. return c.createEntry(ctx, &common.RegistrationEntry{ ParentId: idutil.ServerID(c.c.TrustDomain), SpiffeId: c.nodeID(), Selectors: []*common.Selector{ {Type: "k8s_psat", Value: fmt.Sprintf("cluster:%s", c.c.Cluster)}, }, }) } func (c *Controller) ReviewAdmission(ctx context.Context, req *admv1beta1.AdmissionRequest) (*admv1beta1.AdmissionResponse, error) { c.c.Log.WithFields(logrus.Fields{ "namespace": req.Namespace, "name": req.Name, "kind": req.Kind.Kind, "version": req.Kind.Version, "operation": req.Operation, }).Debug("ReviewAdmission called") if err := c.reviewAdmission(ctx, req); err != nil { return nil, err } return &admv1beta1.AdmissionResponse{ UID: req.UID, Allowed: true, }, nil } // reviewAdmission handles CREATE and DELETE requests for pods in // non-kubernetes namespaces. Ideally the ValidatingAdmissionWebhook // configuration has filters in place to restrict the admission requests. func (c *Controller) reviewAdmission(ctx context.Context, req *admv1beta1.AdmissionRequest) error { switch req.Namespace { case metav1.NamespacePublic, metav1.NamespaceSystem: return nil } if req.Kind != (metav1.GroupVersionKind{Version: "v1", Kind: "Pod"}) { c.c.Log.WithFields(logrus.Fields{ "version": req.Kind.Version, "kind": req.Kind.Kind, }).Warn("Admission request received for unhandled object; check filters") return nil } switch req.Operation { case admv1beta1.Create: pod := new(corev1.Pod) if err := json.Unmarshal(req.Object.Raw, pod); err != nil { return errs.New("unable to unmarshal %s/%s object: %v", req.Kind.Version, req.Kind.Kind, err) } return c.createPodEntry(ctx, pod) case admv1beta1.Delete: return c.deletePodEntry(ctx, req.Namespace, req.Name) default: c.c.Log.WithFields(logrus.Fields{ "operation": req.Operation, }).Warn("Admission request received for unhandled pod operation; check filters") } return nil } // podSpiffeID returns the desired spiffe ID for the pod, or nil if it should be ignored func (c *Controller) podSpiffeID(pod *corev1.Pod) string { if c.c.PodLabel != "" { // the controller has been configured with a pod label. if the pod // has that label, use the value to construct the pod entry. otherwise // ignore the pod altogether. if labelValue, ok := pod.Labels[c.c.PodLabel]; ok { return c.makeID("%s", labelValue) } return "" } if c.c.PodAnnotation != "" { // the controller has been configured with a pod annotation. if the pod // has that annotation, use the value to construct the pod entry. otherwise // ignore the pod altogether. 
if annotationValue, ok := pod.Annotations[c.c.PodAnnotation]; ok { return c.makeID("%s", annotationValue) } return "" } // the controller has not been configured with a pod label or a pod annotation. // create an entry based on the service account. return c.makeID("ns/%s/sa/%s", pod.Namespace, pod.Spec.ServiceAccountName) } func (c *Controller) createPodEntry(ctx context.Context, pod *corev1.Pod) error { spiffeID := c.podSpiffeID(pod) // If we have no spiffe ID for the pod, do nothing if spiffeID == "" { return nil } return c.createEntry(ctx, &common.RegistrationEntry{ ParentId: c.nodeID(), SpiffeId: spiffeID, Selectors: []*common.Selector{ namespaceSelector(pod.Namespace), podNameSelector(pod.Name), }, }) } func (c *Controller) deletePodEntry(ctx context.Context, namespace, name string) error { log := c.c.Log.WithFields(logrus.Fields{ "ns": namespace, "pod": name, }) entries, err := c.c.R.ListBySelectors(ctx, &common.Selectors{ Entries: []*common.Selector{ namespaceSelector(namespace), podNameSelector(name), }, }) if err != nil { return errs.New("unable to list by pod entries: %v", err) } log.Info("Deleting pod entries") if len(entries.Entries) > 1 { log.WithField("count", len(entries.Entries)).Warn("Multiple pod entries found to delete") } var errGroup errs.Group for _, entry := range entries.Entries { _, err := c.c.R.DeleteEntry(ctx, &registration.RegistrationEntryID{ Id: entry.EntryId, }) if err != nil { log.WithError(err).Error("Failed deleting pod entry") errGroup.Add(errs.New("unable to delete entry %q: %v", entry.EntryId, err)) } } return errGroup.Err() } func (c *Controller) nodeID() string { return c.makeID("k8s-workload-registrar/%s/node", c.c.Cluster) } func (c *Controller) makeID(pathFmt string, pathArgs ...interface{}) string { id := url.URL{ Scheme: "spiffe", Host: c.c.TrustDomain, Path: path.Clean(fmt.Sprintf(pathFmt, pathArgs...)), } return id.String() } func (c *Controller) createEntry(ctx context.Context, entry *common.RegistrationEntry) error { // ensure there is a node registration entry for PSAT nodes in the cluster. log := c.c.Log.WithFields(logrus.Fields{ "parent_id": entry.ParentId, "spiffe_id": entry.SpiffeId, "selectors": selectorsField(entry.Selectors), }) _, err := c.c.R.CreateEntry(ctx, entry) switch status.Code(err) { case codes.OK, codes.AlreadyExists: log.Info("Created pod entry") return nil default: log.WithError(err).Error("Failed to create pod entry") return errs.Wrap(err) } } func namespaceSelector(namespace string) *common.Selector { return &common.Selector{ Type: "k8s", Value: fmt.Sprintf("ns:%s", namespace), } } func podNameSelector(podName string) *common.Selector { return &common.Selector{ Type: "k8s", Value: fmt.Sprintf("pod-name:%s", podName), } } func selectorsField(selectors []*common.Selector) string { var buf bytes.Buffer for i, selector := range selectors { if i > 0 { buf.WriteString(",") } buf.WriteString(selector.Type) buf.WriteString(":") buf.WriteString(selector.Value) } return buf.String() }
1
12893
The PR description mentions that `The controller code is extended to react to "add" events`, but I don't see any additional cases added here beyond the existing Create and Delete... is there something I'm missing?
spiffe-spire
go
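The renamed `syncPodEntry` is not included in this record, so its body is unknown here. What the visible `createEntry` does show is that a `codes.AlreadyExists` response is treated as success, which is what makes re-processing the same pod (for example on a replayed "add" event) safe. Below is a minimal Go sketch of that idempotent-create shape; the error value and function names are invented stand-ins for the gRPC status check, not the project's API.

```go
package main

import (
	"errors"
	"fmt"
)

// errAlreadyExists stands in for a gRPC codes.AlreadyExists status; the real
// controller switches on status.Code(err) instead of comparing errors.
var errAlreadyExists = errors.New("already exists")

// ensureEntry shows the idempotent-create shape used by createEntry above:
// an "already exists" response is treated as success, so running the handler
// twice for the same pod does not fail.
func ensureEntry(create func() error) error {
	err := create()
	if err == nil || errors.Is(err, errAlreadyExists) {
		return nil
	}
	return fmt.Errorf("creating entry: %w", err)
}

func main() {
	fmt.Println(ensureEntry(func() error { return nil }))                          // first creation: <nil>
	fmt.Println(ensureEntry(func() error { return errAlreadyExists }))             // replayed event: still <nil>
	fmt.Println(ensureEntry(func() error { return errors.New("rpc unavailable") })) // genuine failure is surfaced
}
```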
@@ -26,7 +26,7 @@ module Blacklight ## # An OpenStruct that responds to common Hash methods class OpenStructWithHashAccess < OpenStruct - delegate :keys, :each, :map, :has_key?, :empty?, :delete, :length, :reject!, :select!, :include, :fetch, :to_json, :as_json, :to => :to_h + delegate :keys, :each, :map, :has_key?, :include?, :empty?, :length, :delete, :delete_if, :keep_if, :clear, :reject!, :select!, :replace, :fetch, :to_json, :as_json, to: :to_h if ::RUBY_VERSION < '2.0' def []=(key, value)
1
require 'ostruct' module Blacklight module Utils def self.needs_attr_accessible? if rails_3? !strong_parameters_enabled? else protected_attributes_enabled? end end def self.rails_3? Rails::VERSION::MAJOR == 3 end def self.strong_parameters_enabled? defined?(ActionController::StrongParameters) end def self.protected_attributes_enabled? defined?(ActiveModel::MassAssignmentSecurity) end end ## # An OpenStruct that responds to common Hash methods class OpenStructWithHashAccess < OpenStruct delegate :keys, :each, :map, :has_key?, :empty?, :delete, :length, :reject!, :select!, :include, :fetch, :to_json, :as_json, :to => :to_h if ::RUBY_VERSION < '2.0' def []=(key, value) send "#{key}=", value end def [](key) send key end def respond_to? method, *args super(method, *args) || has_key?(method.to_sym) end end ## # Expose the internal hash # @return [Hash] def to_h @table end def select *args, &block self.class.new to_h.select(*args, &block) end ## # Merge the values of this OpenStruct with another OpenStruct or Hash # @param [Hash,#to_h] # @return [OpenStructWithHashAccess] a new instance of an OpenStructWithHashAccess def merge other_hash self.class.new to_h.merge((other_hash if other_hash.is_a? Hash) || other_hash.to_h) end ## # Merge the values of another OpenStruct or Hash into this object # @param [Hash,#to_h] # @return [OpenStructWithHashAccess] a new instance of an OpenStructWithHashAccess def merge! other_hash @table.merge!((other_hash if other_hash.is_a? Hash) || other_hash.to_h) end def deep_dup self.class.new @table.deep_dup end end ## # An OpenStruct refinement that converts any hash-keys into # additional instances of NestedOpenStructWithHashAccess class NestedOpenStructWithHashAccess < OpenStructWithHashAccess attr_reader :nested_class delegate :default_proc=, :to => :to_h def initialize klass, *args @nested_class = klass hash = {} hashes_and_keys = args.flatten lazy_configs = hashes_and_keys.extract_options! hashes_and_keys.each do |v| if v.is_a? Hash key = v.first value = v[key] hash[key] = nested_class.new value else hash[v] = nested_class.new end end lazy_configs.each do |k,v| if v.is_a? nested_class hash[k] = v else hash[k] = nested_class.new v end end super hash set_default_proc! end ## # Add an new key to the object, with a default default def << key @table[key] end ## # Add a new key/value to the object; if it's a Hash, turn it # into another NestedOpenStructWithHashAccess def []=(key, value) if value.is_a? Hash send "#{key}=", nested_class.new(value) elsif ::RUBY_VERSION < '2.0' send "#{key}=", value else super end end ## # Before serializing, we need to reset the default proc # so it can be serialized appropriately def marshal_dump h = to_h.dup h.default = nil [nested_class, h] end ## # After deserializing, we need to re-add the default proc # to the internal hash def marshal_load x @nested_class = x.first super x.last set_default_proc! end def deep_dup self.class.new self.nested_class, @table.deep_dup end def select *args, &block self.class.new nested_class, to_h.select(*args, &block) end ## # Merge the values of this OpenStruct with another OpenStruct or Hash # @param [Hash,#to_h] # @return [OpenStructWithHashAccess] a new instance of an OpenStructWithHashAccess def merge other_hash self.class.new nested_class, to_h.merge((other_hash if other_hash.is_a? Hash) || other_hash.to_h) end ## # Merge the values of another OpenStruct or Hash into this object # @param [Hash,#to_h] # @return [OpenStructWithHashAccess] a new instance of an OpenStructWithHashAccess def merge! 
other_hash @table.merge!(nested_class, (other_hash if other_hash.is_a? Hash) || other_hash.to_h) end private def set_default_proc! self.default_proc = lambda do |hash, key| hash[key] = self.nested_class.new end end end end
1
5918
Does it matter that #include changed to #include? ? I think the change makes sense since it aligns with the method name on Hash but unsure if anything called #include that would break with the change.
projectblacklight-blacklight
rb
@@ -92,7 +92,7 @@ const ( // minimumTaskCleanupWaitDuration specifies the minimum duration to wait before cleaning up // a task's container. This is used to enforce sane values for the config.TaskCleanupWaitDuration field. - minimumTaskCleanupWaitDuration = 1 * time.Minute + minimumTaskCleanupWaitDuration = time.Second // minimumImagePullInactivityTimeout specifies the minimum amount of time for that an image can be // 'stuck' in the pull / unpack step. Very small values are unsafe and lead to high failure rate.
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package config import ( "encoding/json" "errors" "fmt" "io/ioutil" "os" "reflect" "strings" "time" apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" "github.com/aws/amazon-ecs-agent/agent/dockerclient" "github.com/aws/amazon-ecs-agent/agent/ec2" "github.com/aws/amazon-ecs-agent/agent/utils" "github.com/cihub/seelog" ) const ( // http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker DockerReservedPort = 2375 DockerReservedSSLPort = 2376 // DockerTagSeparator is the charactor used to separate names and tag in docker DockerTagSeparator = ":" // DefaultDockerTag is the default tag used by docker DefaultDockerTag = "latest" SSHPort = 22 // AgentIntrospectionPort is used to serve the metadata about the agent and to query the tasks being managed by the agent. AgentIntrospectionPort = 51678 // AgentCredentialsPort is used to serve the credentials for tasks. AgentCredentialsPort = 51679 // AgentPrometheusExpositionPort is used to expose Prometheus metrics that can be scraped by a Prometheus server AgentPrometheusExpositionPort = 51680 // defaultConfigFileName is the default (json-formatted) config file defaultConfigFileName = "/etc/ecs_container_agent/config.json" // DefaultClusterName is the name of the default cluster. DefaultClusterName = "default" // DefaultTaskCleanupWaitDuration specifies the default value for task cleanup duration. It is used to // clean up task's containers. DefaultTaskCleanupWaitDuration = 3 * time.Hour // DefaultPollingMetricsWaitDuration specifies the default value for polling metrics wait duration // This is only used when PollMetrics is set to true DefaultPollingMetricsWaitDuration = DefaultContainerMetricsPublishInterval / 2 // defaultDockerStopTimeout specifies the value for container stop timeout duration defaultDockerStopTimeout = 30 * time.Second // DefaultImageCleanupTimeInterval specifies the default value for image cleanup duration. It is used to // remove the images pulled by agent. DefaultImageCleanupTimeInterval = 30 * time.Minute // DefaultNumImagesToDeletePerCycle specifies the default number of images to delete when agent performs // image cleanup. DefaultNumImagesToDeletePerCycle = 5 // DefaultNumNonECSContainersToDeletePerCycle specifies the default number of nonecs containers to delete when agent performs // nonecs containers cleanup. DefaultNumNonECSContainersToDeletePerCycle = 5 // DefaultImageDeletionAge specifies the default value for minimum amount of elapsed time after an image // has been pulled before it can be deleted. DefaultImageDeletionAge = 1 * time.Hour // DefaultNonECSImageDeletionAge specifies the default value for minimum amount of elapsed time after an image // has been created before it can be deleted DefaultNonECSImageDeletionAge = 1 * time.Hour //DefaultImagePullTimeout specifies the timeout for PullImage API. 
DefaultImagePullTimeout = 2 * time.Hour // minimumTaskCleanupWaitDuration specifies the minimum duration to wait before cleaning up // a task's container. This is used to enforce sane values for the config.TaskCleanupWaitDuration field. minimumTaskCleanupWaitDuration = 1 * time.Minute // minimumImagePullInactivityTimeout specifies the minimum amount of time for that an image can be // 'stuck' in the pull / unpack step. Very small values are unsafe and lead to high failure rate. minimumImagePullInactivityTimeout = 1 * time.Minute // minimumPollingMetricsWaitDuration specifies the minimum duration to wait before polling for new stats // from docker. This is only used when PollMetrics is set to true minimumPollingMetricsWaitDuration = 5 * time.Second // maximumPollingMetricsWaitDuration specifies the maximum duration to wait before polling for new stats // from docker. This is only used when PollMetrics is set to true maximumPollingMetricsWaitDuration = DefaultContainerMetricsPublishInterval // minimumDockerStopTimeout specifies the minimum value for docker StopContainer API minimumDockerStopTimeout = 1 * time.Second // minimumImageCleanupInterval specifies the minimum time for agent to wait before performing // image cleanup. minimumImageCleanupInterval = 10 * time.Minute // minimumNumImagesToDeletePerCycle specifies the minimum number of images that to be deleted when // performing image cleanup. minimumNumImagesToDeletePerCycle = 1 // defaultCNIPluginsPath is the default path where cni binaries are located defaultCNIPluginsPath = "/amazon-ecs-cni-plugins" // DefaultMinSupportedCNIVersion denotes the minimum version of cni spec required DefaultMinSupportedCNIVersion = "0.3.0" // pauseContainerTarball is the path to the pause container tarball pauseContainerTarballPath = "/images/amazon-ecs-pause.tar" // DefaultTaskMetadataSteadyStateRate is set as 40. This is arrived from our benchmarking // results where task endpoint can handle 4000 rps effectively. Here, 100 containers // will be able to send out 40 rps. DefaultTaskMetadataSteadyStateRate = 40 // DefaultTaskMetadataBurstRate is set to handle 60 burst requests at once DefaultTaskMetadataBurstRate = 60 //Known cached image names CachedImageNameAgentContainer = "amazon/amazon-ecs-agent:latest" // DefaultNvidiaRuntime is the name of the runtime to pass Nvidia GPUs to containers DefaultNvidiaRuntime = "nvidia" // defaultCgroupCPUPeriod is set to 100 ms to set isCFS period and quota for task limits defaultCgroupCPUPeriod = 100 * time.Millisecond maximumCgroupCPUPeriod = 100 * time.Millisecond minimumCgroupCPUPeriod = 8 * time.Millisecond // DefaultContainerMetricsPublishInterval is the default interval that we publish // metrics to the ECS telemetry backend (TACS) DefaultContainerMetricsPublishInterval = 20 * time.Second ) const ( // ImagePullDefaultBehavior specifies the behavior that if an image pull API call fails, // agent tries to start from the Docker image cache anyway, assuming that the image has not changed. ImagePullDefaultBehavior ImagePullBehaviorType = iota // ImagePullAlwaysBehavior specifies the behavior that if an image pull API call fails, // the task fails instead of using cached image. ImagePullAlwaysBehavior // ImagePullOnceBehavior specifies the behavior that agent will only attempt to pull // the same image once, once an image is pulled, local image cache will be used // for all the containers. 
ImagePullOnceBehavior // ImagePullPreferCachedBehavior specifies the behavior that agent will only attempt to pull // the image if there is no cached image. ImagePullPreferCachedBehavior ) const ( // When ContainerInstancePropagateTagsFromNoneType is specified, no DescribeTags // API call will be made. ContainerInstancePropagateTagsFromNoneType ContainerInstancePropagateTagsFromType = iota // When ContainerInstancePropagateTagsFromEC2InstanceType is specified, agent will // make DescribeTags API call to get tags remotely. ContainerInstancePropagateTagsFromEC2InstanceType ) var ( // DefaultPauseContainerImageName is the name of the pause container image. The linker's // load flags are used to populate this value from the Makefile DefaultPauseContainerImageName = "" // DefaultPauseContainerTag is the tag for the pause container image. The linker's load // flags are used to populate this value from the Makefile DefaultPauseContainerTag = "" ) // Merge merges two config files, preferring the ones on the left. Any nil or // zero values present in the left that are present in the right will be overridden func (cfg *Config) Merge(rhs Config) *Config { left := reflect.ValueOf(cfg).Elem() right := reflect.ValueOf(&rhs).Elem() for i := 0; i < left.NumField(); i++ { leftField := left.Field(i) switch leftField.Interface().(type) { case BooleanDefaultFalse, BooleanDefaultTrue: str, _ := json.Marshal(reflect.ValueOf(leftField.Interface()).Interface()) if string(str) == "null" { leftField.Set(reflect.ValueOf(right.Field(i).Interface())) } default: if utils.ZeroOrNil(leftField.Interface()) { leftField.Set(reflect.ValueOf(right.Field(i).Interface())) } } } return cfg //make it chainable } // NewConfig returns a config struct created by merging environment variables, // a config file, and EC2 Metadata info. // The 'config' struct it returns can be used, even if an error is returned. An // error is returned, however, if the config is incomplete in some way that is // considered fatal. func NewConfig(ec2client ec2.EC2MetadataClient) (*Config, error) { var errs []error envConfig, err := environmentConfig() //Environment overrides all else if err != nil { errs = append(errs, err) } config := &envConfig if config.External.Enabled() { if config.AWSRegion == "" { return nil, errors.New("AWS_DEFAULT_REGION has to be set when running on external capacity") } // Use fake ec2 metadata client if on prem config is set. ec2client = ec2.NewBlackholeEC2MetadataClient() } if config.complete() { // No need to do file / network IO return config, nil } fcfg, err := fileConfig() if err != nil { errs = append(errs, err) } config.Merge(fcfg) config.Merge(userDataConfig(ec2client)) if config.AWSRegion == "" { if config.NoIID { // get it from AWS SDK if we don't have instance identity document awsRegion, err := ec2client.Region() if err != nil { errs = append(errs, err) } config.AWSRegion = awsRegion } else { // Get it from metadata only if we need to (network io) config.Merge(ec2MetadataConfig(ec2client)) } } return config, config.mergeDefaultConfig(errs) } func (config *Config) mergeDefaultConfig(errs []error) error { config.trimWhitespace() config.Merge(DefaultConfig()) err := config.validateAndOverrideBounds() if err != nil { errs = append(errs, err) } if len(errs) != 0 { return apierrors.NewMultiError(errs...) 
} return nil } // trimWhitespace trims whitespace from all string cfg values with the // `trim` tag func (cfg *Config) trimWhitespace() { cfgElem := reflect.ValueOf(cfg).Elem() cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type() for i := 0; i < cfgElem.NumField(); i++ { cfgField := cfgElem.Field(i) if !cfgField.CanInterface() { continue } trimTag := cfgStructField.Field(i).Tag.Get("trim") if len(trimTag) == 0 { continue } if cfgField.Kind() != reflect.String { seelog.Warnf("Cannot trim non-string field type %v index %v", cfgField.Kind().String(), i) continue } str := cfgField.Interface().(string) cfgField.SetString(strings.TrimSpace(str)) } } // validateAndOverrideBounds performs validation over members of the Config struct // and check the value against the minimum required value. func (cfg *Config) validateAndOverrideBounds() error { err := cfg.checkMissingAndDepreciated() if err != nil { return err } if cfg.DockerStopTimeout < minimumDockerStopTimeout { return fmt.Errorf("config: invalid value for docker container stop timeout: %v", cfg.DockerStopTimeout.String()) } if cfg.ContainerStartTimeout < minimumContainerStartTimeout { return fmt.Errorf("config: invalid value for docker container start timeout: %v", cfg.ContainerStartTimeout.String()) } var badDrivers []string for _, driver := range cfg.AvailableLoggingDrivers { // Don't classify awsfirelens as a bad driver if driver == dockerclient.AWSFirelensDriver { continue } _, ok := dockerclient.LoggingDriverMinimumVersion[driver] if !ok { badDrivers = append(badDrivers, string(driver)) } } if len(badDrivers) > 0 { return errors.New("Invalid logging drivers: " + strings.Join(badDrivers, ", ")) } // If a value has been set for taskCleanupWaitDuration and the value is less than the minimum allowed cleanup duration, // print a warning and override it if cfg.TaskCleanupWaitDuration < minimumTaskCleanupWaitDuration { seelog.Warnf("Invalid value for ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultTaskCleanupWaitDuration.String(), cfg.TaskCleanupWaitDuration, minimumTaskCleanupWaitDuration) cfg.TaskCleanupWaitDuration = DefaultTaskCleanupWaitDuration } if cfg.ImagePullInactivityTimeout < minimumImagePullInactivityTimeout { seelog.Warnf("Invalid value for image pull inactivity timeout duration, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", defaultImagePullInactivityTimeout.String(), cfg.ImagePullInactivityTimeout, minimumImagePullInactivityTimeout) cfg.ImagePullInactivityTimeout = defaultImagePullInactivityTimeout } if cfg.ImageCleanupInterval < minimumImageCleanupInterval { seelog.Warnf("Invalid value for ECS_IMAGE_CLEANUP_INTERVAL, will be overridden with the default value: %s. Parsed value: %v, minimum value: %v.", DefaultImageCleanupTimeInterval.String(), cfg.ImageCleanupInterval, minimumImageCleanupInterval) cfg.ImageCleanupInterval = DefaultImageCleanupTimeInterval } if cfg.NumImagesToDeletePerCycle < minimumNumImagesToDeletePerCycle { seelog.Warnf("Invalid value for number of images to delete for image cleanup, will be overridden with the default value: %d. 
Parsed value: %d, minimum value: %d.", DefaultImageDeletionAge, cfg.NumImagesToDeletePerCycle, minimumNumImagesToDeletePerCycle) cfg.NumImagesToDeletePerCycle = DefaultNumImagesToDeletePerCycle } if cfg.TaskMetadataSteadyStateRate <= 0 || cfg.TaskMetadataBurstRate <= 0 { seelog.Warnf("Invalid values for rate limits, will be overridden with default values: %d,%d.", DefaultTaskMetadataSteadyStateRate, DefaultTaskMetadataBurstRate) cfg.TaskMetadataSteadyStateRate = DefaultTaskMetadataSteadyStateRate cfg.TaskMetadataBurstRate = DefaultTaskMetadataBurstRate } // check the PollMetrics specific configurations cfg.pollMetricsOverrides() cfg.platformOverrides() return nil } func (cfg *Config) pollMetricsOverrides() { if cfg.PollMetrics.Enabled() { if cfg.PollingMetricsWaitDuration < minimumPollingMetricsWaitDuration { seelog.Warnf("ECS_POLLING_METRICS_WAIT_DURATION parsed value (%s) is less than the minimum of %s. Setting polling interval to minimum.", cfg.PollingMetricsWaitDuration, minimumPollingMetricsWaitDuration) cfg.PollingMetricsWaitDuration = minimumPollingMetricsWaitDuration } if cfg.PollingMetricsWaitDuration > maximumPollingMetricsWaitDuration { seelog.Warnf("ECS_POLLING_METRICS_WAIT_DURATION parsed value (%s) is greater than the maximum of %s. Setting polling interval to maximum.", cfg.PollingMetricsWaitDuration, maximumPollingMetricsWaitDuration) cfg.PollingMetricsWaitDuration = maximumPollingMetricsWaitDuration } } } // checkMissingAndDeprecated checks all zero-valued fields for tags of the form // missing:STRING and acts based on that string. Current options are: fatal, // warn. Fatal will result in an error being returned, warn will result in a // warning that the field is missing being logged. func (cfg *Config) checkMissingAndDepreciated() error { cfgElem := reflect.ValueOf(cfg).Elem() cfgStructField := reflect.Indirect(reflect.ValueOf(cfg)).Type() fatalFields := []string{} for i := 0; i < cfgElem.NumField(); i++ { cfgField := cfgElem.Field(i) if utils.ZeroOrNil(cfgField.Interface()) { missingTag := cfgStructField.Field(i).Tag.Get("missing") if len(missingTag) == 0 { continue } switch missingTag { case "warn": seelog.Warnf("Configuration key not set, key: %v", cfgStructField.Field(i).Name) case "fatal": seelog.Criticalf("Configuration key not set, key: %v", cfgStructField.Field(i).Name) fatalFields = append(fatalFields, cfgStructField.Field(i).Name) default: seelog.Warnf("Unexpected `missing` tag value, tag %v", missingTag) } } else { // present deprecatedTag := cfgStructField.Field(i).Tag.Get("deprecated") if len(deprecatedTag) == 0 { continue } seelog.Warnf("Use of deprecated configuration key, key: %v message: %v", cfgStructField.Field(i).Name, deprecatedTag) } } if len(fatalFields) > 0 { return errors.New("Missing required fields: " + strings.Join(fatalFields, ", ")) } return nil } // complete returns true if all fields of the config are populated / nonzero func (cfg *Config) complete() bool { cfgElem := reflect.ValueOf(cfg).Elem() for i := 0; i < cfgElem.NumField(); i++ { if utils.ZeroOrNil(cfgElem.Field(i).Interface()) { return false } } return true } func fileConfig() (Config, error) { cfg := Config{} fileName, err := getConfigFileName() if err != nil { return cfg, nil } file, err := os.Open(fileName) if err != nil { return cfg, nil } data, err := ioutil.ReadAll(file) if err != nil { seelog.Errorf("Unable to read cfg file, err %v", err) return cfg, err } if strings.TrimSpace(string(data)) == "" { // empty file, not an error return cfg, nil } err = json.Unmarshal(data, &cfg) 
if err != nil { seelog.Criticalf("Error reading cfg json data, err %v", err) return cfg, err } // Handle any deprecated keys correctly here if utils.ZeroOrNil(cfg.Cluster) && !utils.ZeroOrNil(cfg.ClusterArn) { cfg.Cluster = cfg.ClusterArn } return cfg, nil } // userDataConfig reads configuration JSON from instance's userdata. It doesn't // return any error as it's entirely optional to configure the ECS agent using // this method. // Example: // {"ECSAgentConfiguration":{"Cluster":"default"}} func userDataConfig(ec2Client ec2.EC2MetadataClient) Config { type userDataParser struct { Config Config `json:"ECSAgentConfiguration"` } parsedUserData := userDataParser{ Config: Config{}, } userData, err := ec2Client.GetUserData() if err != nil { seelog.Warnf("Unable to fetch user data: %v", err) // Unable to read userdata from instance metadata. Just // return early return parsedUserData.Config } // In the future, if we want to support base64 encoded config, // we'd need to add logic to decode the string here. err = json.Unmarshal([]byte(userData), &parsedUserData) if err != nil { seelog.Debugf("Non-json user data, skip merging into agent config: %v", err) // Unable to parse userdata as a valid JSON. Return the // empty config return Config{} } return parsedUserData.Config } // environmentConfig reads the given configs from the environment and attempts // to convert them to the given type func environmentConfig() (Config, error) { dataDir := os.Getenv("ECS_DATADIR") steadyStateRate, burstRate := parseTaskMetadataThrottles() var errs []error instanceAttributes, errs := parseInstanceAttributes(errs) containerInstanceTags, errs := parseContainerInstanceTags(errs) additionalLocalRoutes, errs := parseAdditionalLocalRoutes(errs) var err error if len(errs) > 0 { err = apierrors.NewMultiError(errs...) 
} return Config{ Cluster: os.Getenv("ECS_CLUSTER"), APIEndpoint: os.Getenv("ECS_BACKEND_HOST"), AWSRegion: os.Getenv("AWS_DEFAULT_REGION"), DockerEndpoint: os.Getenv("DOCKER_HOST"), ReservedPorts: parseReservedPorts("ECS_RESERVED_PORTS"), ReservedPortsUDP: parseReservedPorts("ECS_RESERVED_PORTS_UDP"), DataDir: dataDir, Checkpoint: parseCheckpoint(dataDir), EngineAuthType: os.Getenv("ECS_ENGINE_AUTH_TYPE"), EngineAuthData: NewSensitiveRawMessage([]byte(os.Getenv("ECS_ENGINE_AUTH_DATA"))), UpdatesEnabled: parseBooleanDefaultFalseConfig("ECS_UPDATES_ENABLED"), UpdateDownloadDir: os.Getenv("ECS_UPDATE_DOWNLOAD_DIR"), DisableMetrics: parseBooleanDefaultFalseConfig("ECS_DISABLE_METRICS"), ReservedMemory: parseEnvVariableUint16("ECS_RESERVED_MEMORY"), AvailableLoggingDrivers: parseAvailableLoggingDrivers(), PrivilegedDisabled: parseBooleanDefaultFalseConfig("ECS_DISABLE_PRIVILEGED"), SELinuxCapable: parseBooleanDefaultFalseConfig("ECS_SELINUX_CAPABLE"), AppArmorCapable: parseBooleanDefaultFalseConfig("ECS_APPARMOR_CAPABLE"), TaskCleanupWaitDuration: parseEnvVariableDuration("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION"), TaskCleanupWaitDurationJitter: parseEnvVariableDuration("ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION_JITTER"), TaskENIEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_TASK_ENI"), TaskIAMRoleEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_TASK_IAM_ROLE"), DeleteNonECSImagesEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_UNTRACKED_IMAGE_CLEANUP"), TaskCPUMemLimit: parseBooleanDefaultTrueConfig("ECS_ENABLE_TASK_CPU_MEM_LIMIT"), DockerStopTimeout: parseDockerStopTimeout(), ContainerStartTimeout: parseContainerStartTimeout(), ContainerCreateTimeout: parseContainerCreateTimeout(), DependentContainersPullUpfront: parseBooleanDefaultFalseConfig("ECS_PULL_DEPENDENT_CONTAINERS_UPFRONT"), ImagePullInactivityTimeout: parseImagePullInactivityTimeout(), ImagePullTimeout: parseEnvVariableDuration("ECS_IMAGE_PULL_TIMEOUT"), CredentialsAuditLogFile: os.Getenv("ECS_AUDIT_LOGFILE"), CredentialsAuditLogDisabled: utils.ParseBool(os.Getenv("ECS_AUDIT_LOGFILE_DISABLED"), false), TaskIAMRoleEnabledForNetworkHost: utils.ParseBool(os.Getenv("ECS_ENABLE_TASK_IAM_ROLE_NETWORK_HOST"), false), ImageCleanupDisabled: parseBooleanDefaultFalseConfig("ECS_DISABLE_IMAGE_CLEANUP"), MinimumImageDeletionAge: parseEnvVariableDuration("ECS_IMAGE_MINIMUM_CLEANUP_AGE"), NonECSMinimumImageDeletionAge: parseEnvVariableDuration("NON_ECS_IMAGE_MINIMUM_CLEANUP_AGE"), ImageCleanupInterval: parseEnvVariableDuration("ECS_IMAGE_CLEANUP_INTERVAL"), NumImagesToDeletePerCycle: parseNumImagesToDeletePerCycle(), NumNonECSContainersToDeletePerCycle: parseNumNonECSContainersToDeletePerCycle(), ImagePullBehavior: parseImagePullBehavior(), ImageCleanupExclusionList: parseImageCleanupExclusionList("ECS_EXCLUDE_UNTRACKED_IMAGE"), InstanceAttributes: instanceAttributes, CNIPluginsPath: os.Getenv("ECS_CNI_PLUGINS_PATH"), AWSVPCBlockInstanceMetdata: parseBooleanDefaultFalseConfig("ECS_AWSVPC_BLOCK_IMDS"), AWSVPCAdditionalLocalRoutes: additionalLocalRoutes, ContainerMetadataEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_CONTAINER_METADATA"), DataDirOnHost: os.Getenv("ECS_HOST_DATA_DIR"), OverrideAWSLogsExecutionRole: parseBooleanDefaultFalseConfig("ECS_ENABLE_AWSLOGS_EXECUTIONROLE_OVERRIDE"), CgroupPath: os.Getenv("ECS_CGROUP_PATH"), TaskMetadataSteadyStateRate: steadyStateRate, TaskMetadataBurstRate: burstRate, SharedVolumeMatchFullConfig: parseBooleanDefaultFalseConfig("ECS_SHARED_VOLUME_MATCH_FULL_CONFIG"), ContainerInstanceTags: 
containerInstanceTags, ContainerInstancePropagateTagsFrom: parseContainerInstancePropagateTagsFrom(), PollMetrics: parseBooleanDefaultFalseConfig("ECS_POLL_METRICS"), PollingMetricsWaitDuration: parseEnvVariableDuration("ECS_POLLING_METRICS_WAIT_DURATION"), DisableDockerHealthCheck: parseBooleanDefaultFalseConfig("ECS_DISABLE_DOCKER_HEALTH_CHECK"), GPUSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_GPU_SUPPORT"), false), InferentiaSupportEnabled: utils.ParseBool(os.Getenv("ECS_ENABLE_INF_SUPPORT"), false), NvidiaRuntime: os.Getenv("ECS_NVIDIA_RUNTIME"), TaskMetadataAZDisabled: utils.ParseBool(os.Getenv("ECS_DISABLE_TASK_METADATA_AZ"), false), CgroupCPUPeriod: parseCgroupCPUPeriod(), SpotInstanceDrainingEnabled: parseBooleanDefaultFalseConfig("ECS_ENABLE_SPOT_INSTANCE_DRAINING"), GMSACapable: parseGMSACapability(), VolumePluginCapabilities: parseVolumePluginCapabilities(), FSxWindowsFileServerCapable: parseFSxWindowsFileServerCapability(), External: parseBooleanDefaultFalseConfig("ECS_EXTERNAL"), EnableRuntimeStats: parseBooleanDefaultFalseConfig("ECS_ENABLE_RUNTIME_STATS"), ShouldExcludeIPv6PortBinding: parseBooleanDefaultTrueConfig("ECS_EXCLUDE_IPV6_PORTBINDING"), }, err } func ec2MetadataConfig(ec2client ec2.EC2MetadataClient) Config { iid, err := ec2client.InstanceIdentityDocument() if err != nil { seelog.Criticalf("Unable to communicate with EC2 Metadata service to infer region: %v", err.Error()) return Config{} } return Config{AWSRegion: iid.Region} } // String returns a lossy string representation of the config suitable for human readable display. // Consequently, it *should not* return any sensitive information. func (cfg *Config) String() string { return fmt.Sprintf( "Cluster: %v, "+ " Region: %v, "+ " DataDir: %v,"+ " Checkpoint: %v, "+ "AuthType: %v, "+ "UpdatesEnabled: %v, "+ "DisableMetrics: %v, "+ "PollMetrics: %v, "+ "PollingMetricsWaitDuration: %v, "+ "ReservedMem: %v, "+ "TaskCleanupWaitDuration: %v, "+ "DockerStopTimeout: %v, "+ "ContainerStartTimeout: %v, "+ "ContainerCreateTimeout: %v, "+ "DependentContainersPullUpfront: %v, "+ "TaskCPUMemLimit: %v, "+ "ShouldExcludeIPv6PortBinding: %v, "+ "%s", cfg.Cluster, cfg.AWSRegion, cfg.DataDir, cfg.Checkpoint, cfg.EngineAuthType, cfg.UpdatesEnabled, cfg.DisableMetrics, cfg.PollMetrics, cfg.PollingMetricsWaitDuration, cfg.ReservedMemory, cfg.TaskCleanupWaitDuration, cfg.DockerStopTimeout, cfg.ContainerStartTimeout, cfg.ContainerCreateTimeout, cfg.DependentContainersPullUpfront, cfg.TaskCPUMemLimit, cfg.ShouldExcludeIPv6PortBinding, cfg.platformString(), ) }
1
26658
LGTM, curious though why exactly this is in place, and are there any possible issues from not waiting long enough?
aws-amazon-ecs-agent
go
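The question above is about what the one-minute floor actually protects against. The surrounding config.go (in the oldf field) shows the mechanical half of the answer: `validateAndOverrideBounds` does not clamp a too-small `ECS_ENGINE_TASK_CLEANUP_WAIT_DURATION` to the minimum, it logs a warning and replaces it with the three-hour default. Here is a minimal standalone Go sketch of that validate-and-override pattern, using illustrative constant names rather than the agent's real configuration plumbing.

```go
package main

import (
	"fmt"
	"time"
)

// Illustrative values; the real agent defines these in its config package.
const (
	defaultCleanupWait = 3 * time.Hour
	minimumCleanupWait = 1 * time.Minute // the diff above lowers this floor to time.Second
)

// validateCleanupWait mirrors the "warn and fall back to the default" pattern
// seen in validateAndOverrideBounds: values below the minimum are not clamped
// to the minimum, they are replaced by the default.
func validateCleanupWait(configured time.Duration) time.Duration {
	if configured < minimumCleanupWait {
		fmt.Printf("invalid cleanup wait %v, overriding with default %v\n",
			configured, defaultCleanupWait)
		return defaultCleanupWait
	}
	return configured
}

func main() {
	fmt.Println(validateCleanupWait(10 * time.Second)) // below the old floor: silently becomes 3h
	fmt.Println(validateCleanupWait(5 * time.Minute))  // accepted as-is
}
```

With the old floor, any configured wait under a minute silently became three hours; lowering the floor to one second lets short waits take effect, which is presumably the motivation. Whether a one-second wait leaves enough time to inspect or collect logs from stopped containers is exactly the open question in the comment.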
@@ -104,7 +104,7 @@ module Beaker #cleanup phase rescue => e #cleanup on error - if @options[:preserve_hosts].to_s =~ /(never)/ + if @options[:preserve_hosts].to_s =~ /(?:never)|(?:onpass)/ @logger.notify "Cleanup: cleaning up after failed run" if @network_manager @network_manager.cleanup
1
module Beaker class CLI VERSION_STRING = " wWWWw |o o| | O | %s! |(\")| / \\X/ \\ | V | | | | " def initialize @timestamp = Time.now @options_parser = Beaker::Options::Parser.new @options = @options_parser.parse_args @logger = Beaker::Logger.new(@options) @options[:logger] = @logger @options[:timestamp] = @timestamp @execute = true if @options[:help] @logger.notify(@options_parser.usage) @execute = false return end if @options[:version] @logger.notify(VERSION_STRING % Beaker::Version::STRING) @execute = false return end @logger.info(@options.dump) if @options[:parse_only] @execute = false return end #add additional paths to the LOAD_PATH if not @options[:load_path].empty? @options[:load_path].each do |path| $LOAD_PATH << File.expand_path(path) end end @options[:helper].each do |helper| require File.expand_path(helper) end end #Provision, validate and configure all hosts as defined in the hosts file def provision begin @hosts = [] @network_manager = Beaker::NetworkManager.new(@options, @logger) @hosts = @network_manager.provision @network_manager.validate @network_manager.configure rescue => e report_and_raise(@logger, e, "CLI.provision") end end #Run Beaker tests. # # - provision hosts (includes validation and configuration) # - run pre-suite # - run tests # - run post-suite # - cleanup hosts def execute! if !@execute return end begin trap(:INT) do @logger.warn "Interrupt received; exiting..." exit(1) end provision # Setup perf monitoring if needed @perf = Beaker::Perf.new( @hosts, @options ) if @options[:collect_perf_data] errored = false #pre acceptance phase run_suite(:pre_suite, :fast) #testing phase begin run_suite(:tests) #post acceptance phase rescue => e #post acceptance on failure #run post-suite if we are in fail-slow mode if @options[:fail_mode].to_s =~ /slow/ run_suite(:post_suite) end raise e else #post acceptance on success run_suite(:post_suite) end #cleanup phase rescue => e #cleanup on error if @options[:preserve_hosts].to_s =~ /(never)/ @logger.notify "Cleanup: cleaning up after failed run" if @network_manager @network_manager.cleanup end end @perf.print_perf_info if @options[:collect_perf_data] print_reproduction_info( :error ) @logger.error "Failed running the test suite." puts '' exit 1 else #cleanup on success if @options[:preserve_hosts].to_s =~ /(never)|(onfail)/ @logger.notify "Cleanup: cleaning up after successful run" if @network_manager @network_manager.cleanup end end if @logger.is_debug? print_reproduction_info( :debug ) end @perf.print_perf_info if @options[:collect_perf_data] end end #Run the provided test suite #@param [Symbol] suite_name The test suite to execute #@param [String] failure_strategy How to proceed after a test failure, 'fast' = stop running tests immediately, 'slow' = # continue to execute tests. def run_suite(suite_name, failure_strategy = :slow) if (@options[suite_name].empty?) 
@logger.notify("No tests to run for suite '#{suite_name.to_s}'") return end Beaker::TestSuite.new( suite_name, @hosts, @options, @timestamp, failure_strategy ).run_and_raise_on_failure end # @see print_env_vars_affecting_beaker & print_command_line def print_reproduction_info( log_level = :debug ) print_command_line( log_level ) print_env_vars_affecting_beaker( log_level ) end # Prints Environment variables affecting the beaker run (those that # beaker introspects + the ruby env that beaker runs within) # @param [Symbol] log_level The log level (coloring) to print the message at # @example Print pertinent env vars using error leve reporting (red) # print_env_vars_affecting_beaker :error # # @return nil def print_env_vars_affecting_beaker( log_level ) beaker_env_vars = Beaker::Options::Presets::ENVIRONMENT_SPEC.values non_beaker_env_vars = [ 'BUNDLE_PATH', 'BUNDLE_BIN', 'GEM_HOME', 'GEM_PATH', 'RUBYLIB', 'PATH'] important_env_vars = beaker_env_vars + non_beaker_env_vars env_var_map = important_env_vars.inject({}) do |memo, possibly_set_vars| set_var = Array(possibly_set_vars).detect {|possible_var| ENV[possible_var] } memo[set_var] = ENV[set_var] if set_var memo end puts '' @logger.send( log_level, "Important ENV variables that may have affected your run:" ) env_var_map.each_pair do |var, value| @logger.send( log_level, " #{var}\t\t#{value}" ) end puts '' end # Prints the command line that can be called to reproduce this run # (assuming the environment is the same) # @param [Symbol] log_level The log level (coloring) to print the message at # @example Print pertinent env vars using error leve reporting (red) # print_command_line :error # # @return nil def print_command_line( log_level = :debug ) puts '' @logger.send(log_level, "You can reproduce this run with:\n") @logger.send(log_level, @options[:command_line]) puts '' end end end
1
7410
?: is unnecessary because we already force preserve_hosts to be a string with to_s.
voxpupuli-beaker
rb
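The comment above argues the `?:` markers are redundant for this check because the result is only used as a boolean. The record is Ruby, but non-capturing groups behave the same way in Go's regexp package, which is used here purely for illustration: `(?: )` only suppresses capture groups, it never changes whether the pattern matches.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same alternatives, spelled with and without capturing groups.
	capturing := regexp.MustCompile(`(never)|(onpass)`)
	nonCapturing := regexp.MustCompile(`(?:never)|(?:onpass)`)

	for _, mode := range []string{"never", "onpass", "always", ""} {
		// For a plain "did it match?" test the two forms are interchangeable.
		fmt.Printf("%-9q capturing=%v non-capturing=%v\n",
			mode, capturing.MatchString(mode), nonCapturing.MatchString(mode))
	}
}
```

Both patterns report identical match results for every input; the only difference would show up if the code read the captured submatches, which this truthiness-style check never does.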
@@ -76,6 +76,8 @@ public abstract class NewSessionQueuer implements HasReadyState, Routable { .with(requiresSecret), get("/se/grid/newsessionqueuer/queue/size") .to(() -> new GetNewSessionQueueSize(tracer, this)), + get("/se/grid/newsessionqueue") + .to(() -> new GetSessionQueue(tracer, this)), delete("/se/grid/newsessionqueuer/queue") .to(() -> new ClearSessionQueue(tracer, this)) .with(requiresSecret));
1
// Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.sessionqueue; import static org.openqa.selenium.remote.http.Contents.reader; import static org.openqa.selenium.remote.http.Route.combine; import static org.openqa.selenium.remote.http.Route.delete; import static org.openqa.selenium.remote.http.Route.get; import static org.openqa.selenium.remote.http.Route.post; import static org.openqa.selenium.remote.tracing.Tags.EXCEPTION; import org.openqa.selenium.Capabilities; import org.openqa.selenium.SessionNotCreatedException; import org.openqa.selenium.grid.data.RequestId; import org.openqa.selenium.grid.security.RequiresSecretFilter; import org.openqa.selenium.grid.security.Secret; import org.openqa.selenium.internal.Require; import org.openqa.selenium.remote.NewSessionPayload; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import org.openqa.selenium.remote.http.Routable; import org.openqa.selenium.remote.http.Route; import org.openqa.selenium.remote.tracing.AttributeKey; import org.openqa.selenium.remote.tracing.EventAttribute; import org.openqa.selenium.remote.tracing.EventAttributeValue; import org.openqa.selenium.remote.tracing.Span; import org.openqa.selenium.remote.tracing.Tracer; import org.openqa.selenium.status.HasReadyState; import java.io.IOException; import java.io.Reader; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.Objects; import java.util.Optional; import java.util.UUID; public abstract class NewSessionQueuer implements HasReadyState, Routable { private final Route routes; protected final Tracer tracer; protected NewSessionQueuer(Tracer tracer, Secret registrationSecret) { this.tracer = Require.nonNull("Tracer", tracer); Require.nonNull("Registration secret", registrationSecret); RequiresSecretFilter requiresSecret = new RequiresSecretFilter(registrationSecret); routes = combine( post("/session") .to(() -> this::addToQueue), post("/se/grid/newsessionqueuer/session") .to(() -> new AddToSessionQueue(tracer, this)), post("/se/grid/newsessionqueuer/session/retry/{requestId}") .to(params -> new AddBackToSessionQueue(tracer, this, requestIdFrom(params))) .with(requiresSecret), get("/se/grid/newsessionqueuer/session/{requestId}") .to(params -> new RemoveFromSessionQueue(tracer, this, requestIdFrom(params))) .with(requiresSecret), get("/se/grid/newsessionqueuer/queue/size") .to(() -> new GetNewSessionQueueSize(tracer, this)), delete("/se/grid/newsessionqueuer/queue") .to(() -> new ClearSessionQueue(tracer, this)) .with(requiresSecret)); } private RequestId requestIdFrom(Map<String, String> params) { return new RequestId(UUID.fromString(params.get("requestId"))); } public void validateSessionRequest(HttpRequest request) { 
try (Span span = tracer.getCurrentContext().createSpan("newsession_queuer.validate")) { Map<String, EventAttributeValue> attributeMap = new HashMap<>(); try ( Reader reader = reader(request); NewSessionPayload payload = NewSessionPayload.create(reader)) { Objects.requireNonNull(payload, "Requests to process must be set."); attributeMap.put("request.payload", EventAttribute.setValue(payload.toString())); Iterator<Capabilities> iterator = payload.stream().iterator(); if (!iterator.hasNext()) { SessionNotCreatedException exception = new SessionNotCreatedException("No capabilities found"); EXCEPTION.accept(attributeMap, exception); attributeMap.put( AttributeKey.EXCEPTION_MESSAGE.getKey(), EventAttribute.setValue(exception.getMessage())); span.addEvent(AttributeKey.EXCEPTION_EVENT.getKey(), attributeMap); throw exception; } } catch (IOException e) { SessionNotCreatedException exception = new SessionNotCreatedException(e.getMessage(), e); EXCEPTION.accept(attributeMap, exception); String errorMessage = "IOException while reading the request payload. " + exception.getMessage(); attributeMap.put( AttributeKey.EXCEPTION_MESSAGE.getKey(), EventAttribute.setValue(errorMessage)); span.addEvent(AttributeKey.EXCEPTION_EVENT.getKey(), attributeMap); throw exception; } } } public abstract HttpResponse addToQueue(HttpRequest request); public abstract boolean retryAddToQueue(HttpRequest request, RequestId reqId); public abstract Optional<HttpRequest> remove(RequestId reqId); public abstract int clearQueue(); public abstract int getQueueSize(); @Override public boolean matches(HttpRequest req) { return routes.matches(req); } @Override public HttpResponse execute(HttpRequest req) { return routes.execute(req); } }
1
18614
should it be `newsessionqueue` or `newsessionqueuer`? In case we'd like to be consistent
SeleniumHQ-selenium
rb
@@ -106,8 +106,8 @@ describe('debug with suspense', () => { return loader.then(() => { rerender(); - expect(console.warn).to.be.calledTwice; - expect(warnings[1].includes('MyLazyLoaded')).to.equal(true); + expect(console.warn).to.be.calledThrice; + expect(warnings[2].includes('MyLazyLoaded')).to.equal(true); expect(serializeHtml(scratch)).to.equal('<div>Hi there</div>'); }); });
1
import { createElement, render, lazy, Suspense } from 'preact/compat'; import 'preact/debug'; import { setupRerender } from 'preact/test-utils'; import { setupScratch, teardown, serializeHtml } from '../../../test/_util/helpers'; /** @jsx createElement */ describe('debug with suspense', () => { /** @type {HTMLDivElement} */ let scratch; let rerender; let errors = []; let warnings = []; beforeEach(() => { errors = []; warnings = []; scratch = setupScratch(); rerender = setupRerender(); sinon.stub(console, 'error').callsFake(e => errors.push(e)); sinon.stub(console, 'warn').callsFake(w => warnings.push(w)); }); afterEach(() => { console.error.restore(); console.warn.restore(); teardown(scratch); }); it('should throw on missing <Suspense>', () => { function Foo() { throw Promise.resolve(); } expect(() => render(<Foo />, scratch)).to.throw; }); it('should throw an error when using lazy and missing Suspense', () => { const Foo = () => <div>Foo</div>; const LazyComp = lazy( () => new Promise(resolve => resolve({ default: Foo })) ); const fn = () => { render(<LazyComp />, scratch); }; expect(fn).to.throw(/Missing Suspense/gi); }); describe('PropTypes', () => { it('should validate propTypes inside lazy()', () => { function Baz(props) { return <h1>{props.unhappy}</h1>; } Baz.propTypes = { unhappy: function alwaysThrows(obj, key) { if (obj[key] === 'signal') throw Error('got prop inside lazy()'); } }; const loader = Promise.resolve({ default: Baz }); const LazyBaz = lazy(() => loader); const suspense = ( <Suspense fallback={<div>fallback...</div>}> <LazyBaz unhappy="signal" /> </Suspense> ); render(suspense, scratch); rerender(); // render fallback expect(console.error).to.not.be.called; expect(serializeHtml(scratch)).to.equal('<div>fallback...</div>'); return loader.then(() => { rerender(); expect(errors.length).to.equal(1); expect(errors[0].includes('got prop')).to.equal(true); expect(serializeHtml(scratch)).to.equal('<h1>signal</h1>'); }); }); describe('warn for PropTypes on lazy()', () => { it('should log the function name', () => { const loader = Promise.resolve({ default: function MyLazyLoaded() { return <div>Hi there</div>; } }); const FakeLazy = lazy(() => loader); FakeLazy.propTypes = {}; const suspense = ( <Suspense fallback={<div>fallback...</div>}> <FakeLazy /> </Suspense> ); render(suspense, scratch); rerender(); // Render fallback expect(serializeHtml(scratch)).to.equal('<div>fallback...</div>'); return loader.then(() => { rerender(); expect(console.warn).to.be.calledTwice; expect(warnings[1].includes('MyLazyLoaded')).to.equal(true); expect(serializeHtml(scratch)).to.equal('<div>Hi there</div>'); }); }); it('should log the displayName', () => { function MyLazyLoadedComponent() { return <div>Hi there</div>; } MyLazyLoadedComponent.displayName = 'HelloLazy'; const loader = Promise.resolve({ default: MyLazyLoadedComponent }); const FakeLazy = lazy(() => loader); FakeLazy.propTypes = {}; const suspense = ( <Suspense fallback={<div>fallback...</div>}> <FakeLazy /> </Suspense> ); render(suspense, scratch); rerender(); // Render fallback expect(serializeHtml(scratch)).to.equal('<div>fallback...</div>'); return loader.then(() => { rerender(); expect(console.warn).to.be.calledTwice; expect(warnings[1].includes('HelloLazy')).to.equal(true); expect(serializeHtml(scratch)).to.equal('<div>Hi there</div>'); }); }); it("should not log a component if lazy loader's Promise rejects", () => { const loader = Promise.reject(new Error('Hey there')); const FakeLazy = lazy(() => loader); FakeLazy.propTypes = 
{}; render( <Suspense fallback={<div>fallback...</div>}> <FakeLazy /> </Suspense>, scratch ); rerender(); // Render fallback expect(serializeHtml(scratch)).to.equal('<div>fallback...</div>'); return loader.catch(() => { try { rerender(); } catch (e) { // Ignore the loader's bubbling error } // Called once on initial render, and again when promise rejects expect(console.warn).to.be.calledTwice; }); }); it("should not log a component if lazy's loader throws", () => { const FakeLazy = lazy(() => { throw new Error('Hello'); }); FakeLazy.propTypes = {}; let error; try { render( <Suspense fallback={<div>fallback...</div>}> <FakeLazy /> </Suspense>, scratch ); } catch (e) { error = e; } expect(console.warn).to.be.calledOnce; expect(error).not.to.be.undefined; expect(error.message).to.eql('Hello'); }); }); }); });
1
16,856
Since lazy is re-rendered when mounting the fallback, these checks get triggered an additional time.
preactjs-preact
js
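As an aside on this row, here is a minimal standalone sketch of the behaviour the reviewer describes (assumptions: a browser environment with preact/compat and preact/debug available; the component names Demo and LazyDemo are invented for illustration). Attaching `propTypes` to a `lazy()` wrapper triggers a debug-time warning, and because the suspended child is rendered once more while the fallback is mounted, the warning fires a second time, which is why the tests above expect `console.warn` to be called twice.

import { createElement, render, lazy, Suspense } from 'preact/compat';
import 'preact/debug';

// Hypothetical lazily loaded component, used only for this sketch.
const loader = Promise.resolve({
  default: function Demo() {
    return createElement('div', null, 'loaded');
  }
});
const LazyDemo = lazy(() => loader);
// preact/debug logs a warning when propTypes are set on a lazy() wrapper
// (this is the code path the "warn for PropTypes on lazy()" tests exercise).
LazyDemo.propTypes = {};

render(
  createElement(
    Suspense,
    { fallback: createElement('div', null, 'fallback...') },
    createElement(LazyDemo, null)
  ),
  document.body
);
// First warning: the initial render of the lazy wrapper, which then suspends.
// Second warning: the wrapper is rendered again while the fallback mounts, so
// the same debug check runs an additional time (the reviewer's point).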
@@ -173,6 +173,10 @@ class AbstractWebElement(collections.abc.MutableMapping): except KeyError: return False + def is_content_editable_prop(self) -> bool: + """Get the value of this element's isContentEditable property.""" + raise NotImplementedError + def _is_editable_object(self) -> bool: """Check if an object-element is editable.""" if 'type' not in self:
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2019 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Generic web element related code.""" import typing import collections.abc from PyQt5.QtCore import QUrl, Qt, QEvent, QTimer, QRect, QPoint from PyQt5.QtGui import QMouseEvent from qutebrowser.config import config from qutebrowser.keyinput import modeman from qutebrowser.mainwindow import mainwindow from qutebrowser.utils import log, usertypes, utils, qtutils, objreg MYPY = False if MYPY: # pylint: disable=unused-import,useless-suppression from qutebrowser.browser import browsertab JsValueType = typing.Union[int, float, str, None] class Error(Exception): """Base class for WebElement errors.""" class OrphanedError(Error): """Raised when a webelement's parent has vanished.""" def css_selector(group: str, url: QUrl) -> str: """Get a CSS selector for the given group/URL.""" selectors = config.instance.get('hints.selectors', url) if group not in selectors: selectors = config.val.hints.selectors if group not in selectors: raise Error("Undefined hinting group {!r}".format(group)) return ','.join(selectors[group]) class AbstractWebElement(collections.abc.MutableMapping): """A wrapper around QtWebKit/QtWebEngine web element.""" def __init__(self, tab: 'browsertab.AbstractTab') -> None: self._tab = tab def __eq__(self, other: object) -> bool: raise NotImplementedError def __str__(self) -> str: raise NotImplementedError def __getitem__(self, key: str) -> str: raise NotImplementedError def __setitem__(self, key: str, val: str) -> None: raise NotImplementedError def __delitem__(self, key: str) -> None: raise NotImplementedError def __iter__(self) -> typing.Iterator[str]: raise NotImplementedError def __len__(self) -> int: raise NotImplementedError def __repr__(self) -> str: try: html = utils.compact_text(self.outer_xml(), 500) except Error: html = None return utils.get_repr(self, html=html) def has_frame(self) -> bool: """Check if this element has a valid frame attached.""" raise NotImplementedError def geometry(self) -> QRect: """Get the geometry for this element.""" raise NotImplementedError def classes(self) -> typing.List[str]: """Get a list of classes assigned to this element.""" raise NotImplementedError def tag_name(self) -> str: """Get the tag name of this element. The returned name will always be lower-case. """ raise NotImplementedError def outer_xml(self) -> str: """Get the full HTML representation of this element.""" raise NotImplementedError def value(self) -> JsValueType: """Get the value attribute for this element, or None.""" raise NotImplementedError def set_value(self, value: JsValueType) -> None: """Set the element value.""" raise NotImplementedError def dispatch_event(self, event: str, bubbles: bool = False, cancelable: bool = False, composed: bool = False) -> None: """Dispatch an event to the element. 
Args: bubbles: Whether this event should bubble. cancelable: Whether this event can be cancelled. composed: Whether the event will trigger listeners outside of a shadow root. """ raise NotImplementedError def insert_text(self, text: str) -> None: """Insert the given text into the element.""" raise NotImplementedError def rect_on_view(self, *, elem_geometry: QRect = None, no_js: bool = False) -> QRect: """Get the geometry of the element relative to the webview. Args: elem_geometry: The geometry of the element, or None. no_js: Fall back to the Python implementation. """ raise NotImplementedError def is_writable(self) -> bool: """Check whether an element is writable.""" return not ('disabled' in self or 'readonly' in self) def is_content_editable(self) -> bool: """Check if an element has a contenteditable attribute. Args: elem: The QWebElement to check. Return: True if the element has a contenteditable attribute, False otherwise. """ try: return self['contenteditable'].lower() not in ['false', 'inherit'] except KeyError: return False def _is_editable_object(self) -> bool: """Check if an object-element is editable.""" if 'type' not in self: log.webelem.debug("<object> without type clicked...") return False objtype = self['type'].lower() if objtype.startswith('application/') or 'classid' in self: # Let's hope flash/java stuff has an application/* mimetype OR # at least a classid attribute. Oh, and let's hope images/... # DON'T have a classid attribute. HTML sucks. log.webelem.debug("<object type='{}'> clicked.".format(objtype)) return config.val.input.insert_mode.plugins else: # Image/Audio/... return False def _is_editable_input(self) -> bool: """Check if an input-element is editable. Return: True if the element is editable, False otherwise. """ try: objtype = self['type'].lower() except KeyError: return self.is_writable() else: if objtype in ['text', 'email', 'url', 'tel', 'number', 'password', 'search']: return self.is_writable() else: return False def _is_editable_classes(self) -> bool: """Check if an element is editable based on its classes. Return: True if the element is editable, False otherwise. """ # Beginnings of div-classes which are actually some kind of editor. classes = { 'div': ['CodeMirror', # Javascript editor over a textarea 'kix-', # Google Docs editor 'ace_'], # http://ace.c9.io/ 'pre': ['CodeMirror'], } relevant_classes = classes[self.tag_name()] for klass in self.classes(): if any(klass.strip().startswith(e) for e in relevant_classes): return True return False def is_editable(self, strict: bool = False) -> bool: """Check whether we should switch to insert mode for this element. Args: strict: Whether to do stricter checking so only fields where we can get the value match, for use with the :editor command. Return: True if we should switch to insert mode, False otherwise. """ roles = ('combobox', 'textbox') log.webelem.debug("Checking if element is editable: {}".format( repr(self))) tag = self.tag_name() if self.is_content_editable() and self.is_writable(): return True elif self.get('role', None) in roles and self.is_writable(): return True elif tag == 'input': return self._is_editable_input() elif tag == 'textarea': return self.is_writable() elif tag in ['embed', 'applet']: # Flash/Java/... 
return config.val.input.insert_mode.plugins and not strict elif tag == 'object': return self._is_editable_object() and not strict elif tag in ['div', 'pre']: return self._is_editable_classes() and not strict return False def is_text_input(self) -> bool: """Check if this element is some kind of text box.""" roles = ('combobox', 'textbox') tag = self.tag_name() return self.get('role', None) in roles or tag in ['input', 'textarea'] def remove_blank_target(self) -> None: """Remove target from link.""" raise NotImplementedError def resolve_url(self, baseurl: QUrl) -> typing.Optional[QUrl]: """Resolve the URL in the element's src/href attribute. Args: baseurl: The URL to base relative URLs on as QUrl. Return: A QUrl with the absolute URL, or None. """ if baseurl.isRelative(): raise ValueError("Need an absolute base URL!") for attr in ['href', 'src']: if attr in self: text = self[attr].strip() break else: return None url = QUrl(text) if not url.isValid(): return None if url.isRelative(): url = baseurl.resolved(url) qtutils.ensure_valid(url) return url def is_link(self) -> bool: """Return True if this AbstractWebElement is a link.""" href_tags = ['a', 'area', 'link'] return self.tag_name() in href_tags and 'href' in self def _requires_user_interaction(self) -> bool: """Return True if clicking this element needs user interaction.""" raise NotImplementedError def _mouse_pos(self) -> QPoint: """Get the position to click/hover.""" # Click the center of the largest square fitting into the top/left # corner of the rectangle, this will help if part of the <a> element # is hidden behind other elements # https://github.com/qutebrowser/qutebrowser/issues/1005 rect = self.rect_on_view() if rect.width() > rect.height(): rect.setWidth(rect.height()) else: rect.setHeight(rect.width()) pos = rect.center() if pos.x() < 0 or pos.y() < 0: raise Error("Element position is out of view!") return pos def _move_text_cursor(self) -> None: """Move cursor to end after clicking.""" raise NotImplementedError def _click_fake_event(self, click_target: usertypes.ClickTarget) -> None: """Send a fake click event to the element.""" pos = self._mouse_pos() log.webelem.debug("Sending fake click to {!r} at position {} with " "target {}".format(self, pos, click_target)) target_modifiers = { usertypes.ClickTarget.normal: Qt.NoModifier, usertypes.ClickTarget.window: Qt.AltModifier | Qt.ShiftModifier, usertypes.ClickTarget.tab: Qt.ControlModifier, usertypes.ClickTarget.tab_bg: Qt.ControlModifier, } if config.val.tabs.background: target_modifiers[usertypes.ClickTarget.tab] |= Qt.ShiftModifier else: target_modifiers[usertypes.ClickTarget.tab_bg] |= Qt.ShiftModifier modifiers = typing.cast(Qt.KeyboardModifiers, target_modifiers[click_target]) events = [ QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton, Qt.NoModifier), QMouseEvent(QEvent.MouseButtonPress, pos, Qt.LeftButton, Qt.LeftButton, modifiers), QMouseEvent(QEvent.MouseButtonRelease, pos, Qt.LeftButton, Qt.NoButton, modifiers), ] for evt in events: self._tab.send_event(evt) QTimer.singleShot(0, self._move_text_cursor) def _click_editable(self, click_target: usertypes.ClickTarget) -> None: """Fake a click on an editable input field.""" raise NotImplementedError def _click_js(self, click_target: usertypes.ClickTarget) -> None: """Fake a click by using the JS .click() method.""" raise NotImplementedError def _click_href(self, click_target: usertypes.ClickTarget) -> None: """Fake a click on an element with a href by opening the link.""" baseurl = self._tab.url() url = 
self.resolve_url(baseurl) if url is None: self._click_fake_event(click_target) return tabbed_browser = objreg.get('tabbed-browser', scope='window', window=self._tab.win_id) if click_target in [usertypes.ClickTarget.tab, usertypes.ClickTarget.tab_bg]: background = click_target == usertypes.ClickTarget.tab_bg tabbed_browser.tabopen(url, background=background) elif click_target == usertypes.ClickTarget.window: window = mainwindow.MainWindow(private=tabbed_browser.is_private) window.show() window.tabbed_browser.tabopen(url) else: raise ValueError("Unknown ClickTarget {}".format(click_target)) def click(self, click_target: usertypes.ClickTarget, *, force_event: bool = False) -> None: """Simulate a click on the element. Args: click_target: A usertypes.ClickTarget member, what kind of click to simulate. force_event: Force generating a fake mouse event. """ log.webelem.debug("Clicking {!r} with click_target {}, force_event {}" .format(self, click_target, force_event)) if force_event: self._click_fake_event(click_target) return if click_target == usertypes.ClickTarget.normal: if self.is_link() and not self._requires_user_interaction(): log.webelem.debug("Clicking via JS click()") self._click_js(click_target) elif self.is_editable(strict=True): log.webelem.debug("Clicking via JS focus()") self._click_editable(click_target) if config.val.input.insert_mode.auto_enter: modeman.enter(self._tab.win_id, usertypes.KeyMode.insert, 'clicking input') else: self._click_fake_event(click_target) elif click_target in [usertypes.ClickTarget.tab, usertypes.ClickTarget.tab_bg, usertypes.ClickTarget.window]: if self.is_link(): self._click_href(click_target) else: self._click_fake_event(click_target) else: raise ValueError("Unknown ClickTarget {}".format(click_target)) def hover(self) -> None: """Simulate a mouse hover over the element.""" pos = self._mouse_pos() event = QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton, Qt.NoModifier) self._tab.send_event(event)
1
23,471
There is an `is_content_editable()` method just above this; what's the difference?
qutebrowser-qutebrowser
py
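One plausible answer to the question in this row, illustrated with a plain browser-side DOM sketch (JavaScript only, no qutebrowser code involved): `is_content_editable()` looks at the element's own `contenteditable` attribute, whereas the `isContentEditable` property exposed by the new `is_content_editable_prop()` is computed by the browser and also reflects editability inherited from an ancestor.

// Plain DOM sketch: the attribute and the computed property can disagree.
const parent = document.createElement('div');
parent.setAttribute('contenteditable', 'true');
const child = document.createElement('span');
child.textContent = 'nested text';
parent.appendChild(child);
document.body.appendChild(parent); // attach to the live document

// Attribute-based view (what is_content_editable() mirrors): the child has
// no contenteditable attribute of its own.
console.log(child.getAttribute('contenteditable')); // null
// Property-based view (what is_content_editable_prop() would report): the
// browser computes editability, including inheritance from the parent.
console.log(child.isContentEditable); // true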
@@ -86,7 +86,7 @@ type PrometheusList struct { type PrometheusSpec struct { // PodMetadata configures Labels and Annotations which are propagated to the prometheus pods. PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` - // ServiceMonitors to be selected for target discovery. *Deprecated:* if + // ServiceMonitors to be selected for target discovery. *Warning:* if // neither this nor podMonitorSelector are specified, configuration is // unmanaged. ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"`
1
// Copyright 2018 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1 import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) const ( Version = "v1" PrometheusesKind = "Prometheus" PrometheusName = "prometheuses" PrometheusKindKey = "prometheus" AlertmanagersKind = "Alertmanager" AlertmanagerName = "alertmanagers" AlertManagerKindKey = "alertmanager" ServiceMonitorsKind = "ServiceMonitor" ServiceMonitorName = "servicemonitors" ServiceMonitorKindKey = "servicemonitor" PodMonitorsKind = "PodMonitor" PodMonitorName = "podmonitors" PodMonitorKindKey = "podmonitor" PrometheusRuleKind = "PrometheusRule" PrometheusRuleName = "prometheusrules" PrometheusRuleKindKey = "prometheusrule" ProbesKind = "Probe" ProbeName = "probes" ProbeKindKey = "probe" ) // Prometheus defines a Prometheus deployment. // +genclient // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Prometheus" // +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="The desired replicas number of Prometheuses" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" type Prometheus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Prometheus cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec PrometheusSpec `json:"spec"` // Most recent observed status of the Prometheus cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Status *PrometheusStatus `json:"status,omitempty"` } // PrometheusList is a list of Prometheuses. // +k8s:openapi-gen=true type PrometheusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Prometheuses Items []*Prometheus `json:"items"` } // PrometheusSpec is a specification of the desired behavior of the Prometheus cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type PrometheusSpec struct { // PodMetadata configures Labels and Annotations which are propagated to the prometheus pods. PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // ServiceMonitors to be selected for target discovery. 
*Deprecated:* if // neither this nor podMonitorSelector are specified, configuration is // unmanaged. ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` // Namespace's labels to match for ServiceMonitor discovery. If nil, only // check own namespace. ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` // *Experimental* PodMonitors to be selected for target discovery. // *Deprecated:* if neither this nor serviceMonitorSelector are specified, // configuration is unmanaged. PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"` // Namespace's labels to match for PodMonitor discovery. If nil, only // check own namespace. PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"` // *Experimental* Probes to be selected for target discovery. ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"` // *Experimental* Namespaces to be selected for Probe discovery. If nil, only check own namespace. ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"` // Version of Prometheus to be deployed. Version string `json:"version,omitempty"` // Tag of Prometheus container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. // Deprecated: use 'image' instead. The image tag can be specified // as part of the image URL. Tag string `json:"tag,omitempty"` // SHA of Prometheus container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. // Deprecated: use 'image' instead. The image digest can be specified // as part of the image URL. SHA string `json:"sha,omitempty"` // When a Prometheus deployment is paused, no actions except for deletion // will be performed on the underlying objects. Paused bool `json:"paused,omitempty"` // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the // Prometheus Operator knows what version of Prometheus is being // configured. Image *string `json:"image,omitempty"` // Base image to use for a Prometheus deployment. // Deprecated: use 'image' instead BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // Number of replicas of each shard to deploy for a Prometheus deployment. // Number of replicas multiplied by shards is the total number of Pods // created. Replicas *int32 `json:"replicas,omitempty"` // EXPERIMENTAL: Number of shards to distribute targets onto. Number of // replicas multiplied by shards is the total number of Pods created. Note // that scaling down shards will not reshard data onto remaining instances, // it must be manually moved. Increasing shards will not reshard data // either but it will continue to be available from the same instances. To // query globally use Thanos sidecar and Thanos querier or remote write // data to a central location. Sharding is done on the content of the // `__address__` target meta-label. Shards *int32 `json:"shards,omitempty"` // Name of Prometheus external label used to denote replica name. 
// Defaults to the value of `prometheus_replica`. External label will // _not_ be added when value is set to empty string (`""`). ReplicaExternalLabelName *string `json:"replicaExternalLabelName,omitempty"` // Name of Prometheus external label used to denote Prometheus instance // name. Defaults to the value of `prometheus`. External label will // _not_ be added when value is set to empty string (`""`). PrometheusExternalLabelName *string `json:"prometheusExternalLabelName,omitempty"` // Time duration Prometheus shall retain data for. Default is '24h', // and must match the regular expression `[0-9]+(ms|s|m|h|d|w|y)` (milliseconds seconds minutes hours days weeks years). Retention string `json:"retention,omitempty"` // Maximum amount of disk space used by blocks. Supported units: B, KB, MB, GB, TB, PB, EB. Ex: `512MB`. RetentionSize string `json:"retentionSize,omitempty"` // Disable prometheus compaction. DisableCompaction bool `json:"disableCompaction,omitempty"` // Enable compression of the write-ahead log using Snappy. This flag is // only available in versions of Prometheus >= 2.11.0. WALCompression *bool `json:"walCompression,omitempty"` // Log level for Prometheus to be configured with. LogLevel string `json:"logLevel,omitempty"` // Log format for Prometheus to be configured with. LogFormat string `json:"logFormat,omitempty"` // Interval between consecutive scrapes. ScrapeInterval string `json:"scrapeInterval,omitempty"` // Number of seconds to wait for target to respond before erroring. ScrapeTimeout string `json:"scrapeTimeout,omitempty"` // Interval between consecutive evaluations. EvaluationInterval string `json:"evaluationInterval,omitempty"` // /--rules.*/ command-line arguments. Rules Rules `json:"rules,omitempty"` // The labels to add to any time series or alerts when communicating with // external systems (federation, remote storage, Alertmanager). ExternalLabels map[string]string `json:"externalLabels,omitempty"` // Enable access to prometheus web admin API. Defaults to the value of `false`. // WARNING: Enabling the admin APIs enables mutating endpoints, to delete data, // shutdown Prometheus, and more. Enabling this should be done with care and the // user is advised to add additional authentication authorization via a proxy to // ensure only clients authorized to perform these actions can do so. // For more information see https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis EnableAdminAPI bool `json:"enableAdminAPI,omitempty"` // The external URL the Prometheus instances will be available under. This is // necessary to generate correct URLs. This is necessary if Prometheus is not // served from root of a DNS name. ExternalURL string `json:"externalUrl,omitempty"` // The route prefix Prometheus registers HTTP handlers for. This is useful, // if using ExternalURL and a proxy is rewriting HTTP routes of a request, // and the actual ExternalURL is still true, but the server serves requests // under a different route prefix. For example for use with `kubectl proxy`. RoutePrefix string `json:"routePrefix,omitempty"` // QuerySpec defines the query command line flags when starting Prometheus. Query *QuerySpec `json:"query,omitempty"` // Storage spec to specify how storage shall be used. Storage *StorageSpec `json:"storage,omitempty"` // Volumes allows configuration of additional volumes on the output StatefulSet definition. Volumes specified will // be appended to other volumes that are generated as a result of StorageSpec objects. 
Volumes []v1.Volume `json:"volumes,omitempty"` // VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. // VolumeMounts specified will be appended to other VolumeMounts in the prometheus container, // that are generated as a result of StorageSpec objects. VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` // WebSpec defines the web command line flags when starting Prometheus. Web *WebSpec `json:"web,omitempty"` // A selector to select which PrometheusRules to mount for loading alerting/recording // rules from. Until (excluding) Prometheus Operator v0.24.0 Prometheus // Operator will migrate any legacy rule ConfigMaps to PrometheusRule custom // resources selected by RuleSelector. Make sure it does not match any config // maps that you do not want to be migrated. RuleSelector *metav1.LabelSelector `json:"ruleSelector,omitempty"` // Namespaces to be selected for PrometheusRules discovery. If unspecified, only // the same namespace as the Prometheus object is in is used. RuleNamespaceSelector *metav1.LabelSelector `json:"ruleNamespaceSelector,omitempty"` // Define details regarding alerting. Alerting *AlertingSpec `json:"alerting,omitempty"` // Define resources requests and limits for single Pods. Resources v1.ResourceRequirements `json:"resources,omitempty"` // Define which Nodes the Pods are scheduled on. NodeSelector map[string]string `json:"nodeSelector,omitempty"` // ServiceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` // Secrets is a list of Secrets in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. // The Secrets are mounted into /etc/prometheus/secrets/<secret-name>. Secrets []string `json:"secrets,omitempty"` // ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus // object, which shall be mounted into the Prometheus Pods. // The ConfigMaps are mounted into /etc/prometheus/configmaps/<configmap-name>. ConfigMaps []string `json:"configMaps,omitempty"` // If specified, the pod's scheduling constraints. Affinity *v1.Affinity `json:"affinity,omitempty"` // If specified, the pod's tolerations. Tolerations []v1.Toleration `json:"tolerations,omitempty"` // If specified, the pod's topology spread constraints. TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` // If specified, the remote_write spec. This is an experimental feature, it may change in any upcoming release in a breaking way. RemoteWrite []RemoteWriteSpec `json:"remoteWrite,omitempty"` // If specified, the remote_read spec. This is an experimental feature, it may change in any upcoming release in a breaking way. RemoteRead []RemoteReadSpec `json:"remoteRead,omitempty"` // SecurityContext holds pod-level security attributes and common container settings. // This defaults to the default PodSecurityContext. SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` // ListenLocal makes the Prometheus server listen on loopback, so that it // does not bind against the Pod IP. ListenLocal bool `json:"listenLocal,omitempty"` // Containers allows injecting additional containers or modifying operator // generated containers. This can be used to allow adding an authentication // proxy to a Prometheus pod or to change the behavior of an operator // generated container. 
Containers described here modify an operator // generated container if they share the same name and modifications are // done via a strategic merge patch. The current container names are: // `prometheus`, `config-reloader`, and `thanos-sidecar`. Overriding // containers is entirely outside the scope of what the maintainers will // support and by doing so, you accept that this behaviour may break at any // time without notice. Containers []v1.Container `json:"containers,omitempty"` // InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. // fetch secrets for injection into the Prometheus configuration from external sources. Any errors // during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // Using initContainers for any use case other then secret fetching is entirely outside the scope // of what the maintainers will support and by doing so, you accept that this behaviour may break // at any time without notice. InitContainers []v1.Container `json:"initContainers,omitempty"` // AdditionalScrapeConfigs allows specifying a key of a Secret containing // additional Prometheus scrape configurations. Scrape configurations // specified are appended to the configurations generated by the Prometheus // Operator. Job configurations specified must have the form as specified // in the official Prometheus documentation: // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. // As scrape configs are appended, the user is responsible to make sure it // is valid. Note that using this feature may expose the possibility to // break upgrades of Prometheus. It is advised to review Prometheus release // notes to ensure that no incompatible scrape configs are going to break // Prometheus after the upgrade. AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"` // AdditionalAlertRelabelConfigs allows specifying a key of a Secret containing // additional Prometheus alert relabel configurations. Alert relabel configurations // specified are appended to the configurations generated by the Prometheus // Operator. Alert relabel configurations specified must have the form as specified // in the official Prometheus documentation: // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. // As alert relabel configs are appended, the user is responsible to make sure it // is valid. Note that using this feature may expose the possibility to // break upgrades of Prometheus. It is advised to review Prometheus release // notes to ensure that no incompatible alert relabel configs are going to break // Prometheus after the upgrade. AdditionalAlertRelabelConfigs *v1.SecretKeySelector `json:"additionalAlertRelabelConfigs,omitempty"` // AdditionalAlertManagerConfigs allows specifying a key of a Secret containing // additional Prometheus AlertManager configurations. AlertManager configurations // specified are appended to the configurations generated by the Prometheus // Operator. Job configurations specified must have the form as specified // in the official Prometheus documentation: // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config. // As AlertManager configs are appended, the user is responsible to make sure it // is valid. Note that using this feature may expose the possibility to // break upgrades of Prometheus. 
It is advised to review Prometheus release // notes to ensure that no incompatible AlertManager configs are going to break // Prometheus after the upgrade. AdditionalAlertManagerConfigs *v1.SecretKeySelector `json:"additionalAlertManagerConfigs,omitempty"` // APIServerConfig allows specifying a host and auth methods to access apiserver. // If left empty, Prometheus is assumed to run inside of the cluster // and will discover API servers automatically and use the pod's CA certificate // and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. APIServerConfig *APIServerConfig `json:"apiserverConfig,omitempty"` // Thanos configuration allows configuring various aspects of a Prometheus // server in a Thanos environment. // // This section is experimental, it may change significantly without // deprecation notice in any release. // // This is experimental and may change significantly without backward // compatibility in any release. Thanos *ThanosSpec `json:"thanos,omitempty"` // Priority class assigned to the Pods PriorityClassName string `json:"priorityClassName,omitempty"` // Port name used for the pods and governing service. // This defaults to web PortName string `json:"portName,omitempty"` // ArbitraryFSAccessThroughSMs configures whether configuration // based on a service monitor can access arbitrary files on the file system // of the Prometheus container e.g. bearer token files. ArbitraryFSAccessThroughSMs ArbitraryFSAccessThroughSMsConfig `json:"arbitraryFSAccessThroughSMs,omitempty"` // OverrideHonorLabels if set to true overrides all user configured honor_labels. // If HonorLabels is set in ServiceMonitor or PodMonitor to true, this overrides honor_labels to false. OverrideHonorLabels bool `json:"overrideHonorLabels,omitempty"` // OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs. OverrideHonorTimestamps bool `json:"overrideHonorTimestamps,omitempty"` // IgnoreNamespaceSelectors if set to true will ignore NamespaceSelector settings from // the podmonitor and servicemonitor configs, and they will only discover endpoints // within their current namespace. Defaults to false. IgnoreNamespaceSelectors bool `json:"ignoreNamespaceSelectors,omitempty"` // EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert // and metric that is user created. The label value will always be the namespace of the object that is // being created. EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` // PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing // of adding namespace labels. Works only if enforcedNamespaceLabel set to true. // Make sure both ruleNamespace and ruleName are set for each pair PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` // QueryLogFile specifies the file to which PromQL queries are logged. // Note that this location must be writable, and can be persisted using an attached volume. // Alternatively, the location can be set to a stdout location such as `/dev/stdout` to log // querie information to the default Prometheus log stream. // This is only available in versions of Prometheus >= 2.16.0. // For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/) QueryLogFile string `json:"queryLogFile,omitempty"` // EnforcedSampleLimit defines global limit on number of scraped samples // that will be accepted. 
This overrides any SampleLimit set per // ServiceMonitor or/and PodMonitor. It is meant to be used by admins to // enforce the SampleLimit to keep overall number of samples/series under // the desired limit. // Note that if SampleLimit is lower that value will be taken instead. EnforcedSampleLimit *uint64 `json:"enforcedSampleLimit,omitempty"` // AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. // This is still experimental in Prometheus so it may change in any upcoming release. AllowOverlappingBlocks bool `json:"allowOverlappingBlocks,omitempty"` // EnforcedTargetLimit defines a global limit on the number of scraped targets. // This overrides any TargetLimit set per ServiceMonitor or/and PodMonitor. // It is meant to be used by admins to // enforce the TargetLimit to keep overall number of targets under // the desired limit. // Note that if TargetLimit is higher that value will be taken instead. EnforcedTargetLimit *uint64 `json:"enforcedTargetLimit,omitempty"` } // PrometheusRuleExcludeConfig enables users to configure excluded PrometheusRule names and their namespaces // to be ignored while enforcing namespace label for alerts and metrics. type PrometheusRuleExcludeConfig struct { // RuleNamespace - namespace of excluded rule RuleNamespace string `json:"ruleNamespace"` // RuleNamespace - name of excluded rule RuleName string `json:"ruleName"` } // ArbitraryFSAccessThroughSMsConfig enables users to configure, whether // a service monitor selected by the Prometheus instance is allowed to use // arbitrary files on the file system of the Prometheus container. This is the case // when e.g. a service monitor specifies a BearerTokenFile in an endpoint. A // malicious user could create a service monitor selecting arbitrary secret files // in the Prometheus container. Those secrets would then be sent with a scrape // request by Prometheus to a malicious target. Denying the above would prevent the // attack, users can instead use the BearerTokenSecret field. type ArbitraryFSAccessThroughSMsConfig struct { Deny bool `json:"deny,omitempty"` } // PrometheusStatus is the most recent observed status of the Prometheus cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type PrometheusStatus struct { // Represents whether any actions on the underlying managed objects are // being performed. Only delete actions will be performed. Paused bool `json:"paused"` // Total number of non-terminated pods targeted by this Prometheus deployment // (their labels match the selector). Replicas int32 `json:"replicas"` // Total number of non-terminated pods targeted by this Prometheus deployment // that have the desired version spec. UpdatedReplicas int32 `json:"updatedReplicas"` // Total number of available pods (ready for at least minReadySeconds) // targeted by this Prometheus deployment. AvailableReplicas int32 `json:"availableReplicas"` // Total number of unavailable pods targeted by this Prometheus deployment. UnavailableReplicas int32 `json:"unavailableReplicas"` } // AlertingSpec defines parameters for alerting configuration of Prometheus servers. // +k8s:openapi-gen=true type AlertingSpec struct { // AlertmanagerEndpoints Prometheus should fire alerts against. 
Alertmanagers []AlertmanagerEndpoints `json:"alertmanagers"` } // StorageSpec defines the configured storage for a group Prometheus servers. // If neither `emptyDir` nor `volumeClaimTemplate` is specified, then by default an [EmptyDir](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir) will be used. // +k8s:openapi-gen=true type StorageSpec struct { // Deprecated: subPath usage will be disabled by default in a future release, this option will become unnecessary. // DisableMountSubPath allows to remove any subPath usage in volume mounts. DisableMountSubPath bool `json:"disableMountSubPath,omitempty"` // EmptyDirVolumeSource to be used by the Prometheus StatefulSets. If specified, used in place of any volumeClaimTemplate. More // info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` // A PVC spec to be used by the Prometheus StatefulSets. VolumeClaimTemplate EmbeddedPersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` } // EmbeddedPersistentVolumeClaim is an embedded version of k8s.io/api/core/v1.PersistentVolumeClaim. // It contains TypeMeta and a reduced ObjectMeta. type EmbeddedPersistentVolumeClaim struct { metav1.TypeMeta `json:",inline"` // EmbeddedMetadata contains metadata relevant to an EmbeddedResource. EmbeddedObjectMetadata `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired characteristics of a volume requested by a pod author. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional Spec v1.PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status represents the current information/status of a persistent volume claim. // Read-only. // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims // +optional Status v1.PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } // EmbeddedObjectMetadata contains a subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta // Only fields which are relevant to embedded resources are included. type EmbeddedObjectMetadata struct { // Name must be unique within a namespace. Is required when creating resources, although // some resources may allow a client to request the generation of an appropriate name // automatically. Name is primarily intended for creation idempotence and configuration // definition. // Cannot be updated. // More info: http://kubernetes.io/docs/user-guide/identifiers#names // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // Map of string keys and values that can be used to organize and categorize // (scope and select) objects. May match selectors of replication controllers // and services. // More info: http://kubernetes.io/docs/user-guide/labels // +optional Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` // Annotations is an unstructured key value map stored with a resource that may be // set by external tools to store and retrieve arbitrary metadata. They are not // queryable and should be preserved when modifying objects. // More info: http://kubernetes.io/docs/user-guide/annotations // +optional Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` } // QuerySpec defines the query command line flags when starting Prometheus. 
// +k8s:openapi-gen=true type QuerySpec struct { // The delta difference allowed for retrieving metrics during expression evaluations. LookbackDelta *string `json:"lookbackDelta,omitempty"` // Number of concurrent queries that can be run at once. MaxConcurrency *int32 `json:"maxConcurrency,omitempty"` // Maximum number of samples a single query can load into memory. Note that queries will fail if they would load more samples than this into memory, so this also limits the number of samples a query can return. MaxSamples *int32 `json:"maxSamples,omitempty"` // Maximum time a query may take before being aborted. Timeout *string `json:"timeout,omitempty"` } // WebSpec defines the query command line flags when starting Prometheus. // +k8s:openapi-gen=true type WebSpec struct { // The prometheus web page title PageTitle *string `json:"pageTitle,omitempty"` } // ThanosSpec defines parameters for a Prometheus server within a Thanos deployment. // +k8s:openapi-gen=true type ThanosSpec struct { // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the // Prometheus Operator knows what version of Thanos is being // configured. Image *string `json:"image,omitempty"` // Version describes the version of Thanos to use. Version *string `json:"version,omitempty"` // Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. // Deprecated: use 'image' instead. The image tag can be specified // as part of the image URL. Tag *string `json:"tag,omitempty"` // SHA of Thanos container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. // Deprecated: use 'image' instead. The image digest can be specified // as part of the image URL. SHA *string `json:"sha,omitempty"` // Thanos base image if other than default. // Deprecated: use 'image' instead BaseImage *string `json:"baseImage,omitempty"` // Resources defines the resource requirements for the Thanos sidecar. // If not provided, no requests/limits will be set Resources v1.ResourceRequirements `json:"resources,omitempty"` // ObjectStorageConfig configures object storage in Thanos. // Alternative to ObjectStorageConfigFile, and lower order priority. ObjectStorageConfig *v1.SecretKeySelector `json:"objectStorageConfig,omitempty"` // ObjectStorageConfigFile specifies the path of the object storage configuration file. // When used alongside with ObjectStorageConfig, ObjectStorageConfigFile takes precedence. ObjectStorageConfigFile *string `json:"objectStorageConfigFile,omitempty"` // ListenLocal makes the Thanos sidecar listen on loopback, so that it // does not bind against the Pod IP. ListenLocal bool `json:"listenLocal,omitempty"` // TracingConfig configures tracing in Thanos. This is an experimental feature, it may change in any upcoming release in a breaking way. TracingConfig *v1.SecretKeySelector `json:"tracingConfig,omitempty"` // TracingConfig specifies the path of the tracing configuration file. // When used alongside with TracingConfig, TracingConfigFile takes precedence. TracingConfigFile string `json:"tracingConfigFile,omitempty"` // GRPCServerTLSConfig configures the gRPC server from which Thanos Querier reads // recorded rule data. // Note: Currently only the CAFile, CertFile, and KeyFile fields are supported. // Maps to the '--grpc-server-tls-*' CLI args. 
GRPCServerTLSConfig *TLSConfig `json:"grpcServerTlsConfig,omitempty"` // LogLevel for Thanos sidecar to be configured with. LogLevel string `json:"logLevel,omitempty"` // LogFormat for Thanos sidecar to be configured with. LogFormat string `json:"logFormat,omitempty"` // MinTime for Thanos sidecar to be configured with. Option can be a constant time in RFC3339 format or time duration relative to current time, such as -1d or 2h45m. Valid duration units are ms, s, m, h, d, w, y. MinTime string `json:"minTime,omitempty"` } // RemoteWriteSpec defines the remote_write configuration for prometheus. // +k8s:openapi-gen=true type RemoteWriteSpec struct { // The URL of the endpoint to send samples to. URL string `json:"url"` // The name of the remote write queue, must be unique if specified. The // name is used in metrics and logging in order to differentiate queues. // Only valid in Prometheus versions 2.15.0 and newer. Name string `json:"name,omitempty"` // Timeout for requests to the remote write endpoint. RemoteTimeout string `json:"remoteTimeout,omitempty"` // The list of remote write relabel configurations. WriteRelabelConfigs []RelabelConfig `json:"writeRelabelConfigs,omitempty"` //BasicAuth for the URL. BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // File to read bearer token for remote write. BearerToken string `json:"bearerToken,omitempty"` // File to read bearer token for remote write. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for remote write. TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` // Optional ProxyURL ProxyURL string `json:"proxyUrl,omitempty"` // QueueConfig allows tuning of the remote write queue parameters. QueueConfig *QueueConfig `json:"queueConfig,omitempty"` } // QueueConfig allows the tuning of remote_write queue_config parameters. This object // is referenced in the RemoteWriteSpec object. // +k8s:openapi-gen=true type QueueConfig struct { // Capacity is the number of samples to buffer per shard before we start dropping them. Capacity int `json:"capacity,omitempty"` // MinShards is the minimum number of shards, i.e. amount of concurrency. MinShards int `json:"minShards,omitempty"` // MaxShards is the maximum number of shards, i.e. amount of concurrency. MaxShards int `json:"maxShards,omitempty"` // MaxSamplesPerSend is the maximum number of samples per send. MaxSamplesPerSend int `json:"maxSamplesPerSend,omitempty"` // BatchSendDeadline is the maximum time a sample will wait in buffer. BatchSendDeadline string `json:"batchSendDeadline,omitempty"` // MaxRetries is the maximum number of times to retry a batch on recoverable errors. MaxRetries int `json:"maxRetries,omitempty"` // MinBackoff is the initial retry delay. Gets doubled for every retry. MinBackoff string `json:"minBackoff,omitempty"` // MaxBackoff is the maximum retry delay. MaxBackoff string `json:"maxBackoff,omitempty"` } // RemoteReadSpec defines the remote_read configuration for prometheus. // +k8s:openapi-gen=true type RemoteReadSpec struct { // The URL of the endpoint to send samples to. URL string `json:"url"` // The name of the remote read queue, must be unique if specified. The name // is used in metrics and logging in order to differentiate read // configurations. Only valid in Prometheus versions 2.15.0 and newer. Name string `json:"name,omitempty"` // An optional list of equality matchers which have to be present // in a selector to query the remote read endpoint. 
RequiredMatchers map[string]string `json:"requiredMatchers,omitempty"` // Timeout for requests to the remote read endpoint. RemoteTimeout string `json:"remoteTimeout,omitempty"` // Whether reads should be made for queries for time ranges that // the local storage should have complete data for. ReadRecent bool `json:"readRecent,omitempty"` // BasicAuth for the URL. BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // bearer token for remote read. BearerToken string `json:"bearerToken,omitempty"` // File to read bearer token for remote read. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for remote read. TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` // Optional ProxyURL ProxyURL string `json:"proxyUrl,omitempty"` } // RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. // It defines `<metric_relabel_configs>`-section of Prometheus configuration. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs // +k8s:openapi-gen=true type RelabelConfig struct { //The source labels select values from existing labels. Their content is concatenated //using the configured separator and matched against the configured regular expression //for the replace, keep, and drop actions. SourceLabels []string `json:"sourceLabels,omitempty"` //Separator placed between concatenated source label values. default is ';'. Separator string `json:"separator,omitempty"` //Label to which the resulting value is written in a replace action. //It is mandatory for replace actions. Regex capture groups are available. TargetLabel string `json:"targetLabel,omitempty"` //Regular expression against which the extracted value is matched. Default is '(.*)' Regex string `json:"regex,omitempty"` // Modulus to take of the hash of the source label values. Modulus uint64 `json:"modulus,omitempty"` //Replacement value against which a regex replace is performed if the //regular expression matches. Regex capture groups are available. Default is '$1' Replacement string `json:"replacement,omitempty"` // Action to perform based on regex matching. Default is 'replace' Action string `json:"action,omitempty"` } // APIServerConfig defines a host and auth methods to access apiserver. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config // +k8s:openapi-gen=true type APIServerConfig struct { // Host of apiserver. // A valid string consisting of a hostname or IP followed by an optional port number Host string `json:"host"` // BasicAuth allow an endpoint to authenticate over basic authentication BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // Bearer token for accessing apiserver. BearerToken string `json:"bearerToken,omitempty"` // File to read bearer token for accessing apiserver. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // TLS Config to use for accessing apiserver. TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` } // AlertmanagerEndpoints defines a selection of a single Endpoints object // containing alertmanager IPs to fire alerts against. // +k8s:openapi-gen=true type AlertmanagerEndpoints struct { // Namespace of Endpoints object. Namespace string `json:"namespace"` // Name of Endpoints object in Namespace. Name string `json:"name"` // Port the Alertmanager API is exposed on. Port intstr.IntOrString `json:"port"` // Scheme to use when firing alerts. 
Scheme string `json:"scheme,omitempty"` // Prefix for the HTTP path alerts are pushed to. PathPrefix string `json:"pathPrefix,omitempty"` // TLS Config to use for alertmanager connection. TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` // BearerTokenFile to read from filesystem to use when authenticating to // Alertmanager. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // Version of the Alertmanager API that Prometheus uses to send alerts. It // can be "v1" or "v2". APIVersion string `json:"apiVersion,omitempty"` // Timeout is a per-target Alertmanager timeout when pushing alerts. Timeout *string `json:"timeout,omitempty"` } // ServiceMonitor defines monitoring for a set of services. // +genclient // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator" type ServiceMonitor struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of desired Service selection for target discovery by // Prometheus. Spec ServiceMonitorSpec `json:"spec"` } // ServiceMonitorSpec contains specification parameters for a ServiceMonitor. // +k8s:openapi-gen=true type ServiceMonitorSpec struct { // The label to use to retrieve the job name from. JobLabel string `json:"jobLabel,omitempty"` // TargetLabels transfers labels on the Kubernetes Service onto the target. TargetLabels []string `json:"targetLabels,omitempty"` // PodTargetLabels transfers labels on the Kubernetes Pod onto the target. PodTargetLabels []string `json:"podTargetLabels,omitempty"` // A list of endpoints allowed as part of this ServiceMonitor. Endpoints []Endpoint `json:"endpoints"` // Selector to select Endpoints objects. Selector metav1.LabelSelector `json:"selector"` // Selector to select which namespaces the Endpoints objects are discovered from. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. SampleLimit uint64 `json:"sampleLimit,omitempty"` // TargetLimit defines a limit on the number of scraped targets that will be accepted. TargetLimit uint64 `json:"targetLimit,omitempty"` } // Endpoint defines a scrapeable endpoint serving Prometheus metrics. // +k8s:openapi-gen=true type Endpoint struct { // Name of the service port this endpoint refers to. Mutually exclusive with targetPort. Port string `json:"port,omitempty"` // Name or number of the target port of the Pod behind the Service, the port must be specified with container port property. Mutually exclusive with port. TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` // HTTP path to scrape for metrics. Path string `json:"path,omitempty"` // HTTP scheme to use for scraping. Scheme string `json:"scheme,omitempty"` // Optional HTTP URL parameters Params map[string][]string `json:"params,omitempty"` // Interval at which metrics should be scraped Interval string `json:"interval,omitempty"` // Timeout after which the scrape is ended ScrapeTimeout string `json:"scrapeTimeout,omitempty"` // TLS configuration to use when scraping the endpoint TLSConfig *TLSConfig `json:"tlsConfig,omitempty"` // File to read bearer token for scraping targets. BearerTokenFile string `json:"bearerTokenFile,omitempty"` // Secret to mount to read bearer token for scraping targets. The secret // needs to be in the same namespace as the service monitor and accessible by // the Prometheus Operator. 
BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` // HonorLabels chooses the metric's labels on collisions with target labels. HonorLabels bool `json:"honorLabels,omitempty"` // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // BasicAuth allow an endpoint to authenticate over basic authentication // More info: https://prometheus.io/docs/operating/configuration/#endpoints BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // MetricRelabelConfigs to apply to samples before ingestion. MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` // RelabelConfigs to apply to samples before scraping. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. ProxyURL *string `json:"proxyUrl,omitempty"` } // PodMonitor defines monitoring for a set of pods. // +genclient // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator" type PodMonitor struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of desired Pod selection for target discovery by Prometheus. Spec PodMonitorSpec `json:"spec"` } // PodMonitorSpec contains specification parameters for a PodMonitor. // +k8s:openapi-gen=true type PodMonitorSpec struct { // The label to use to retrieve the job name from. JobLabel string `json:"jobLabel,omitempty"` // PodTargetLabels transfers labels on the Kubernetes Pod onto the target. PodTargetLabels []string `json:"podTargetLabels,omitempty"` // A list of endpoints allowed as part of this PodMonitor. PodMetricsEndpoints []PodMetricsEndpoint `json:"podMetricsEndpoints"` // Selector to select Pod objects. Selector metav1.LabelSelector `json:"selector"` // Selector to select which namespaces the Endpoints objects are discovered from. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` // SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. SampleLimit uint64 `json:"sampleLimit,omitempty"` // TargetLimit defines a limit on the number of scraped targets that will be accepted. TargetLimit uint64 `json:"targetLimit,omitempty"` } // PodMetricsEndpoint defines a scrapeable endpoint of a Kubernetes Pod serving Prometheus metrics. // +k8s:openapi-gen=true type PodMetricsEndpoint struct { // Name of the pod port this endpoint refers to. Mutually exclusive with targetPort. Port string `json:"port,omitempty"` // Deprecated: Use 'port' instead. TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` // HTTP path to scrape for metrics. Path string `json:"path,omitempty"` // HTTP scheme to use for scraping. Scheme string `json:"scheme,omitempty"` // Optional HTTP URL parameters Params map[string][]string `json:"params,omitempty"` // Interval at which metrics should be scraped Interval string `json:"interval,omitempty"` // Timeout after which the scrape is ended ScrapeTimeout string `json:"scrapeTimeout,omitempty"` // TLS configuration to use when scraping the endpoint. TLSConfig *PodMetricsEndpointTLSConfig `json:"tlsConfig,omitempty"` // Secret to mount to read bearer token for scraping targets. The secret // needs to be in the same namespace as the pod monitor and accessible by // the Prometheus Operator. 
BearerTokenSecret v1.SecretKeySelector `json:"bearerTokenSecret,omitempty"` // HonorLabels chooses the metric's labels on collisions with target labels. HonorLabels bool `json:"honorLabels,omitempty"` // HonorTimestamps controls whether Prometheus respects the timestamps present in scraped data. HonorTimestamps *bool `json:"honorTimestamps,omitempty"` // BasicAuth allow an endpoint to authenticate over basic authentication. // More info: https://prometheus.io/docs/operating/configuration/#endpoint BasicAuth *BasicAuth `json:"basicAuth,omitempty"` // MetricRelabelConfigs to apply to samples before ingestion. MetricRelabelConfigs []*RelabelConfig `json:"metricRelabelings,omitempty"` // RelabelConfigs to apply to samples before ingestion. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config RelabelConfigs []*RelabelConfig `json:"relabelings,omitempty"` // ProxyURL eg http://proxyserver:2195 Directs scrapes to proxy through this endpoint. ProxyURL *string `json:"proxyUrl,omitempty"` } // PodMetricsEndpointTLSConfig specifies TLS configuration parameters. // +k8s:openapi-gen=true type PodMetricsEndpointTLSConfig struct { SafeTLSConfig `json:",inline"` } // Probe defines monitoring for a set of static targets or ingresses. // +genclient // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator" type Probe struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of desired Ingress selection for target discovery by Prometheus. Spec ProbeSpec `json:"spec"` } // ProbeSpec contains specification parameters for a Probe. // +k8s:openapi-gen=true type ProbeSpec struct { // The job name assigned to scraped metrics by default. JobName string `json:"jobName,omitempty"` // Specification for the prober to use for probing targets. // The prober.URL parameter is required. Targets cannot be probed if left empty. ProberSpec ProberSpec `json:"prober,omitempty"` // The module to use for probing specifying how to probe the target. // Example module configuring in the blackbox exporter: // https://github.com/prometheus/blackbox_exporter/blob/master/example.yml Module string `json:"module,omitempty"` // Targets defines a set of static and/or dynamically discovered targets to be probed using the prober. Targets ProbeTargets `json:"targets,omitempty"` // Interval at which targets are probed using the configured prober. // If not specified Prometheus' global scrape interval is used. Interval string `json:"interval,omitempty"` // Timeout for scraping metrics from the Prometheus exporter. ScrapeTimeout string `json:"scrapeTimeout,omitempty"` } // ProbeTargets defines a set of static and dynamically discovered targets for the prober. // +k8s:openapi-gen=true type ProbeTargets struct { // StaticConfig defines static targets which are considers for probing. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config. StaticConfig *ProbeTargetStaticConfig `json:"staticConfig,omitempty"` // Ingress defines the set of dynamically discovered ingress objects which hosts are considered for probing. Ingress *ProbeTargetIngress `json:"ingress,omitempty"` } // ProbeTargetStaticConfig defines the set of static targets considered for probing. // +k8s:openapi-gen=true type ProbeTargetStaticConfig struct { // Targets is a list of URLs to probe using the configured prober. Targets []string `json:"static,omitempty"` // Labels assigned to all metrics scraped from the targets. 
Labels map[string]string `json:"labels,omitempty"` // RelabelConfigs to apply to samples before ingestion. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"` } // ProbeTargetIngress defines the set of Ingress objects considered for probing. // +k8s:openapi-gen=true type ProbeTargetIngress struct { // Select Ingress objects by labels. Selector metav1.LabelSelector `json:"selector,omitempty"` // Select Ingress objects by namespace. NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` // RelabelConfigs to apply to samples before ingestion. // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"` } // ProberSpec contains specification parameters for the Prober used for probing. // +k8s:openapi-gen=true type ProberSpec struct { // Mandatory URL of the prober. URL string `json:"url"` // HTTP scheme to use for scraping. // Defaults to `http`. Scheme string `json:"scheme,omitempty"` // Path to collect metrics from. // Defaults to `/probe`. Path string `json:"path,omitempty"` } // BasicAuth allow an endpoint to authenticate over basic authentication // More info: https://prometheus.io/docs/operating/configuration/#endpoints // +k8s:openapi-gen=true type BasicAuth struct { // The secret in the service monitor namespace that contains the username // for authentication. Username v1.SecretKeySelector `json:"username,omitempty"` // The secret in the service monitor namespace that contains the password // for authentication. Password v1.SecretKeySelector `json:"password,omitempty"` } // SecretOrConfigMap allows to specify data as a Secret or ConfigMap. Fields are mutually exclusive. type SecretOrConfigMap struct { // Secret containing data to use for the targets. Secret *v1.SecretKeySelector `json:"secret,omitempty"` // ConfigMap containing data to use for the targets. ConfigMap *v1.ConfigMapKeySelector `json:"configMap,omitempty"` } // SecretOrConfigMapValidationError is returned by SecretOrConfigMap.Validate() // on semantically invalid configurations. // +k8s:openapi-gen=false type SecretOrConfigMapValidationError struct { err string } func (e *SecretOrConfigMapValidationError) Error() string { return e.err } // Validate semantically validates the given TLSConfig. func (c *SecretOrConfigMap) Validate() error { if c.Secret != nil && c.ConfigMap != nil { return &SecretOrConfigMapValidationError{"SecretOrConfigMap can not specify both Secret and ConfigMap"} } return nil } // SafeTLSConfig specifies safe TLS configuration parameters. // +k8s:openapi-gen=true type SafeTLSConfig struct { // Struct containing the CA cert to use for the targets. CA SecretOrConfigMap `json:"ca,omitempty"` // Struct containing the client cert file for the targets. Cert SecretOrConfigMap `json:"cert,omitempty"` // Secret containing the client key file for the targets. KeySecret *v1.SecretKeySelector `json:"keySecret,omitempty"` // Used to verify the hostname for the targets. ServerName string `json:"serverName,omitempty"` // Disable target certificate validation. InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty"` } // Validate semantically validates the given SafeTLSConfig. 
func (c *SafeTLSConfig) Validate() error { if c.CA != (SecretOrConfigMap{}) { if err := c.CA.Validate(); err != nil { return err } } if c.Cert != (SecretOrConfigMap{}) { if err := c.Cert.Validate(); err != nil { return err } } if c.Cert != (SecretOrConfigMap{}) && c.KeySecret == nil { return &TLSConfigValidationError{"client cert specified without client key"} } if c.KeySecret != nil && c.Cert == (SecretOrConfigMap{}) { return &TLSConfigValidationError{"client key specified without client cert"} } return nil } // TLSConfig extends the safe TLS configuration with file parameters. // +k8s:openapi-gen=true type TLSConfig struct { SafeTLSConfig `json:",inline"` // Path to the CA cert in the Prometheus container to use for the targets. CAFile string `json:"caFile,omitempty"` // Path to the client cert file in the Prometheus container for the targets. CertFile string `json:"certFile,omitempty"` // Path to the client key file in the Prometheus container for the targets. KeyFile string `json:"keyFile,omitempty"` } // TLSConfigValidationError is returned by TLSConfig.Validate() on semantically // invalid tls configurations. // +k8s:openapi-gen=false type TLSConfigValidationError struct { err string } func (e *TLSConfigValidationError) Error() string { return e.err } // Validate semantically validates the given TLSConfig. func (c *TLSConfig) Validate() error { if c.CA != (SecretOrConfigMap{}) { if c.CAFile != "" { return &TLSConfigValidationError{"tls config can not both specify CAFile and CA"} } if err := c.CA.Validate(); err != nil { return &TLSConfigValidationError{"tls config CA is invalid"} } } if c.Cert != (SecretOrConfigMap{}) { if c.CertFile != "" { return &TLSConfigValidationError{"tls config can not both specify CertFile and Cert"} } if err := c.Cert.Validate(); err != nil { return &TLSConfigValidationError{"tls config Cert is invalid"} } } if c.KeyFile != "" && c.KeySecret != nil { return &TLSConfigValidationError{"tls config can not both specify KeyFile and KeySecret"} } hasCert := c.CertFile != "" || c.Cert != (SecretOrConfigMap{}) hasKey := c.KeyFile != "" || c.KeySecret != nil if hasCert && !hasKey { return &TLSConfigValidationError{"tls config can not specify client cert without client key"} } if hasKey && !hasCert { return &TLSConfigValidationError{"tls config can not specify client key without client cert"} } return nil } // ServiceMonitorList is a list of ServiceMonitors. // +k8s:openapi-gen=true type ServiceMonitorList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of ServiceMonitors Items []*ServiceMonitor `json:"items"` } // PodMonitorList is a list of PodMonitors. // +k8s:openapi-gen=true type PodMonitorList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of PodMonitors Items []*PodMonitor `json:"items"` } // ProbeList is a list of Probes. 
// +k8s:openapi-gen=true type ProbeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Probes Items []*Probe `json:"items"` } // PrometheusRuleList is a list of PrometheusRules. // +k8s:openapi-gen=true type PrometheusRuleList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Rules Items []*PrometheusRule `json:"items"` } // PrometheusRule defines recording and alerting rules for a Prometheus instance // +genclient // +k8s:openapi-gen=true type PrometheusRule struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of desired alerting rule definitions for Prometheus. Spec PrometheusRuleSpec `json:"spec"` } // PrometheusRuleSpec contains specification parameters for a Rule. // +k8s:openapi-gen=true type PrometheusRuleSpec struct { // Content of Prometheus rule file Groups []RuleGroup `json:"groups,omitempty"` } // RuleGroup and Rule are copied instead of vendored because the // upstream Prometheus struct definitions don't have json struct tags. // RuleGroup is a list of sequentially evaluated recording and alerting rules. // Note: PartialResponseStrategy is only used by ThanosRuler and will // be ignored by Prometheus instances. Valid values for this field are 'warn' // or 'abort'. More info: https://github.com/thanos-io/thanos/blob/master/docs/components/rule.md#partial-response // +k8s:openapi-gen=true type RuleGroup struct { Name string `json:"name"` Interval string `json:"interval,omitempty"` Rules []Rule `json:"rules"` PartialResponseStrategy string `json:"partial_response_strategy,omitempty"` } // Rule describes an alerting or recording rule. // +k8s:openapi-gen=true type Rule struct { Record string `json:"record,omitempty"` Alert string `json:"alert,omitempty"` Expr intstr.IntOrString `json:"expr"` For string `json:"for,omitempty"` Labels map[string]string `json:"labels,omitempty"` Annotations map[string]string `json:"annotations,omitempty"` } // Alertmanager describes an Alertmanager cluster. // +genclient // +k8s:openapi-gen=true // +kubebuilder:resource:categories="prometheus-operator" // +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".spec.version",description="The version of Alertmanager" // +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="The desired replicas number of Alertmanagers" // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" type Alertmanager struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` // Specification of the desired behavior of the Alertmanager cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Spec AlertmanagerSpec `json:"spec"` // Most recent observed status of the Alertmanager cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. 
More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status Status *AlertmanagerStatus `json:"status,omitempty"` } // AlertmanagerSpec is a specification of the desired behavior of the Alertmanager cluster. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type AlertmanagerSpec struct { // PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods. PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the // Prometheus Operator knows what version of Alertmanager is being // configured. Image *string `json:"image,omitempty"` // Version the cluster should be on. Version string `json:"version,omitempty"` // Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. // Deprecated: use 'image' instead. The image tag can be specified // as part of the image URL. Tag string `json:"tag,omitempty"` // SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. // Deprecated: use 'image' instead. The image digest can be specified // as part of the image URL. SHA string `json:"sha,omitempty"` // Base image that is used to deploy pods, without tag. // Deprecated: use 'image' instead BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries // see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod ImagePullSecrets []v1.LocalObjectReference `json:"imagePullSecrets,omitempty"` // Secrets is a list of Secrets in the same namespace as the Alertmanager // object, which shall be mounted into the Alertmanager Pods. // The Secrets are mounted into /etc/alertmanager/secrets/<secret-name>. Secrets []string `json:"secrets,omitempty"` // ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager // object, which shall be mounted into the Alertmanager Pods. // The ConfigMaps are mounted into /etc/alertmanager/configmaps/<configmap-name>. ConfigMaps []string `json:"configMaps,omitempty"` // ConfigSecret is the name of a Kubernetes Secret in the same namespace as the // Alertmanager object, which contains configuration for this Alertmanager // instance. Defaults to 'alertmanager-<alertmanager-name>' // The secret is mounted into /etc/alertmanager/config. ConfigSecret string `json:"configSecret,omitempty"` // Log level for Alertmanager to be configured with. LogLevel string `json:"logLevel,omitempty"` // Log format for Alertmanager to be configured with. LogFormat string `json:"logFormat,omitempty"` // Size is the expected size of the alertmanager cluster. The controller will // eventually make the size of the running cluster equal to the expected // size. Replicas *int32 `json:"replicas,omitempty"` // Time duration Alertmanager shall retain data for. Default is '120h', // and must match the regular expression `[0-9]+(ms|s|m|h)` (milliseconds seconds minutes hours). 
Retention string `json:"retention,omitempty"` // Storage is the definition of how storage will be used by the Alertmanager // instances. Storage *StorageSpec `json:"storage,omitempty"` // Volumes allows configuration of additional volumes on the output StatefulSet definition. // Volumes specified will be appended to other volumes that are generated as a result of // StorageSpec objects. Volumes []v1.Volume `json:"volumes,omitempty"` // VolumeMounts allows configuration of additional VolumeMounts on the output StatefulSet definition. // VolumeMounts specified will be appended to other VolumeMounts in the alertmanager container, // that are generated as a result of StorageSpec objects. VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty"` // The external URL the Alertmanager instances will be available under. This is // necessary to generate correct URLs. This is necessary if Alertmanager is not // served from root of a DNS name. ExternalURL string `json:"externalUrl,omitempty"` // The route prefix Alertmanager registers HTTP handlers for. This is useful, // if using ExternalURL and a proxy is rewriting HTTP routes of a request, // and the actual ExternalURL is still true, but the server serves requests // under a different route prefix. For example for use with `kubectl proxy`. RoutePrefix string `json:"routePrefix,omitempty"` // If set to true all actions on the underlying managed objects are not // goint to be performed, except for delete actions. Paused bool `json:"paused,omitempty"` // Define which Nodes the Pods are scheduled on. NodeSelector map[string]string `json:"nodeSelector,omitempty"` // Define resources requests and limits for single Pods. Resources v1.ResourceRequirements `json:"resources,omitempty"` // If specified, the pod's scheduling constraints. Affinity *v1.Affinity `json:"affinity,omitempty"` // If specified, the pod's tolerations. Tolerations []v1.Toleration `json:"tolerations,omitempty"` // If specified, the pod's topology spread constraints. TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` // SecurityContext holds pod-level security attributes and common container settings. // This defaults to the default PodSecurityContext. SecurityContext *v1.PodSecurityContext `json:"securityContext,omitempty"` // ServiceAccountName is the name of the ServiceAccount to use to run the // Prometheus Pods. ServiceAccountName string `json:"serviceAccountName,omitempty"` // ListenLocal makes the Alertmanager server listen on loopback, so that it // does not bind against the Pod IP. Note this is only for the Alertmanager // UI, not the gossip communication. ListenLocal bool `json:"listenLocal,omitempty"` // Containers allows injecting additional containers. This is meant to // allow adding an authentication proxy to an Alertmanager pod. // Containers described here modify an operator generated container if they // share the same name and modifications are done via a strategic merge // patch. The current container names are: `alertmanager` and // `config-reloader`. Overriding containers is entirely outside the scope // of what the maintainers will support and by doing so, you accept that // this behaviour may break at any time without notice. Containers []v1.Container `json:"containers,omitempty"` // InitContainers allows adding initContainers to the pod definition. Those can be used to e.g. // fetch secrets for injection into the Alertmanager configuration from external sources. 
Any // errors during the execution of an initContainer will lead to a restart of the Pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ // Using initContainers for any use case other then secret fetching is entirely outside the scope // of what the maintainers will support and by doing so, you accept that this behaviour may break // at any time without notice. InitContainers []v1.Container `json:"initContainers,omitempty"` // Priority class assigned to the Pods PriorityClassName string `json:"priorityClassName,omitempty"` // AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. AdditionalPeers []string `json:"additionalPeers,omitempty"` // ClusterAdvertiseAddress is the explicit address to advertise in cluster. // Needs to be provided for non RFC1918 [1] (public) addresses. // [1] RFC1918: https://tools.ietf.org/html/rfc1918 ClusterAdvertiseAddress string `json:"clusterAdvertiseAddress,omitempty"` // Interval between gossip attempts. ClusterGossipInterval string `json:"clusterGossipInterval,omitempty"` // Interval between pushpull attempts. ClusterPushpullInterval string `json:"clusterPushpullInterval,omitempty"` // Timeout for cluster peering. ClusterPeerTimeout string `json:"clusterPeerTimeout,omitempty"` // Port name used for the pods and governing service. // This defaults to web PortName string `json:"portName,omitempty"` // ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica. // Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each. ForceEnableClusterMode bool `json:"forceEnableClusterMode,omitempty"` // AlertmanagerConfigs to be selected for to merge and configure Alertmanager with. AlertmanagerConfigSelector *metav1.LabelSelector `json:"alertmanagerConfigSelector,omitempty"` // Namespaces to be selected for AlertmanagerConfig discovery. If nil, only // check own namespace. AlertmanagerConfigNamespaceSelector *metav1.LabelSelector `json:"alertmanagerConfigNamespaceSelector,omitempty"` } // AlertmanagerList is a list of Alertmanagers. // +k8s:openapi-gen=true type AlertmanagerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` // List of Alertmanagers Items []Alertmanager `json:"items"` } // AlertmanagerStatus is the most recent observed status of the Alertmanager cluster. Read-only. Not // included when requesting from the apiserver, only from the Prometheus // Operator API itself. More info: // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#spec-and-status // +k8s:openapi-gen=true type AlertmanagerStatus struct { // Represents whether any actions on the underlying managed objects are // being performed. Only delete actions will be performed. Paused bool `json:"paused"` // Total number of non-terminated pods targeted by this Alertmanager // cluster (their labels match the selector). Replicas int32 `json:"replicas"` // Total number of non-terminated pods targeted by this Alertmanager // cluster that have the desired version spec. UpdatedReplicas int32 `json:"updatedReplicas"` // Total number of available pods (ready for at least minReadySeconds) // targeted by this Alertmanager cluster. 
AvailableReplicas int32 `json:"availableReplicas"` // Total number of unavailable pods targeted by this Alertmanager cluster. UnavailableReplicas int32 `json:"unavailableReplicas"` } // NamespaceSelector is a selector for selecting either all namespaces or a // list of namespaces. // +k8s:openapi-gen=true type NamespaceSelector struct { // Boolean describing whether all namespaces are selected in contrast to a // list restricting them. Any bool `json:"any,omitempty"` // List of namespace names. MatchNames []string `json:"matchNames,omitempty"` // TODO(fabxc): this should embed metav1.LabelSelector eventually. // Currently the selector is only used for namespaces which require more complex // implementation to support label selections. } // /--rules.*/ command-line arguments // +k8s:openapi-gen=true type Rules struct { Alert RulesAlert `json:"alert,omitempty"` } // /--rules.alert.*/ command-line arguments // +k8s:openapi-gen=true type RulesAlert struct { // Max time to tolerate prometheus outage for restoring 'for' state of alert. ForOutageTolerance string `json:"forOutageTolerance,omitempty"` // Minimum duration between alert and restored 'for' state. // This is maintained only for alerts with configured 'for' time greater than grace period. ForGracePeriod string `json:"forGracePeriod,omitempty"` // Minimum amount of time to wait before resending an alert to Alertmanager. ResendDelay string `json:"resendDelay,omitempty"` } // DeepCopyObject implements the runtime.Object interface. func (l *Alertmanager) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *AlertmanagerList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *Prometheus) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *PrometheusList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *ServiceMonitor) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *ServiceMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *PodMonitor) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *PodMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *Probe) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *ProbeList) DeepCopyObject() runtime.Object { return l.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (f *PrometheusRule) DeepCopyObject() runtime.Object { return f.DeepCopy() } // DeepCopyObject implements the runtime.Object interface. func (l *PrometheusRuleList) DeepCopyObject() runtime.Object { return l.DeepCopy() }
1
15,682
Hmm, I'm not sure this is exactly what we want to get across. What is actually deprecated is the behavior of leaving both values unset, which lets the entire config be managed. The field itself isn't deprecated, only that one behavior.
prometheus-operator-prometheus-operator
go
@@ -146,10 +146,17 @@ static void complete_cb (struct bulk_exec *exec, void *arg) static void output_cb (struct bulk_exec *exec, flux_subprocess_t *p, const char *stream, const char *data, - int data_len, + int len, void *arg) { struct jobinfo *job = arg; + const char *cmd = flux_cmd_arg (flux_subprocess_get_cmd (p), 0); + jobinfo_log_output (job, + flux_subprocess_rank (p), + basename (cmd), + stream, + data, + len); flux_log (job->h, LOG_INFO, "%ju: %d: %s: %s", (uintmax_t) job->id, flux_subprocess_rank (p),
1
/************************************************************\ * Copyright 2019 Lawrence Livermore National Security, LLC * (c.f. AUTHORS, NOTICE.LLNS, COPYING) * * This file is part of the Flux resource manager framework. * For details, see https://github.com/flux-framework. * * SPDX-License-Identifier: LGPL-3.0 \************************************************************/ /* Flux subprocess-based exec implementation * * DESCRIPTION * * Launch configured job shell, one per rank. * * TEST CONFIGURATION * * Test and other configuration may be presented in the jobspec * attributes.system.exec.bulkexec object. Supported keys include * * { * "mock_exception":s - Generate a mock execption in phase: * "init", or "starting" * } * */ #if HAVE_CONFIG_H # include "config.h" #endif #include <unistd.h> #include "job-exec.h" #include "bulk-exec.h" #include "rset.h" extern char **environ; static const char *default_cwd = "/tmp"; static const char *default_job_shell = NULL; static const char *flux_imp_path = NULL; /* Configuration for "bulk" execution implementation. Used only for testing * for now. */ struct exec_conf { const char * mock_exception; /* fake exception */ }; static void exec_conf_destroy (struct exec_conf *tc) { free (tc); } static struct exec_conf *exec_conf_create (json_t *jobspec) { struct exec_conf *conf = calloc (1, sizeof (*conf)); if (conf == NULL) return NULL; (void) json_unpack (jobspec, "{s:{s:{s:{s:{s:s}}}}}", "attributes", "system", "exec", "bulkexec", "mock_exception", &conf->mock_exception); return conf; } static const char * exec_mock_exception (struct bulk_exec *exec) { struct exec_conf *conf = bulk_exec_aux_get (exec, "conf"); if (!conf || !conf->mock_exception) return "none"; return conf->mock_exception; } static const char *jobspec_get_job_shell (json_t *jobspec) { const char *path = NULL; (void) json_unpack (jobspec, "{s:{s:{s:{s:s}}}}", "attributes", "system", "exec", "job_shell", &path); return path; } static const char *job_shell_path (struct jobinfo *job) { const char *path = jobspec_get_job_shell (job->jobspec); return path ? path : default_job_shell; } static const char *jobspec_get_cwd (json_t *jobspec) { const char *cwd = NULL; (void) json_unpack (jobspec, "{s:{s:{s:s}}}", "attributes", "system", "cwd", &cwd); return cwd; } static const char *job_get_cwd (struct jobinfo *job) { const char *cwd; if (job->multiuser) cwd = "/"; else if (!(cwd = jobspec_get_cwd (job->jobspec))) cwd = default_cwd; return (cwd); } static void start_cb (struct bulk_exec *exec, void *arg) { struct jobinfo *job = arg; jobinfo_started (job, NULL); /* This is going to be really slow. However, it should at least * work for now. 
We wait for all imp's to start, then send input */ if (job->multiuser) { char *input = NULL; json_t *o = json_pack ("{s:s}", "J", job->J); if (!o || !(input = json_dumps (o, JSON_COMPACT))) { jobinfo_fatal_error (job, errno, "Failed to get input to IMP"); goto out; } if (bulk_exec_write (exec, "stdin", input, strlen (input)) < 0) jobinfo_fatal_error (job, errno, "Failed to write %ld bytes input to IMP", strlen (input)); (void) bulk_exec_close (exec, "stdin"); out: json_decref (o); free (input); } } static void complete_cb (struct bulk_exec *exec, void *arg) { struct jobinfo *job = arg; jobinfo_tasks_complete (job, resource_set_ranks (job->R), bulk_exec_rc (exec)); } static void output_cb (struct bulk_exec *exec, flux_subprocess_t *p, const char *stream, const char *data, int data_len, void *arg) { struct jobinfo *job = arg; flux_log (job->h, LOG_INFO, "%ju: %d: %s: %s", (uintmax_t) job->id, flux_subprocess_rank (p), stream, data); } static void error_cb (struct bulk_exec *exec, flux_subprocess_t *p, void *arg) { struct jobinfo *job = arg; const char *arg0 = flux_cmd_arg (flux_subprocess_get_cmd (p), 0); jobinfo_fatal_error (job, flux_subprocess_fail_errno (p), "cmd=%s: rank=%d failed", arg0, flux_subprocess_rank (p)); } static struct bulk_exec_ops exec_ops = { .on_start = start_cb, .on_exit = NULL, .on_complete = complete_cb, .on_output = output_cb, .on_error = error_cb }; static int exec_init (struct jobinfo *job) { flux_cmd_t *cmd = NULL; struct exec_conf *conf = NULL; struct bulk_exec *exec = NULL; const struct idset *ranks = NULL; if (job->multiuser && !flux_imp_path) { flux_log (job->h, LOG_ERR, "unable run multiuser job with no IMP configured!"); goto err; } if (!(ranks = resource_set_ranks (job->R))) { flux_log_error (job->h, "exec_init: resource_set_ranks"); goto err; } if (!(exec = bulk_exec_create (&exec_ops, job))) { flux_log_error (job->h, "exec_init: bulk_exec_create"); goto err; } if (!(conf = exec_conf_create (job->jobspec))) { flux_log_error (job->h, "exec_init: exec_conf_create"); goto err; } if (bulk_exec_aux_set (exec, "conf", conf, (flux_free_f) exec_conf_destroy) < 0) { flux_log_error (job->h, "exec_init: bulk_exec_aux_set"); goto err; } if (!(cmd = flux_cmd_create (0, NULL, environ))) { flux_log_error (job->h, "exec_init: flux_cmd_create"); goto err; } if (flux_cmd_setenvf (cmd, 1, "FLUX_KVS_NAMESPACE", "%s", job->ns) < 0) { flux_log_error (job->h, "exec_init: flux_cmd_setenvf"); goto err; } if (job->multiuser) { if (flux_cmd_argv_append (cmd, flux_imp_path) < 0 || flux_cmd_argv_append (cmd, "exec") < 0) { flux_log_error (job->h, "exec_init: flux_cmd_argv_append"); goto err; } } if (flux_cmd_argv_append (cmd, job_shell_path (job)) < 0 || flux_cmd_argv_appendf (cmd, "%ju", (uintmax_t) job->id) < 0) { flux_log_error (job->h, "exec_init: flux_cmd_argv_append"); goto err; } if (flux_cmd_setcwd (cmd, job_get_cwd (job)) < 0) { flux_log_error (job->h, "exec_init: flux_cmd_setcwd"); goto err; } if (bulk_exec_push_cmd (exec, ranks, cmd, 0) < 0) { flux_log_error (job->h, "exec_init: bulk_exec_push_cmd"); goto err; } flux_cmd_destroy (cmd); job->data = exec; return 1; err: flux_cmd_destroy (cmd); bulk_exec_destroy (exec); return -1; } static void exec_check_cb (flux_reactor_t *r, flux_watcher_t *w, int revents, void *arg) { struct jobinfo *job = arg; struct bulk_exec *exec = job->data; if (bulk_exec_current (exec) >= 1) { jobinfo_fatal_error (job, 0, "mock starting exception generated"); flux_log (job->h, LOG_DEBUG, "mock exception for starting job total=%d, current=%d", 
bulk_exec_total (exec), bulk_exec_current (exec)); flux_watcher_destroy (w); } } static int exec_start (struct jobinfo *job) { struct bulk_exec *exec = job->data; if (strcmp (exec_mock_exception (exec), "init") == 0) { /* If creating an "init" mock exception, generate it and * then return to simulate an exception that came in before * we could actually start the job */ jobinfo_fatal_error (job, 0, "mock init exception generated"); return 0; } else if (strcmp (exec_mock_exception (exec), "starting") == 0) { /* If we're going to mock an exception in "starting" phase, then * set up a check watcher to cancel the job when some shells have * started but (potentially) not all. */ flux_reactor_t *r = flux_get_reactor (job->h); flux_watcher_t *w = flux_check_watcher_create (r, exec_check_cb, job); if (w) flux_watcher_start (w); } return bulk_exec_start (job->h, exec); } static void exec_kill_cb (flux_future_t *f, void *arg) { struct jobinfo *job = arg; if (flux_future_get (f, NULL) < 0 && errno != ENOENT) flux_log_error (job->h, "%ju: exec_kill", (uintmax_t) job->id); jobinfo_decref (job); flux_future_destroy (f); } static int exec_kill (struct jobinfo *job, int signum) { struct bulk_exec *exec = job->data; flux_future_t *f; if (job->multiuser) f = bulk_exec_imp_kill (exec, flux_imp_path, signum); else f = bulk_exec_kill (exec, signum); if (!f) { if (errno != ENOENT) flux_log_error (job->h, "%ju: bulk_exec_kill", job->id); return 0; } flux_log (job->h, LOG_DEBUG, "exec_kill: %ju: signal %d", (uintmax_t) job->id, signum); jobinfo_incref (job); if (flux_future_then (f, 3., exec_kill_cb, job) < 0) { flux_log_error (job->h, "%ju: exec_kill: flux_future_then", job->id); flux_future_destroy (f); return -1; } return 0; } static int exec_cancel (struct jobinfo *job) { struct bulk_exec *exec = job->data; return bulk_exec_cancel (exec); } static int exec_cleanup (struct jobinfo *job, const struct idset *idset) { /* No epilog supported */ jobinfo_cleanup_complete (job, idset, 0); return 0; } static void exec_exit (struct jobinfo *job) { struct bulk_exec *exec = job->data; bulk_exec_destroy (exec); job->data = NULL; } /* Configure the exec module. * Read the default job shell path from config. Allow override on cmdline */ static int exec_config (flux_t *h, int argc, char **argv) { flux_conf_error_t err; /* Set default job shell path from builtin configuration, * allow override via configuration, then cmdline. 
*/ default_job_shell = flux_conf_builtin_get ("shell_path", FLUX_CONF_AUTO); /* Check configuration for exec.job-shell */ if (flux_conf_unpack (flux_get_conf (h), &err, "{s?:{s?s}}", "exec", "job-shell", &default_job_shell) < 0) { flux_log (h, LOG_ERR, "error reading config value exec.job-shell: %s", err.errbuf); return -1; } /* Check configuration for exec.imp */ if (flux_conf_unpack (flux_get_conf (h), &err, "{s?:{s?s}}", "exec", "imp", &flux_imp_path) < 0) { flux_log (h, LOG_ERR, "error reading config value exec.imp: %s", err.errbuf); return -1; } /* Finally, override values on cmdline */ for (int i = 0; i < argc; i++) { if (strncmp (argv[i], "job-shell=", 10) == 0) default_job_shell = argv[i]+10; else if (strncmp (argv[i], "imp=", 4) == 0) flux_imp_path = argv[i]+4; } flux_log (h, LOG_DEBUG, "using default shell path %s", default_job_shell); if (flux_imp_path) flux_log (h, LOG_DEBUG, "using imp path %s", flux_imp_path); return 0; } struct exec_implementation bulkexec = { .name = "bulk-exec", .config = exec_config, .init = exec_init, .exit = exec_exit, .start = exec_start, .kill = exec_kill, .cancel = exec_cancel, .cleanup = exec_cleanup }; /* vi: ts=4 sw=4 expandtab */
1
29,582
Given the slightly vague discussion in basename(3) about POSIX basename (which modifies its argument) vs GNU (which doesn't), I always assumed it was advisable to pass a string copy. However, I guess you'd get a "discarding const" warning promoted to an error by our build system here if you were getting the POSIX implementation. Hah! Cool, I can probably go remove some memory copies in other parts of the code where those conditions exist.
flux-framework-flux-core
c
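To make the basename(3) concern above concrete, here is a minimal, self-contained C sketch of the defensive pattern the reviewer says they had always assumed was necessary: copy the argument before calling basename(). It is not part of the flux-core patch; the helper name, the printf() stand-in for jobinfo_log_output(), and the sample path are illustrative assumptions only.

/*
 * Sketch only: copy the string before calling basename(), since the
 * POSIX implementation from <libgen.h> may modify its argument, while
 * the GNU variant does not.
 */
#include <libgen.h>   /* POSIX basename(3) */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void log_cmd_basename (const char *cmd)
{
    char *copy = strdup (cmd);   /* defensive copy for POSIX basename() */
    if (!copy)
        return;
    printf ("command: %s\n", basename (copy));  /* stand-in for jobinfo_log_output() */
    free (copy);
}

int main (void)
{
    log_cmd_basename ("/usr/libexec/flux/flux-shell");  /* illustrative path */
    return 0;
}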
@@ -81,12 +81,12 @@ public class MetricManagerTest { @Test public void managerEmitterHandlingTest() throws Exception { this.emitter.purgeAllData(); - final Date from = DateTime.now().minusMinutes(1).toDate(); + final Date from = DateTime.now().minusMinutes(10).toDate(); this.metric.notifyManager(); this.emitterWrapper.countDownLatch.await(10L, TimeUnit.SECONDS); - final Date to = DateTime.now().plusMinutes(1).toDate(); + final Date to = DateTime.now().plusMinutes(10).toDate(); final List<InMemoryHistoryNode> nodes = this.emitter.getMetrics("FakeMetric", from, to, false); assertEquals("Failed to report metric", 1, nodes.size());
1
package azkaban.metric; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import azkaban.metric.inmemoryemitter.InMemoryHistoryNode; import azkaban.metric.inmemoryemitter.InMemoryMetricEmitter; import azkaban.utils.Props; import java.util.Date; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import org.joda.time.DateTime; import org.junit.Before; import org.junit.Test; /** * Azkaban Metric Manager Tests */ public class MetricManagerTest { MetricReportManager manager; FakeMetric metric; InMemoryMetricEmitter emitter; MetricEmitterWrapper emitterWrapper; @Before public void setUp() throws Exception { this.manager = MetricReportManager.getInstance(); this.metric = new FakeMetric(this.manager); this.manager.addMetric(this.metric); this.emitter = new InMemoryMetricEmitter(new Props()); this.emitterWrapper = new MetricEmitterWrapper(); this.manager.addMetricEmitter(this.emitterWrapper); } /** * Test enable disable and status methods */ @Test public void managerStatusTest() { assertNotNull("Singleton Failed to instantiate", this.manager); assertTrue("Failed to enable metric manager", MetricReportManager.isAvailable()); this.manager.disableManager(); assertFalse("Failed to disable metric manager", MetricReportManager.isAvailable()); this.manager.enableManager(); assertTrue("Failed to enable metric manager", MetricReportManager.isAvailable()); } /** * Test adding and accessing metric methods */ @Test public void managerMetricMaintenanceTest() { assertEquals("Failed to add metric", this.manager.getAllMetrics().size(), 1); assertTrue("Failed to add metric", this.manager.getAllMetrics().contains(this.metric)); assertEquals("Failed to get metric by Name", this.manager.getMetricFromName("FakeMetric"), this.metric); } /** * Test adding, removing and accessing metric emitter. */ @Test public void managerEmitterMaintenanceTest() { assertTrue("Failed to add Emitter", this.manager.getMetricEmitters().contains(this.emitterWrapper)); final int originalSize = this.manager.getMetricEmitters().size(); this.manager.removeMetricEmitter(this.emitterWrapper); assertEquals("Failed to remove emitter", this.manager.getMetricEmitters().size(), originalSize - 1); this.manager.addMetricEmitter(this.emitterWrapper); } /** * Test metric reporting methods, including InMemoryMetricEmitter methods */ @Test public void managerEmitterHandlingTest() throws Exception { this.emitter.purgeAllData(); final Date from = DateTime.now().minusMinutes(1).toDate(); this.metric.notifyManager(); this.emitterWrapper.countDownLatch.await(10L, TimeUnit.SECONDS); final Date to = DateTime.now().plusMinutes(1).toDate(); final List<InMemoryHistoryNode> nodes = this.emitter.getMetrics("FakeMetric", from, to, false); assertEquals("Failed to report metric", 1, nodes.size()); assertEquals("Failed to report metric", nodes.get(0).getValue(), 4); } private class MetricEmitterWrapper implements IMetricEmitter { private final CountDownLatch countDownLatch = new CountDownLatch(1); @Override public void reportMetric(final IMetric<?> metric) throws MetricException { MetricManagerTest.this.emitter.reportMetric(metric); this.countDownLatch.countDown(); } @Override public void purgeAllData() throws MetricException { MetricManagerTest.this.emitter.purgeAllData(); } } }
1
16,063
Do you actually know that this was the culprit? Even 1 minute is a lot, so I would expect the bug to lie somewhere else.
azkaban-azkaban
java
@@ -146,7 +146,9 @@ void ActiveHostsMan::cleanExpiredHosts() { LOG(INFO) << "set " << data.size() << " expired hosts to offline in meta rocksdb"; kvstore_->asyncMultiPut(kDefaultSpaceId, kDefaultPartId, std::move(data), [] (kvstore::ResultCode code) { - CHECK_EQ(code, kvstore::ResultCode::SUCCEEDED); + if (code != kvstore::ResultCode::SUCCEEDED) { + LOG(WARNING) << "put failed, ret " << static_cast<int32_t>(code); + } }); } }
1
/* Copyright (c) 2019 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "meta/ActiveHostsMan.h" #include "meta/MetaServiceUtils.h" #include "meta/processors/BaseProcessor.h" namespace nebula { namespace meta { ActiveHostsMan::ActiveHostsMan(int32_t intervalSeconds, int32_t expiredSeconds, kvstore::KVStore* kv) : intervalSeconds_(intervalSeconds) , expirationInSeconds_(expiredSeconds) { if (kv != nullptr) { kvstore_ = dynamic_cast<kvstore::NebulaStore*>(kv); } CHECK_GT(intervalSeconds, 0) << "intervalSeconds " << intervalSeconds << " should > 0!"; CHECK_GE(expiredSeconds, intervalSeconds) << "expiredSeconds " << expiredSeconds << " should >= intervalSeconds " << intervalSeconds; CHECK(checkThread_.start()); checkThread_.addTimerTask(intervalSeconds * 1000, intervalSeconds * 1000, &ActiveHostsMan::cleanExpiredHosts, this); } bool ActiveHostsMan::updateHostInfo(const HostAddr& hostAddr, const HostInfo& info) { std::vector<kvstore::KV> data; { folly::RWSpinLock::ReadHolder rh(&lock_); auto it = hostsMap_.find(hostAddr); if (it == hostsMap_.end()) { folly::RWSpinLock::UpgradedHolder uh(&lock_); hostsMap_.emplace(hostAddr, std::move(info)); data.emplace_back(MetaServiceUtils::hostKey(hostAddr.first, hostAddr.second), MetaServiceUtils::hostValOnline()); } else { it->second.lastHBTimeInSec_ = info.lastHBTimeInSec_; } } if (kvstore_ != nullptr && !data.empty()) { if (kvstore_->isLeader(kDefaultSpaceId, kDefaultPartId)) { folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); folly::Baton<true, std::atomic> baton; bool succeeded = true; kvstore_->asyncMultiPut(kDefaultSpaceId, kDefaultPartId, std::move(data), [&] (kvstore::ResultCode code) { if (code != kvstore::ResultCode::SUCCEEDED) { succeeded = false; } baton.post(); }); baton.wait(); return succeeded; } else { return false; } } return true; } std::vector<HostAddr> ActiveHostsMan::getActiveHosts() { std::vector<HostAddr> hosts; folly::RWSpinLock::ReadHolder rh(&lock_); hosts.resize(hostsMap_.size()); std::transform(hostsMap_.begin(), hostsMap_.end(), hosts.begin(), [](const auto& entry) -> decltype(auto) { return entry.first; }); return hosts; } void ActiveHostsMan::loadHostMap() { if (kvstore_ == nullptr) { return; } const auto& prefix = MetaServiceUtils::hostPrefix(); std::unique_ptr<kvstore::KVIterator> iter; auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (ret != kvstore::ResultCode::SUCCEEDED) { return; } while (iter->valid()) { auto host = MetaServiceUtils::parseHostKey(iter->key()); HostInfo info; info.lastHBTimeInSec_ = time::WallClock::fastNowInSec(); if (iter->val() == MetaServiceUtils::hostValOnline()) { LOG(INFO) << "load host " << host.ip << ":" << host.port; updateHostInfo({host.ip, host.port}, info); } iter->next(); } } void ActiveHostsMan::cleanExpiredHosts() { int64_t now = time::WallClock::fastNowInSec(); std::vector<kvstore::KV> data; { folly::RWSpinLock::WriteHolder rh(&lock_); auto it = hostsMap_.begin(); while (it != hostsMap_.end()) { if ((now - it->second.lastHBTimeInSec_) > expirationInSeconds_) { LOG(INFO) << it->first << " expired! 
last hb time " << it->second.lastHBTimeInSec_; data.emplace_back(MetaServiceUtils::hostKey(it->first.first, it->first.second), MetaServiceUtils::hostValOffline()); it = hostsMap_.erase(it); } else { it++; } } } // merge host info from kvstore if (kvstore_ != nullptr) { const auto& prefix = MetaServiceUtils::hostPrefix(); std::unique_ptr<kvstore::KVIterator> iter; auto ret = kvstore_->prefix(kDefaultSpaceId, kDefaultPartId, prefix, &iter); if (ret == kvstore::ResultCode::SUCCEEDED) { while (iter->valid()) { auto host = MetaServiceUtils::parseHostKey(iter->key()); if (iter->val() == MetaServiceUtils::hostValOnline()) { folly::RWSpinLock::ReadHolder rh(&lock_); bool notFound = hostsMap_.find({host.ip, host.port}) == hostsMap_.end(); if (notFound) { data.emplace_back(MetaServiceUtils::hostKey(host.ip, host.port), MetaServiceUtils::hostValOffline()); } } iter->next(); } } } if (!data.empty() && kvstore_ != nullptr) { folly::SharedMutex::WriteHolder wHolder(LockUtils::spaceLock()); LOG(INFO) << "set " << data.size() << " expired hosts to offline in meta rocksdb"; kvstore_->asyncMultiPut(kDefaultSpaceId, kDefaultPartId, std::move(data), [] (kvstore::ResultCode code) { CHECK_EQ(code, kvstore::ResultCode::SUCCEEDED); }); } } } // namespace meta } // namespace nebula
1
19,487
What should we do if some of the puts fail? Is logging enough?
vesoft-inc-nebula
cpp
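To illustrate the answer the nebula patch above gives to that question — treat a failed write as non-fatal and log it instead of CHECK-failing — here is a small, self-contained C++ sketch of the callback pattern. ResultCode, asyncMultiPut, and the key/value strings are stand-ins, not the real nebula kvstore API; in the actual code, a failed offline-marking should be picked up again on the next cleanExpiredHosts() pass, which re-reads host state from the kvstore.

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Stand-ins for the kvstore types used in cleanExpiredHosts().
enum class ResultCode { SUCCEEDED, ERR_UNKNOWN };
using KV = std::pair<std::string, std::string>;

// Pretend asynchronous multi-put that always reports a failure, so the
// error path in the callback is exercised.
void asyncMultiPut(std::vector<KV> data, std::function<void(ResultCode)> cb) {
    (void)data;
    cb(ResultCode::ERR_UNKNOWN);
}

int main() {
    std::vector<KV> data{{"__hosts__10.0.0.1:44500", "__offline__"}};  // illustrative
    asyncMultiPut(std::move(data), [](ResultCode code) {
        if (code != ResultCode::SUCCEEDED) {
            // Non-fatal: log the result code instead of aborting.
            std::cerr << "put failed, ret " << static_cast<int32_t>(code) << "\n";
        }
    });
    return 0;
}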
@@ -16,6 +16,19 @@ module Faker fetch('quote.famous_last_words') end + ## + # Produces a quote from Deep Thoughts by Jack Handey. + # + # @return [String] + # + # @example + # Faker::Quote.deep_thoughts # => "I hope life isn't a big joke, because I don't get it." + # + # @faker.version next + def deep_thoughts + fetch('quote.deep_thoughts') + end + ## # Produces a quote from Matz. #
1
# frozen_string_literal: true module Faker class Quote < Base class << self ## # Produces a famous last words quote. # # @return [String] # # @example # Faker::Quote.famous_last_words #=> "My vocabulary did this to me. Your love will let you go on..." # # @faker.version 1.9.0 def famous_last_words fetch('quote.famous_last_words') end ## # Produces a quote from Matz. # # @return [String] # # @example # Faker::Quote.matz #=> "You want to enjoy life, don't you? If you get your job done quickly and your job is fun, that's good isn't it? That's the purpose of life, partly. Your life is better." # # @faker.version 1.9.0 def matz fetch('quote.matz') end ## # Produces a quote about the most interesting man in the world. # # @return [String] # # @example # Faker::Quote.most_interesting_man_in_the_world #=> "He can speak Russian... in French" # # @faker.version 1.9.0 def most_interesting_man_in_the_world fetch('quote.most_interesting_man_in_the_world') end ## # Produces a Robin quote. # # @return [String] # # @example # Faker::Quote.robin #=> "Holy Razors Edge" # # @faker.version 1.9.0 def robin fetch('quote.robin') end ## # Produces a singular siegler quote. # # @return [String] # # @example # Faker::Quote.singular_siegler #=> "Texas!" # # @faker.version 1.9.0 def singular_siegler fetch('quote.singular_siegler') end ## # Produces a quote from Yoda. # # @return [String] # # @example # Faker::Quote.yoda #=> "Use your feelings, Obi-Wan, and find him you will." # # @faker.version 1.9.0 def yoda fetch('quote.yoda') end ## # Produces a quote from a fortune cookie. # # @return [String] # # @example # Faker::Quote.fortune_cookie #=> "This cookie senses that you are superstitious; it is an inclination that is bad for your mental health." # # @faker.version next def fortune_cookie fetch('quote.fortune_cookie') end end end end
1
10,169
Since these thoughts are all Jack Handey's, the generator should probably be called `jack_handey` to reflect that.
faker-ruby-faker
rb
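For reference, a sketch of the rename suggested in that comment, written as a drop-in for the class << self block shown above. The lookup key from the patch is assumed to stay as 'quote.deep_thoughts'; only the public generator name changes, and whether the locale key should also be renamed is left open here.

##
# Produces a quote from Deep Thoughts by Jack Handey.
#
# @return [String]
#
# @example
#   Faker::Quote.jack_handey # => "I hope life isn't a big joke, because I don't get it."
#
# @faker.version next
def jack_handey
  fetch('quote.deep_thoughts')
end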
@@ -288,6 +288,8 @@ type Container struct { Environment map[string]*string `locationName:"environment" type:"map"` + EnvironmentFiles []*EnvironmentFile `locationName:"environmentFiles" type:"list"` + Essential *bool `locationName:"essential" type:"boolean"` FirelensConfiguration *FirelensConfiguration `locationName:"firelensConfiguration" type:"structure"`
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. // // Code generated by [agent/gogenerate/awssdk.go] DO NOT EDIT. package ecsacs import "github.com/aws/aws-sdk-go/aws/awsutil" type ASMAuthData struct { _ struct{} `type:"structure"` CredentialsParameter *string `locationName:"credentialsParameter" type:"string"` Region *string `locationName:"region" type:"string"` } // String returns the string representation func (s ASMAuthData) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ASMAuthData) GoString() string { return s.String() } type AccessDeniedException struct { _ struct{} `type:"structure"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s AccessDeniedException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AccessDeniedException) GoString() string { return s.String() } type AckRequest struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` MessageId *string `locationName:"messageId" type:"string"` } // String returns the string representation func (s AckRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AckRequest) GoString() string { return s.String() } type Association struct { _ struct{} `type:"structure"` Containers []*string `locationName:"containers" type:"list"` Content *EncodedString `locationName:"content" type:"structure"` Name *string `locationName:"name" type:"string"` Type *string `locationName:"type" type:"string" enum:"AssociationType"` } // String returns the string representation func (s Association) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Association) GoString() string { return s.String() } type AttachInstanceNetworkInterfacesInput struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` GeneratedAt *int64 `locationName:"generatedAt" type:"long"` MessageId *string `locationName:"messageId" type:"string"` WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"` } // String returns the string representation func (s AttachInstanceNetworkInterfacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AttachInstanceNetworkInterfacesInput) GoString() string { return s.String() } type AttachInstanceNetworkInterfacesMessage struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` ElasticNetworkInterfaces []*ElasticNetworkInterface 
`locationName:"elasticNetworkInterfaces" type:"list"` GeneratedAt *int64 `locationName:"generatedAt" type:"long"` MessageId *string `locationName:"messageId" type:"string"` WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"` } // String returns the string representation func (s AttachInstanceNetworkInterfacesMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AttachInstanceNetworkInterfacesMessage) GoString() string { return s.String() } type AttachInstanceNetworkInterfacesOutput struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` MessageId *string `locationName:"messageId" type:"string"` } // String returns the string representation func (s AttachInstanceNetworkInterfacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AttachInstanceNetworkInterfacesOutput) GoString() string { return s.String() } type AttachTaskNetworkInterfacesInput struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` GeneratedAt *int64 `locationName:"generatedAt" type:"long"` MessageId *string `locationName:"messageId" type:"string"` TaskArn *string `locationName:"taskArn" type:"string"` WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"` } // String returns the string representation func (s AttachTaskNetworkInterfacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AttachTaskNetworkInterfacesInput) GoString() string { return s.String() } type AttachTaskNetworkInterfacesMessage struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` GeneratedAt *int64 `locationName:"generatedAt" type:"long"` MessageId *string `locationName:"messageId" type:"string"` TaskArn *string `locationName:"taskArn" type:"string"` WaitTimeoutMs *int64 `locationName:"waitTimeoutMs" type:"long"` } // String returns the string representation func (s AttachTaskNetworkInterfacesMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AttachTaskNetworkInterfacesMessage) GoString() string { return s.String() } type AttachTaskNetworkInterfacesOutput struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` MessageId *string `locationName:"messageId" type:"string"` } // String returns the string representation func (s AttachTaskNetworkInterfacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AttachTaskNetworkInterfacesOutput) GoString() string { return s.String() } type BadRequestException struct { _ struct{} `type:"structure"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s BadRequestException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s 
BadRequestException) GoString() string { return s.String() } type CloseMessage struct { _ struct{} `type:"structure"` Message *string `locationName:"message" type:"string"` } // String returns the string representation func (s CloseMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CloseMessage) GoString() string { return s.String() } type Container struct { _ struct{} `type:"structure"` Command []*string `locationName:"command" type:"list"` Cpu *int64 `locationName:"cpu" type:"integer"` DependsOn []*ContainerDependency `locationName:"dependsOn" type:"list"` DockerConfig *DockerConfig `locationName:"dockerConfig" type:"structure"` EntryPoint []*string `locationName:"entryPoint" type:"list"` Environment map[string]*string `locationName:"environment" type:"map"` Essential *bool `locationName:"essential" type:"boolean"` FirelensConfiguration *FirelensConfiguration `locationName:"firelensConfiguration" type:"structure"` HealthCheckType *string `locationName:"healthCheckType" type:"string" enum:"HealthCheckType"` Image *string `locationName:"image" type:"string"` Links []*string `locationName:"links" type:"list"` LogsAuthStrategy *string `locationName:"logsAuthStrategy" type:"string" enum:"AuthStrategy"` Memory *int64 `locationName:"memory" type:"integer"` MountPoints []*MountPoint `locationName:"mountPoints" type:"list"` Name *string `locationName:"name" type:"string"` Overrides *string `locationName:"overrides" type:"string"` PortMappings []*PortMapping `locationName:"portMappings" type:"list"` RegistryAuthentication *RegistryAuthenticationData `locationName:"registryAuthentication" type:"structure"` Secrets []*Secret `locationName:"secrets" type:"list"` StartTimeout *int64 `locationName:"startTimeout" type:"integer"` StopTimeout *int64 `locationName:"stopTimeout" type:"integer"` VolumesFrom []*VolumeFrom `locationName:"volumesFrom" type:"list"` } // String returns the string representation func (s Container) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Container) GoString() string { return s.String() } type ContainerDependency struct { _ struct{} `type:"structure"` Condition *string `locationName:"condition" type:"string" enum:"ContainerCondition"` ContainerName *string `locationName:"containerName" type:"string"` } // String returns the string representation func (s ContainerDependency) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ContainerDependency) GoString() string { return s.String() } type DockerConfig struct { _ struct{} `type:"structure"` Config *string `locationName:"config" type:"string"` HostConfig *string `locationName:"hostConfig" type:"string"` Version *string `locationName:"version" type:"string"` } // String returns the string representation func (s DockerConfig) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DockerConfig) GoString() string { return s.String() } type DockerVolumeConfiguration struct { _ struct{} `type:"structure"` Autoprovision *bool `locationName:"autoprovision" type:"boolean"` Driver *string `locationName:"driver" type:"string"` DriverOpts map[string]*string `locationName:"driverOpts" type:"map"` Labels map[string]*string `locationName:"labels" type:"map"` Scope *string `locationName:"scope" type:"string" enum:"Scope"` } // String returns the string representation func (s DockerVolumeConfiguration) String() string { return 
awsutil.Prettify(s) } // GoString returns the string representation func (s DockerVolumeConfiguration) GoString() string { return s.String() } type ECRAuthData struct { _ struct{} `type:"structure"` EndpointOverride *string `locationName:"endpointOverride" type:"string"` Region *string `locationName:"region" type:"string"` RegistryId *string `locationName:"registryId" type:"string"` UseExecutionRole *bool `locationName:"useExecutionRole" type:"boolean"` } // String returns the string representation func (s ECRAuthData) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ECRAuthData) GoString() string { return s.String() } type EFSVolumeConfiguration struct { _ struct{} `type:"structure"` FileSystemId *string `locationName:"fileSystemId" type:"string"` RootDirectory *string `locationName:"rootDirectory" type:"string"` } // String returns the string representation func (s EFSVolumeConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s EFSVolumeConfiguration) GoString() string { return s.String() } type ElasticNetworkInterface struct { _ struct{} `type:"structure"` AttachmentArn *string `locationName:"attachmentArn" type:"string"` DomainName []*string `locationName:"domainName" type:"list"` DomainNameServers []*string `locationName:"domainNameServers" type:"list"` Ec2Id *string `locationName:"ec2Id" type:"string"` InterfaceAssociationProtocol *string `locationName:"interfaceAssociationProtocol" type:"string" enum:"NetworkInterfaceAssociationProtocol"` InterfaceVlanProperties *NetworkInterfaceVlanProperties `locationName:"interfaceVlanProperties" type:"structure"` Ipv4Addresses []*IPv4AddressAssignment `locationName:"ipv4Addresses" type:"list"` Ipv6Addresses []*IPv6AddressAssignment `locationName:"ipv6Addresses" type:"list"` MacAddress *string `locationName:"macAddress" type:"string"` PrivateDnsName *string `locationName:"privateDnsName" type:"string"` SubnetGatewayIpv4Address *string `locationName:"subnetGatewayIpv4Address" type:"string"` } // String returns the string representation func (s ElasticNetworkInterface) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ElasticNetworkInterface) GoString() string { return s.String() } type EncodedString struct { _ struct{} `type:"structure"` Encoding *string `locationName:"encoding" type:"string" enum:"Encoding"` Value *string `locationName:"value" type:"string"` } // String returns the string representation func (s EncodedString) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s EncodedString) GoString() string { return s.String() } type ErrorInput struct { _ struct{} `type:"structure"` Message *string `locationName:"message" type:"string"` } // String returns the string representation func (s ErrorInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ErrorInput) GoString() string { return s.String() } type ErrorMessage struct { _ struct{} `type:"structure"` Message *string `locationName:"message" type:"string"` } // String returns the string representation func (s ErrorMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ErrorMessage) GoString() string { return s.String() } type ErrorOutput struct { _ struct{} `type:"structure"` } // String returns the string representation func (s ErrorOutput) String() string { return 
awsutil.Prettify(s) } // GoString returns the string representation func (s ErrorOutput) GoString() string { return s.String() } type FirelensConfiguration struct { _ struct{} `type:"structure"` Options map[string]*string `locationName:"options" type:"map"` Type *string `locationName:"type" type:"string" enum:"FirelensConfigurationType"` } // String returns the string representation func (s FirelensConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s FirelensConfiguration) GoString() string { return s.String() } type HeartbeatInput struct { _ struct{} `type:"structure"` Healthy *bool `locationName:"healthy" type:"boolean"` } // String returns the string representation func (s HeartbeatInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s HeartbeatInput) GoString() string { return s.String() } type HeartbeatMessage struct { _ struct{} `type:"structure"` Healthy *bool `locationName:"healthy" type:"boolean"` } // String returns the string representation func (s HeartbeatMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s HeartbeatMessage) GoString() string { return s.String() } type HeartbeatOutput struct { _ struct{} `type:"structure"` } // String returns the string representation func (s HeartbeatOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s HeartbeatOutput) GoString() string { return s.String() } type HostVolumeProperties struct { _ struct{} `type:"structure"` SourcePath *string `locationName:"sourcePath" type:"string"` } // String returns the string representation func (s HostVolumeProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s HostVolumeProperties) GoString() string { return s.String() } type IAMRoleCredentials struct { _ struct{} `type:"structure"` AccessKeyId *string `locationName:"accessKeyId" type:"string"` CredentialsId *string `locationName:"credentialsId" type:"string"` Expiration *string `locationName:"expiration" type:"string"` RoleArn *string `locationName:"roleArn" type:"string"` SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"` SessionToken *string `locationName:"sessionToken" type:"string"` } // String returns the string representation func (s IAMRoleCredentials) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s IAMRoleCredentials) GoString() string { return s.String() } type IAMRoleCredentialsAckRequest struct { _ struct{} `type:"structure"` CredentialsId *string `locationName:"credentialsId" type:"string"` Expiration *string `locationName:"expiration" type:"string"` MessageId *string `locationName:"messageId" type:"string"` } // String returns the string representation func (s IAMRoleCredentialsAckRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s IAMRoleCredentialsAckRequest) GoString() string { return s.String() } type IAMRoleCredentialsMessage struct { _ struct{} `type:"structure"` MessageId *string `locationName:"messageId" type:"string"` RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"` RoleType *string `locationName:"roleType" type:"string" enum:"RoleType"` TaskArn *string `locationName:"taskArn" type:"string"` } // String returns the string representation func (s 
IAMRoleCredentialsMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s IAMRoleCredentialsMessage) GoString() string { return s.String() } type IPv4AddressAssignment struct { _ struct{} `type:"structure"` Primary *bool `locationName:"primary" type:"boolean"` PrivateAddress *string `locationName:"privateAddress" type:"string"` } // String returns the string representation func (s IPv4AddressAssignment) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s IPv4AddressAssignment) GoString() string { return s.String() } type IPv6AddressAssignment struct { _ struct{} `type:"structure"` Address *string `locationName:"address" type:"string"` } // String returns the string representation func (s IPv6AddressAssignment) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s IPv6AddressAssignment) GoString() string { return s.String() } type InactiveInstanceException struct { _ struct{} `type:"structure"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s InactiveInstanceException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s InactiveInstanceException) GoString() string { return s.String() } type InvalidClusterException struct { _ struct{} `type:"structure"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s InvalidClusterException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s InvalidClusterException) GoString() string { return s.String() } type InvalidInstanceException struct { _ struct{} `type:"structure"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s InvalidInstanceException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s InvalidInstanceException) GoString() string { return s.String() } type MountPoint struct { _ struct{} `type:"structure"` ContainerPath *string `locationName:"containerPath" type:"string"` ReadOnly *bool `locationName:"readOnly" type:"boolean"` SourceVolume *string `locationName:"sourceVolume" type:"string"` } // String returns the string representation func (s MountPoint) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MountPoint) GoString() string { return s.String() } type NackRequest struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` MessageId *string `locationName:"messageId" type:"string"` Reason *string `locationName:"reason" type:"string"` } // String returns the string representation func (s NackRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NackRequest) GoString() string { return s.String() } type NetworkInterfaceVlanProperties struct { _ struct{} `type:"structure"` TrunkInterfaceMacAddress *string `locationName:"trunkInterfaceMacAddress" type:"string"` VlanId *string `locationName:"vlanId" type:"string"` } // String returns the string representation func (s NetworkInterfaceVlanProperties) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s NetworkInterfaceVlanProperties) GoString() string { return 
s.String() } type PayloadInput struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` GeneratedAt *int64 `locationName:"generatedAt" type:"long"` MessageId *string `locationName:"messageId" type:"string"` SeqNum *int64 `locationName:"seqNum" type:"integer"` Tasks []*Task `locationName:"tasks" type:"list"` } // String returns the string representation func (s PayloadInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PayloadInput) GoString() string { return s.String() } type PayloadMessage struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` GeneratedAt *int64 `locationName:"generatedAt" type:"long"` MessageId *string `locationName:"messageId" type:"string"` SeqNum *int64 `locationName:"seqNum" type:"integer"` Tasks []*Task `locationName:"tasks" type:"list"` } // String returns the string representation func (s PayloadMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PayloadMessage) GoString() string { return s.String() } type PayloadOutput struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` MessageId *string `locationName:"messageId" type:"string"` } // String returns the string representation func (s PayloadOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PayloadOutput) GoString() string { return s.String() } type PerformUpdateInput struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` MessageId *string `locationName:"messageId" type:"string"` UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"` } // String returns the string representation func (s PerformUpdateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PerformUpdateInput) GoString() string { return s.String() } type PerformUpdateMessage struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` MessageId *string `locationName:"messageId" type:"string"` UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"` } // String returns the string representation func (s PerformUpdateMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PerformUpdateMessage) GoString() string { return s.String() } type PerformUpdateOutput struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` MessageId *string `locationName:"messageId" type:"string"` } // String returns the string representation func (s PerformUpdateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PerformUpdateOutput) GoString() string { return s.String() } type PollInput struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string 
`locationName:"containerInstance" type:"string"` SendCredentials *bool `locationName:"sendCredentials" type:"boolean"` SeqNum *int64 `locationName:"seqNum" type:"integer"` VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` } // String returns the string representation func (s PollInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PollInput) GoString() string { return s.String() } type PollOutput struct { _ struct{} `type:"structure"` Message *string `locationName:"message" type:"string"` } // String returns the string representation func (s PollOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PollOutput) GoString() string { return s.String() } type PollRequest struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` SendCredentials *bool `locationName:"sendCredentials" type:"boolean"` SeqNum *int64 `locationName:"seqNum" type:"integer"` VersionInfo *VersionInfo `locationName:"versionInfo" type:"structure"` } // String returns the string representation func (s PollRequest) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PollRequest) GoString() string { return s.String() } type PortMapping struct { _ struct{} `type:"structure"` ContainerPort *int64 `locationName:"containerPort" type:"integer"` HostPort *int64 `locationName:"hostPort" type:"integer"` Protocol *string `locationName:"protocol" type:"string" enum:"TransportProtocol"` } // String returns the string representation func (s PortMapping) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s PortMapping) GoString() string { return s.String() } type ProxyConfiguration struct { _ struct{} `type:"structure"` ContainerName *string `locationName:"containerName" type:"string"` Properties map[string]*string `locationName:"properties" type:"map"` Type *string `locationName:"type" type:"string" enum:"ProxyConfigurationType"` } // String returns the string representation func (s ProxyConfiguration) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ProxyConfiguration) GoString() string { return s.String() } type RefreshTaskIAMRoleCredentialsInput struct { _ struct{} `type:"structure"` MessageId *string `locationName:"messageId" type:"string"` RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"` RoleType *string `locationName:"roleType" type:"string" enum:"RoleType"` TaskArn *string `locationName:"taskArn" type:"string"` } // String returns the string representation func (s RefreshTaskIAMRoleCredentialsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RefreshTaskIAMRoleCredentialsInput) GoString() string { return s.String() } type RefreshTaskIAMRoleCredentialsOutput struct { _ struct{} `type:"structure"` CredentialsId *string `locationName:"credentialsId" type:"string"` Expiration *string `locationName:"expiration" type:"string"` MessageId *string `locationName:"messageId" type:"string"` } // String returns the string representation func (s RefreshTaskIAMRoleCredentialsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RefreshTaskIAMRoleCredentialsOutput) GoString() string { return s.String() } type 
RegistryAuthenticationData struct { _ struct{} `type:"structure"` AsmAuthData *ASMAuthData `locationName:"asmAuthData" type:"structure"` EcrAuthData *ECRAuthData `locationName:"ecrAuthData" type:"structure"` Type *string `locationName:"type" type:"string" enum:"AuthenticationType"` } // String returns the string representation func (s RegistryAuthenticationData) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RegistryAuthenticationData) GoString() string { return s.String() } type Secret struct { _ struct{} `type:"structure"` ContainerPath *string `locationName:"containerPath" type:"string"` Name *string `locationName:"name" type:"string"` Provider *string `locationName:"provider" type:"string" enum:"SecretProvider"` Region *string `locationName:"region" type:"string"` Target *string `locationName:"target" type:"string" enum:"SecretTarget"` Type *string `locationName:"type" type:"string" enum:"SecretType"` ValueFrom *string `locationName:"valueFrom" type:"string"` } // String returns the string representation func (s Secret) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Secret) GoString() string { return s.String() } type ServerException struct { _ struct{} `type:"structure"` Message_ *string `locationName:"message" type:"string"` } // String returns the string representation func (s ServerException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ServerException) GoString() string { return s.String() } type StageUpdateInput struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` MessageId *string `locationName:"messageId" type:"string"` UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"` } // String returns the string representation func (s StageUpdateInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s StageUpdateInput) GoString() string { return s.String() } type StageUpdateMessage struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` MessageId *string `locationName:"messageId" type:"string"` UpdateInfo *UpdateInfo `locationName:"updateInfo" type:"structure"` } // String returns the string representation func (s StageUpdateMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s StageUpdateMessage) GoString() string { return s.String() } type StageUpdateOutput struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` MessageId *string `locationName:"messageId" type:"string"` } // String returns the string representation func (s StageUpdateOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s StageUpdateOutput) GoString() string { return s.String() } type Task struct { _ struct{} `type:"structure"` Arn *string `locationName:"arn" type:"string"` Associations []*Association `locationName:"associations" type:"list"` Containers []*Container `locationName:"containers" type:"list"` Cpu *float64 `locationName:"cpu" type:"double"` DesiredStatus *string `locationName:"desiredStatus" type:"string"` 
ElasticNetworkInterfaces []*ElasticNetworkInterface `locationName:"elasticNetworkInterfaces" type:"list"` ExecutionRoleCredentials *IAMRoleCredentials `locationName:"executionRoleCredentials" type:"structure"` Family *string `locationName:"family" type:"string"` IpcMode *string `locationName:"ipcMode" type:"string"` Memory *int64 `locationName:"memory" type:"integer"` Overrides *string `locationName:"overrides" type:"string"` PidMode *string `locationName:"pidMode" type:"string"` ProxyConfiguration *ProxyConfiguration `locationName:"proxyConfiguration" type:"structure"` RoleCredentials *IAMRoleCredentials `locationName:"roleCredentials" type:"structure"` TaskDefinitionAccountId *string `locationName:"taskDefinitionAccountId" type:"string"` Version *string `locationName:"version" type:"string"` Volumes []*Volume `locationName:"volumes" type:"list"` } // String returns the string representation func (s Task) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Task) GoString() string { return s.String() } type TaskIdentifier struct { _ struct{} `type:"structure"` DesiredStatus *string `locationName:"desiredStatus" type:"string"` TaskArn *string `locationName:"taskArn" type:"string"` TaskClusterArn *string `locationName:"taskClusterArn" type:"string"` } // String returns the string representation func (s TaskIdentifier) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TaskIdentifier) GoString() string { return s.String() } type TaskManifestMessage struct { _ struct{} `type:"structure"` ClusterArn *string `locationName:"clusterArn" type:"string"` ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` GeneratedAt *int64 `locationName:"generatedAt" type:"long"` MessageId *string `locationName:"messageId" type:"string"` Tasks []*TaskIdentifier `locationName:"tasks" type:"list"` Timeline *int64 `locationName:"timeline" type:"long"` } // String returns the string representation func (s TaskManifestMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TaskManifestMessage) GoString() string { return s.String() } type TaskStopVerificationAck struct { _ struct{} `type:"structure"` GeneratedAt *int64 `locationName:"generatedAt" type:"long"` MessageId *string `locationName:"messageId" type:"string"` StopTasks []*TaskIdentifier `locationName:"stopTasks" type:"list"` } // String returns the string representation func (s TaskStopVerificationAck) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TaskStopVerificationAck) GoString() string { return s.String() } type TaskStopVerificationMessage struct { _ struct{} `type:"structure"` MessageId *string `locationName:"messageId" type:"string"` StopCandidates []*TaskIdentifier `locationName:"stopCandidates" type:"list"` } // String returns the string representation func (s TaskStopVerificationMessage) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s TaskStopVerificationMessage) GoString() string { return s.String() } type UpdateFailureInput struct { _ struct{} `type:"structure"` Cluster *string `locationName:"cluster" type:"string"` ContainerInstance *string `locationName:"containerInstance" type:"string"` MessageId *string `locationName:"messageId" type:"string"` Reason *string `locationName:"reason" type:"string"` } // String returns the string representation func (s 
UpdateFailureInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateFailureInput) GoString() string { return s.String() } type UpdateFailureOutput struct { _ struct{} `type:"structure"` } // String returns the string representation func (s UpdateFailureOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateFailureOutput) GoString() string { return s.String() } type UpdateInfo struct { _ struct{} `type:"structure"` Location *string `locationName:"location" type:"string"` Signature *string `locationName:"signature" type:"string"` } // String returns the string representation func (s UpdateInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s UpdateInfo) GoString() string { return s.String() } type VersionInfo struct { _ struct{} `type:"structure"` AgentHash *string `locationName:"agentHash" type:"string"` AgentVersion *string `locationName:"agentVersion" type:"string"` DockerVersion *string `locationName:"dockerVersion" type:"string"` } // String returns the string representation func (s VersionInfo) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s VersionInfo) GoString() string { return s.String() } type Volume struct { _ struct{} `type:"structure"` DockerVolumeConfiguration *DockerVolumeConfiguration `locationName:"dockerVolumeConfiguration" type:"structure"` EfsVolumeConfiguration *EFSVolumeConfiguration `locationName:"efsVolumeConfiguration" type:"structure"` Host *HostVolumeProperties `locationName:"host" type:"structure"` Name *string `locationName:"name" type:"string"` Type *string `locationName:"type" type:"string" enum:"VolumeType"` } // String returns the string representation func (s Volume) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Volume) GoString() string { return s.String() } type VolumeFrom struct { _ struct{} `type:"structure"` ReadOnly *bool `locationName:"readOnly" type:"boolean"` SourceContainer *string `locationName:"sourceContainer" type:"string"` } // String returns the string representation func (s VolumeFrom) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s VolumeFrom) GoString() string { return s.String() }
1
24095
This file is autogenerated. You should edit `model/api/api-2.json` and then `go generate` this file; otherwise, the next time someone regenerates api.go, the changes you added here will be removed.
aws-amazon-ecs-agent
go
@@ -0,0 +1,8 @@
+class ProgressBarsController < ApplicationController
+
+  def show
+    _trail = Trail.find(params[:trail_id])
+    @trail = TrailWithProgress.new(_trail, user: current_user)
+    render layout: false
+  end
+end
1
1
14575
Extra empty line detected at class body beginning.
thoughtbot-upcase
rb
@@ -934,7 +934,9 @@ public class ExecutorManager extends EventHandler implements
   @Override
   public String submitExecutableFlow(ExecutableFlow exflow, String userId)
       throws ExecutorManagerException {
-    synchronized (exflow) {
+
+    String exFlowKey = exflow.getProjectName() + "." + exflow.getId() + ".submitFlow";
+    synchronized (exFlowKey.intern()) {
     String flowId = exflow.getFlowId();
     logger.info("Submitting execution flow " + flowId + " by " + userId);
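A minimal sketch of the locking pattern this patch switches to, under stated assumptions: the class FlowSubmitLockExample and its nested Flow type are hypothetical stand-ins (not Azkaban's actual ExecutableFlow or ExecutorManager code), kept only detailed enough to show the idea of building a stable composite key per project/flow and synchronizing on its interned String, so concurrent submissions of the same flow still serialize while different flows no longer contend on the mutable flow object.

public final class FlowSubmitLockExample {

    // Hypothetical stand-in for ExecutableFlow; only the fields the key needs.
    static final class Flow {
        final String projectName;
        final String id;

        Flow(String projectName, String id) {
            this.projectName = projectName;
            this.id = id;
        }
    }

    String submit(Flow flow, String userId) {
        // Same key shape as the patch uses: "<projectName>.<flowId>.submitFlow".
        String key = flow.projectName + "." + flow.id + ".submitFlow";

        // String.intern() returns the canonical instance for equal strings, so
        // every thread that builds an equal key locks on the same monitor.
        synchronized (key.intern()) {
            // ... per-project/flow submission work would be guarded here ...
            return "Submitted " + flow.projectName + "." + flow.id + " by " + userId;
        }
    }

    public static void main(String[] args) {
        FlowSubmitLockExample example = new FlowSubmitLockExample();
        System.out.println(example.submit(new Flow("demo-project", "daily-etl"), "alice"));
    }
}

One caveat with this design choice: interned Strings live in a JVM-wide pool, so any other code that interns an equal string shares the same monitor; a dedicated lock registry (for example a ConcurrentHashMap of keys to lock objects, or Guava's Striped locks) is a commonly used alternative when that coupling is a concern.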
1
/* * Copyright 2014 LinkedIn Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package azkaban.executor; import azkaban.metrics.CommonMetrics; import azkaban.constants.ServerProperties; import azkaban.utils.FlowUtils; import java.io.File; import java.io.IOException; import java.lang.Thread.State; import java.net.URI; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.TreeMap; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import org.apache.commons.lang.StringUtils; import org.apache.log4j.Logger; import org.joda.time.DateTime; import azkaban.alert.Alerter; import azkaban.event.Event; import azkaban.event.Event.Type; import azkaban.event.EventData; import azkaban.event.EventHandler; import azkaban.executor.selector.ExecutorComparator; import azkaban.executor.selector.ExecutorFilter; import azkaban.executor.selector.ExecutorSelector; import azkaban.project.Project; import azkaban.project.ProjectWhitelist; import azkaban.scheduler.ScheduleStatisticManager; import azkaban.utils.FileIOUtils.JobMetaData; import azkaban.utils.FileIOUtils.LogData; import azkaban.utils.JSONUtils; import azkaban.utils.Pair; import azkaban.utils.Props; /** * Executor manager used to manage the client side job. 
* */ public class ExecutorManager extends EventHandler implements ExecutorManagerAdapter { static final String AZKABAN_EXECUTOR_SELECTOR_FILTERS = "azkaban.executorselector.filters"; static final String AZKABAN_EXECUTOR_SELECTOR_COMPARATOR_PREFIX = "azkaban.executorselector.comparator."; static final String AZKABAN_QUEUEPROCESSING_ENABLED = "azkaban.queueprocessing.enabled"; static final String AZKABAN_USE_MULTIPLE_EXECUTORS = "azkaban.use.multiple.executors"; private static final String AZKABAN_WEBSERVER_QUEUE_SIZE = "azkaban.webserver.queue.size"; private static final String AZKABAN_ACTIVE_EXECUTOR_REFRESH_IN_MS = "azkaban.activeexecutor.refresh.milisecinterval"; private static final String AZKABAN_ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW = "azkaban.activeexecutor.refresh.flowinterval"; private static final String AZKABAN_EXECUTORINFO_REFRESH_MAX_THREADS = "azkaban.executorinfo.refresh.maxThreads"; private static final String AZKABAN_MAX_DISPATCHING_ERRORS_PERMITTED = "azkaban.maxDispatchingErrors"; private static Logger logger = Logger.getLogger(ExecutorManager.class); private ExecutorLoader executorLoader; private CleanerThread cleanerThread; private ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>> runningFlows = new ConcurrentHashMap<Integer, Pair<ExecutionReference, ExecutableFlow>>(); private ConcurrentHashMap<Integer, ExecutableFlow> recentlyFinished = new ConcurrentHashMap<Integer, ExecutableFlow>(); QueuedExecutions queuedFlows; final private Set<Executor> activeExecutors = new HashSet<Executor>(); private QueueProcessorThread queueProcessor; private ExecutingManagerUpdaterThread executingManager; // 12 weeks private static final long DEFAULT_EXECUTION_LOGS_RETENTION_MS = 3 * 4 * 7 * 24 * 60 * 60 * 1000L; private long lastCleanerThreadCheckTime = -1; private long lastThreadCheckTime = -1; private String updaterStage = "not started"; private Map<String, Alerter> alerters; File cacheDir; private final Props azkProps; private List<String> filterList; private Map<String, Integer> comparatorWeightsMap; private long lastSuccessfulExecutorInfoRefresh; private ExecutorService executorInforRefresherService; public ExecutorManager(Props azkProps, ExecutorLoader loader, Map<String, Alerter> alerters) throws ExecutorManagerException { this.alerters = alerters; this.azkProps = azkProps; this.executorLoader = loader; this.setupExecutors(); this.loadRunningFlows(); queuedFlows = new QueuedExecutions(azkProps.getLong(AZKABAN_WEBSERVER_QUEUE_SIZE, 100000)); this.loadQueuedFlows(); cacheDir = new File(azkProps.getString("cache.directory", "cache")); executingManager = new ExecutingManagerUpdaterThread(); executingManager.start(); if(isMultiExecutorMode()) { setupMultiExecutorMode(); } long executionLogsRetentionMs = azkProps.getLong("execution.logs.retention.ms", DEFAULT_EXECUTION_LOGS_RETENTION_MS); cleanerThread = new CleanerThread(executionLogsRetentionMs); cleanerThread.start(); } private void setupMultiExecutorMode() { // initliatize hard filters for executor selector from azkaban.properties String filters = azkProps.getString(AZKABAN_EXECUTOR_SELECTOR_FILTERS, ""); if (filters != null) { filterList = Arrays.asList(StringUtils.split(filters, ",")); } // initliatize comparator feature weights for executor selector from // azkaban.properties Map<String, String> compListStrings = azkProps.getMapByPrefix(AZKABAN_EXECUTOR_SELECTOR_COMPARATOR_PREFIX); if (compListStrings != null) { comparatorWeightsMap = new TreeMap<String, Integer>(); for (Map.Entry<String, String> entry : 
compListStrings.entrySet()) { comparatorWeightsMap.put(entry.getKey(), Integer.valueOf(entry.getValue())); } } executorInforRefresherService = Executors.newFixedThreadPool(azkProps.getInt( AZKABAN_EXECUTORINFO_REFRESH_MAX_THREADS, 5)); // configure queue processor queueProcessor = new QueueProcessorThread(azkProps.getBoolean( AZKABAN_QUEUEPROCESSING_ENABLED, true), azkProps.getLong( AZKABAN_ACTIVE_EXECUTOR_REFRESH_IN_MS, 50000), azkProps.getInt( AZKABAN_ACTIVE_EXECUTOR_REFRESH_IN_NUM_FLOW, 5), azkProps.getInt( AZKABAN_MAX_DISPATCHING_ERRORS_PERMITTED, activeExecutors.size())); queueProcessor.start(); } /** * * {@inheritDoc} * @see azkaban.executor.ExecutorManagerAdapter#setupExecutors() */ @Override public void setupExecutors() throws ExecutorManagerException { Set<Executor> newExecutors = new HashSet<Executor>(); if (isMultiExecutorMode()) { logger.info("Initializing multi executors from database"); newExecutors.addAll(executorLoader.fetchActiveExecutors()); } else if (azkProps.containsKey("executor.port")) { // Add local executor, if specified as per properties String executorHost = azkProps.getString(ServerProperties.EXECUTOR_HOST, "localhost"); int executorPort = azkProps.getInt("executor.port"); logger.info(String.format("Initializing local executor %s:%d", executorHost, executorPort)); Executor executor = executorLoader.fetchExecutor(executorHost, executorPort); if (executor == null) { executor = executorLoader.addExecutor(executorHost, executorPort); } else if (!executor.isActive()) { executor.setActive(true); executorLoader.updateExecutor(executor); } newExecutors.add(new Executor(executor.getId(), executorHost, executorPort, true)); } if (newExecutors.isEmpty()) { logger.error("No active executor found"); throw new ExecutorManagerException("No active executor found"); } else if(newExecutors.size() > 1 && !isMultiExecutorMode()) { logger.error("Multiple local executors specified"); throw new ExecutorManagerException("Multiple local executors specified"); } else { // clear all active executors, only if we have at least one new active // executors activeExecutors.clear(); activeExecutors.addAll(newExecutors); } } private boolean isMultiExecutorMode() { return azkProps.getBoolean(AZKABAN_USE_MULTIPLE_EXECUTORS, false); } /** * Refresh Executor stats for all the actie executors in this executorManager */ private void refreshExecutors() { synchronized (activeExecutors) { List<Pair<Executor, Future<String>>> futures = new ArrayList<Pair<Executor, Future<String>>>(); for (final Executor executor : activeExecutors) { // execute each executorInfo refresh task to fetch Future<String> fetchExecutionInfo = executorInforRefresherService.submit(new Callable<String>() { @Override public String call() throws Exception { return callExecutorForJsonString(executor.getHost(), executor.getPort(), "/serverStatistics", null); } }); futures.add(new Pair<Executor, Future<String>>(executor, fetchExecutionInfo)); } boolean wasSuccess = true; for (Pair<Executor, Future<String>> refreshPair : futures) { Executor executor = refreshPair.getFirst(); executor.setExecutorInfo(null); // invalidate cached EXecutorInfo try { // max 5 secs String jsonString = refreshPair.getSecond().get(5, TimeUnit.SECONDS); executor.setExecutorInfo(ExecutorInfo.fromJSONString(jsonString)); logger.info(String.format( "Successfully refreshed executor: %s with executor info : %s", executor, jsonString)); } catch (TimeoutException e) { wasSuccess = false; logger.error("Timed out while waiting for ExecutorInfo refresh" + executor, e); } 
catch (Exception e) { wasSuccess = false; logger.error("Failed to update ExecutorInfo for executor : " + executor, e); } } // update is successful for all executors if (wasSuccess) { lastSuccessfulExecutorInfoRefresh = System.currentTimeMillis(); } } } /** * Throws exception if running in local mode * {@inheritDoc} * @see azkaban.executor.ExecutorManagerAdapter#disableQueueProcessorThread() */ @Override public void disableQueueProcessorThread() throws ExecutorManagerException { if (isMultiExecutorMode()) { queueProcessor.setActive(false); } else { throw new ExecutorManagerException( "Cannot disable QueueProcessor in local mode"); } } /** * Throws exception if running in local mode * {@inheritDoc} * @see azkaban.executor.ExecutorManagerAdapter#enableQueueProcessorThread() */ @Override public void enableQueueProcessorThread() throws ExecutorManagerException { if (isMultiExecutorMode()) { queueProcessor.setActive(true); } else { throw new ExecutorManagerException( "Cannot enable QueueProcessor in local mode"); } } public State getQueueProcessorThreadState() { if (isMultiExecutorMode()) return queueProcessor.getState(); else return State.NEW; // not started in local mode } /** * Returns state of QueueProcessor False, no flow is being dispatched True , * flows are being dispatched as expected * * @return */ public boolean isQueueProcessorThreadActive() { if (isMultiExecutorMode()) return queueProcessor.isActive(); else return false; } /** * Return last Successful ExecutorInfo Refresh for all active executors * * @return */ public long getLastSuccessfulExecutorInfoRefresh() { return this.lastSuccessfulExecutorInfoRefresh; } /** * Get currently supported Comparators available to use via azkaban.properties * * @return */ public Set<String> getAvailableExecutorComparatorNames() { return ExecutorComparator.getAvailableComparatorNames(); } /** * Get currently supported filters available to use via azkaban.properties * * @return */ public Set<String> getAvailableExecutorFilterNames() { return ExecutorFilter.getAvailableFilterNames(); } @Override public State getExecutorManagerThreadState() { return executingManager.getState(); } public String getExecutorThreadStage() { return updaterStage; } @Override public boolean isExecutorManagerThreadActive() { return executingManager.isAlive(); } @Override public long getLastExecutorManagerThreadCheckTime() { return lastThreadCheckTime; } public long getLastCleanerThreadCheckTime() { return this.lastCleanerThreadCheckTime; } @Override public Collection<Executor> getAllActiveExecutors() { return Collections.unmodifiableCollection(activeExecutors); } /** * * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#fetchExecutor(int) */ @Override public Executor fetchExecutor(int executorId) throws ExecutorManagerException { for (Executor executor : activeExecutors) { if (executor.getId() == executorId) { return executor; } } return executorLoader.fetchExecutor(executorId); } @Override public Set<String> getPrimaryServerHosts() { // Only one for now. More probably later. 
HashSet<String> ports = new HashSet<String>(); for (Executor executor : activeExecutors) { ports.add(executor.getHost() + ":" + executor.getPort()); } return ports; } @Override public Set<String> getAllActiveExecutorServerHosts() { // Includes non primary server/hosts HashSet<String> ports = new HashSet<String>(); for (Executor executor : activeExecutors) { ports.add(executor.getHost() + ":" + executor.getPort()); } // include executor which were initially active and still has flows running for (Pair<ExecutionReference, ExecutableFlow> running : runningFlows .values()) { ExecutionReference ref = running.getFirst(); ports.add(ref.getHost() + ":" + ref.getPort()); } return ports; } private void loadRunningFlows() throws ExecutorManagerException { runningFlows.putAll(executorLoader.fetchActiveFlows()); } /* * load queued flows i.e with active_execution_reference and not assigned to * any executor */ private void loadQueuedFlows() throws ExecutorManagerException { List<Pair<ExecutionReference, ExecutableFlow>> retrievedExecutions = executorLoader.fetchQueuedFlows(); if (retrievedExecutions != null) { for (Pair<ExecutionReference, ExecutableFlow> pair : retrievedExecutions) { queuedFlows.enqueue(pair.getSecond(), pair.getFirst()); } } } /** * Gets a list of all the active (running flows and non-dispatched flows) * executions for a given project and flow {@inheritDoc}. Results should * be sorted as we assume this while setting up pipelined execution Id. * * @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows(int, * java.lang.String) */ @Override public List<Integer> getRunningFlows(int projectId, String flowId) { List<Integer> executionIds = new ArrayList<Integer>(); executionIds.addAll(getRunningFlowsHelper(projectId, flowId, queuedFlows.getAllEntries())); executionIds.addAll(getRunningFlowsHelper(projectId, flowId, runningFlows.values())); Collections.sort(executionIds); return executionIds; } /* Helper method for getRunningFlows */ private List<Integer> getRunningFlowsHelper(int projectId, String flowId, Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { List<Integer> executionIds = new ArrayList<Integer>(); for (Pair<ExecutionReference, ExecutableFlow> ref : collection) { if (ref.getSecond().getFlowId().equals(flowId) && ref.getSecond().getProjectId() == projectId) { executionIds.add(ref.getFirst().getExecId()); } } return executionIds; } /** * * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#getActiveFlowsWithExecutor() */ @Override public List<Pair<ExecutableFlow, Executor>> getActiveFlowsWithExecutor() throws IOException { List<Pair<ExecutableFlow, Executor>> flows = new ArrayList<Pair<ExecutableFlow, Executor>>(); getActiveFlowsWithExecutorHelper(flows, queuedFlows.getAllEntries()); getActiveFlowsWithExecutorHelper(flows, runningFlows.values()); return flows; } /* Helper method for getActiveFlowsWithExecutor */ private void getActiveFlowsWithExecutorHelper( List<Pair<ExecutableFlow, Executor>> flows, Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { for (Pair<ExecutionReference, ExecutableFlow> ref : collection) { flows.add(new Pair<ExecutableFlow, Executor>(ref.getSecond(), ref .getFirst().getExecutor())); } } /** * Checks whether the given flow has an active (running, non-dispatched) * executions {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#isFlowRunning(int, * java.lang.String) */ @Override public boolean isFlowRunning(int projectId, String flowId) { boolean isRunning = false; isRunning = isRunning || 
isFlowRunningHelper(projectId, flowId, queuedFlows.getAllEntries()); isRunning = isRunning || isFlowRunningHelper(projectId, flowId, runningFlows.values()); return isRunning; } /* Search a running flow in a collection */ private boolean isFlowRunningHelper(int projectId, String flowId, Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { for (Pair<ExecutionReference, ExecutableFlow> ref : collection) { if (ref.getSecond().getProjectId() == projectId && ref.getSecond().getFlowId().equals(flowId)) { return true; } } return false; } /** * Fetch ExecutableFlow from database {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#getExecutableFlow(int) */ @Override public ExecutableFlow getExecutableFlow(int execId) throws ExecutorManagerException { return executorLoader.fetchExecutableFlow(execId); } /** * Get all active (running, non-dispatched) flows * * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows() */ @Override public List<ExecutableFlow> getRunningFlows() { ArrayList<ExecutableFlow> flows = new ArrayList<ExecutableFlow>(); getActiveFlowHelper(flows, queuedFlows.getAllEntries()); getActiveFlowHelper(flows, runningFlows.values()); return flows; } /* * Helper method to get all running flows from a Pair<ExecutionReference, * ExecutableFlow collection */ private void getActiveFlowHelper(ArrayList<ExecutableFlow> flows, Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { for (Pair<ExecutionReference, ExecutableFlow> ref : collection) { flows.add(ref.getSecond()); } } /** * Get execution Ids of all active (running, non-dispatched) flows * * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows() */ public String getRunningFlowIds() { List<Integer> allIds = new ArrayList<Integer>(); getRunningFlowsIdsHelper(allIds, queuedFlows.getAllEntries()); getRunningFlowsIdsHelper(allIds, runningFlows.values()); Collections.sort(allIds); return allIds.toString(); } /** * Get execution Ids of all non-dispatched flows * * {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#getRunningFlows() */ public String getQueuedFlowIds() { List<Integer> allIds = new ArrayList<Integer>(); getRunningFlowsIdsHelper(allIds, queuedFlows.getAllEntries()); Collections.sort(allIds); return allIds.toString(); } public long getQueuedFlowSize() { return queuedFlows.size(); } /* Helper method to flow ids of all running flows */ private void getRunningFlowsIdsHelper(List<Integer> allIds, Collection<Pair<ExecutionReference, ExecutableFlow>> collection) { for (Pair<ExecutionReference, ExecutableFlow> ref : collection) { allIds.add(ref.getSecond().getExecutionId()); } } public List<ExecutableFlow> getRecentlyFinishedFlows() { return new ArrayList<ExecutableFlow>(recentlyFinished.values()); } @Override public List<ExecutableFlow> getExecutableFlows(Project project, String flowId, int skip, int size) throws ExecutorManagerException { List<ExecutableFlow> flows = executorLoader.fetchFlowHistory(project.getId(), flowId, skip, size); return flows; } @Override public List<ExecutableFlow> getExecutableFlows(int skip, int size) throws ExecutorManagerException { List<ExecutableFlow> flows = executorLoader.fetchFlowHistory(skip, size); return flows; } @Override public List<ExecutableFlow> getExecutableFlows(String flowIdContains, int skip, int size) throws ExecutorManagerException { List<ExecutableFlow> flows = executorLoader.fetchFlowHistory(null, '%' + flowIdContains + '%', null, 0, -1, -1, skip, size); return flows; } @Override public 
List<ExecutableFlow> getExecutableFlows(String projContain, String flowContain, String userContain, int status, long begin, long end, int skip, int size) throws ExecutorManagerException { List<ExecutableFlow> flows = executorLoader.fetchFlowHistory(projContain, flowContain, userContain, status, begin, end, skip, size); return flows; } @Override public List<ExecutableJobInfo> getExecutableJobs(Project project, String jobId, int skip, int size) throws ExecutorManagerException { List<ExecutableJobInfo> nodes = executorLoader.fetchJobHistory(project.getId(), jobId, skip, size); return nodes; } @Override public int getNumberOfJobExecutions(Project project, String jobId) throws ExecutorManagerException { return executorLoader.fetchNumExecutableNodes(project.getId(), jobId); } @Override public int getNumberOfExecutions(Project project, String flowId) throws ExecutorManagerException { return executorLoader.fetchNumExecutableFlows(project.getId(), flowId); } @Override public LogData getExecutableFlowLog(ExecutableFlow exFlow, int offset, int length) throws ExecutorManagerException { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(exFlow.getExecutionId()); if (pair != null) { Pair<String, String> typeParam = new Pair<String, String>("type", "flow"); Pair<String, String> offsetParam = new Pair<String, String>("offset", String.valueOf(offset)); Pair<String, String> lengthParam = new Pair<String, String>("length", String.valueOf(length)); @SuppressWarnings("unchecked") Map<String, Object> result = callExecutorServer(pair.getFirst(), ConnectorParams.LOG_ACTION, typeParam, offsetParam, lengthParam); return LogData.createLogDataFromObject(result); } else { LogData value = executorLoader.fetchLogs(exFlow.getExecutionId(), "", 0, offset, length); return value; } } @Override public LogData getExecutionJobLog(ExecutableFlow exFlow, String jobId, int offset, int length, int attempt) throws ExecutorManagerException { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(exFlow.getExecutionId()); if (pair != null) { Pair<String, String> typeParam = new Pair<String, String>("type", "job"); Pair<String, String> jobIdParam = new Pair<String, String>("jobId", jobId); Pair<String, String> offsetParam = new Pair<String, String>("offset", String.valueOf(offset)); Pair<String, String> lengthParam = new Pair<String, String>("length", String.valueOf(length)); Pair<String, String> attemptParam = new Pair<String, String>("attempt", String.valueOf(attempt)); @SuppressWarnings("unchecked") Map<String, Object> result = callExecutorServer(pair.getFirst(), ConnectorParams.LOG_ACTION, typeParam, jobIdParam, offsetParam, lengthParam, attemptParam); return LogData.createLogDataFromObject(result); } else { LogData value = executorLoader.fetchLogs(exFlow.getExecutionId(), jobId, attempt, offset, length); return value; } } @Override public List<Object> getExecutionJobStats(ExecutableFlow exFlow, String jobId, int attempt) throws ExecutorManagerException { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(exFlow.getExecutionId()); if (pair == null) { return executorLoader.fetchAttachments(exFlow.getExecutionId(), jobId, attempt); } Pair<String, String> jobIdParam = new Pair<String, String>("jobId", jobId); Pair<String, String> attemptParam = new Pair<String, String>("attempt", String.valueOf(attempt)); @SuppressWarnings("unchecked") Map<String, Object> result = callExecutorServer(pair.getFirst(), ConnectorParams.ATTACHMENTS_ACTION, jobIdParam, attemptParam); @SuppressWarnings("unchecked") 
List<Object> jobStats = (List<Object>) result.get("attachments"); return jobStats; } @Override public JobMetaData getExecutionJobMetaData(ExecutableFlow exFlow, String jobId, int offset, int length, int attempt) throws ExecutorManagerException { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(exFlow.getExecutionId()); if (pair != null) { Pair<String, String> typeParam = new Pair<String, String>("type", "job"); Pair<String, String> jobIdParam = new Pair<String, String>("jobId", jobId); Pair<String, String> offsetParam = new Pair<String, String>("offset", String.valueOf(offset)); Pair<String, String> lengthParam = new Pair<String, String>("length", String.valueOf(length)); Pair<String, String> attemptParam = new Pair<String, String>("attempt", String.valueOf(attempt)); @SuppressWarnings("unchecked") Map<String, Object> result = callExecutorServer(pair.getFirst(), ConnectorParams.METADATA_ACTION, typeParam, jobIdParam, offsetParam, lengthParam, attemptParam); return JobMetaData.createJobMetaDataFromObject(result); } else { return null; } } /** * if flows was dispatched to an executor, cancel by calling Executor else if * flow is still in queue, remove from queue and finalize {@inheritDoc} * * @see azkaban.executor.ExecutorManagerAdapter#cancelFlow(azkaban.executor.ExecutableFlow, * java.lang.String) */ @Override public void cancelFlow(ExecutableFlow exFlow, String userId) throws ExecutorManagerException { synchronized (exFlow) { if (runningFlows.containsKey(exFlow.getExecutionId())) { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(exFlow.getExecutionId()); callExecutorServer(pair.getFirst(), ConnectorParams.CANCEL_ACTION, userId); } else if (queuedFlows.hasExecution(exFlow.getExecutionId())) { queuedFlows.dequeue(exFlow.getExecutionId()); finalizeFlows(exFlow); } else { throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow " + exFlow.getFlowId() + " isn't running."); } } } @Override public void resumeFlow(ExecutableFlow exFlow, String userId) throws ExecutorManagerException { synchronized (exFlow) { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(exFlow.getExecutionId()); if (pair == null) { throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow " + exFlow.getFlowId() + " isn't running."); } callExecutorServer(pair.getFirst(), ConnectorParams.RESUME_ACTION, userId); } } @Override public void pauseFlow(ExecutableFlow exFlow, String userId) throws ExecutorManagerException { synchronized (exFlow) { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(exFlow.getExecutionId()); if (pair == null) { throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow " + exFlow.getFlowId() + " isn't running."); } callExecutorServer(pair.getFirst(), ConnectorParams.PAUSE_ACTION, userId); } } @Override public void pauseExecutingJobs(ExecutableFlow exFlow, String userId, String... jobIds) throws ExecutorManagerException { modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_PAUSE_JOBS, userId, jobIds); } @Override public void resumeExecutingJobs(ExecutableFlow exFlow, String userId, String... 
jobIds) throws ExecutorManagerException { modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RESUME_JOBS, userId, jobIds); } @Override public void retryFailures(ExecutableFlow exFlow, String userId) throws ExecutorManagerException { modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_FAILURES, userId); } @Override public void retryExecutingJobs(ExecutableFlow exFlow, String userId, String... jobIds) throws ExecutorManagerException { modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_RETRY_JOBS, userId, jobIds); } @Override public void disableExecutingJobs(ExecutableFlow exFlow, String userId, String... jobIds) throws ExecutorManagerException { modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_DISABLE_JOBS, userId, jobIds); } @Override public void enableExecutingJobs(ExecutableFlow exFlow, String userId, String... jobIds) throws ExecutorManagerException { modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_ENABLE_JOBS, userId, jobIds); } @Override public void cancelExecutingJobs(ExecutableFlow exFlow, String userId, String... jobIds) throws ExecutorManagerException { modifyExecutingJobs(exFlow, ConnectorParams.MODIFY_CANCEL_JOBS, userId, jobIds); } @SuppressWarnings("unchecked") private Map<String, Object> modifyExecutingJobs(ExecutableFlow exFlow, String command, String userId, String... jobIds) throws ExecutorManagerException { synchronized (exFlow) { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(exFlow.getExecutionId()); if (pair == null) { throw new ExecutorManagerException("Execution " + exFlow.getExecutionId() + " of flow " + exFlow.getFlowId() + " isn't running."); } Map<String, Object> response = null; if (jobIds != null && jobIds.length > 0) { for (String jobId : jobIds) { if (!jobId.isEmpty()) { ExecutableNode node = exFlow.getExecutableNode(jobId); if (node == null) { throw new ExecutorManagerException("Job " + jobId + " doesn't exist in execution " + exFlow.getExecutionId() + "."); } } } String ids = StringUtils.join(jobIds, ','); response = callExecutorServer(pair.getFirst(), ConnectorParams.MODIFY_EXECUTION_ACTION, userId, new Pair<String, String>( ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command), new Pair<String, String>(ConnectorParams.MODIFY_JOBS_LIST, ids)); } else { response = callExecutorServer(pair.getFirst(), ConnectorParams.MODIFY_EXECUTION_ACTION, userId, new Pair<String, String>( ConnectorParams.MODIFY_EXECUTION_ACTION_TYPE, command)); } return response; } } @Override public String submitExecutableFlow(ExecutableFlow exflow, String userId) throws ExecutorManagerException { synchronized (exflow) { String flowId = exflow.getFlowId(); logger.info("Submitting execution flow " + flowId + " by " + userId); String message = ""; if (queuedFlows.isFull()) { message = String .format( "Failed to submit %s for project %s. 
Azkaban has overrun its webserver queue capacity", flowId, exflow.getProjectName()); logger.error(message); } else { int projectId = exflow.getProjectId(); exflow.setSubmitUser(userId); exflow.setSubmitTime(System.currentTimeMillis()); List<Integer> running = getRunningFlows(projectId, flowId); ExecutionOptions options = exflow.getExecutionOptions(); if (options == null) { options = new ExecutionOptions(); } if (options.getDisabledJobs() != null) { FlowUtils.applyDisabledJobs(options.getDisabledJobs(), exflow); } if (!running.isEmpty()) { if (options.getConcurrentOption().equals( ExecutionOptions.CONCURRENT_OPTION_PIPELINE)) { Collections.sort(running); Integer runningExecId = running.get(running.size() - 1); options.setPipelineExecutionId(runningExecId); message = "Flow " + flowId + " is already running with exec id " + runningExecId + ". Pipelining level " + options.getPipelineLevel() + ". \n"; } else if (options.getConcurrentOption().equals( ExecutionOptions.CONCURRENT_OPTION_SKIP)) { throw new ExecutorManagerException("Flow " + flowId + " is already running. Skipping execution.", ExecutorManagerException.Reason.SkippedExecution); } else { // The settings is to run anyways. message = "Flow " + flowId + " is already running with exec id " + StringUtils.join(running, ",") + ". Will execute concurrently. \n"; } } boolean memoryCheck = !ProjectWhitelist.isProjectWhitelisted(exflow.getProjectId(), ProjectWhitelist.WhitelistType.MemoryCheck); options.setMemoryCheck(memoryCheck); // The exflow id is set by the loader. So it's unavailable until after // this call. executorLoader.uploadExecutableFlow(exflow); // We create an active flow reference in the datastore. If the upload // fails, we remove the reference. ExecutionReference reference = new ExecutionReference(exflow.getExecutionId()); if (isMultiExecutorMode()) { //Take MultiExecutor route executorLoader.addActiveExecutableReference(reference); queuedFlows.enqueue(exflow, reference); } else { // assign only local executor we have Executor choosenExecutor = activeExecutors.iterator().next(); executorLoader.addActiveExecutableReference(reference); try { dispatch(reference, exflow, choosenExecutor); } catch (ExecutorManagerException e) { executorLoader.removeActiveExecutableReference(reference .getExecId()); throw e; } } message += "Execution submitted successfully with exec id " + exflow.getExecutionId(); } return message; } } private void cleanOldExecutionLogs(long millis) { long beforeDeleteLogsTimestamp = System.currentTimeMillis(); try { int count = executorLoader.removeExecutionLogsByTime(millis); logger.info("Cleaned up " + count + " log entries."); } catch (ExecutorManagerException e) { logger.error("log clean up failed. 
", e); } logger.info("log clean up time: " + (System.currentTimeMillis() - beforeDeleteLogsTimestamp)/1000 + " seconds."); } private Map<String, Object> callExecutorServer(ExecutableFlow exflow, Executor executor, String action) throws ExecutorManagerException { try { return callExecutorServer(executor.getHost(), executor.getPort(), action, exflow.getExecutionId(), null, (Pair<String, String>[]) null); } catch (IOException e) { throw new ExecutorManagerException(e); } } private Map<String, Object> callExecutorServer(ExecutionReference ref, String action, String user) throws ExecutorManagerException { try { return callExecutorServer(ref.getHost(), ref.getPort(), action, ref.getExecId(), user, (Pair<String, String>[]) null); } catch (IOException e) { throw new ExecutorManagerException(e); } } private Map<String, Object> callExecutorServer(ExecutionReference ref, String action, Pair<String, String>... params) throws ExecutorManagerException { try { return callExecutorServer(ref.getHost(), ref.getPort(), action, ref.getExecId(), null, params); } catch (IOException e) { throw new ExecutorManagerException(e); } } private Map<String, Object> callExecutorServer(ExecutionReference ref, String action, String user, Pair<String, String>... params) throws ExecutorManagerException { try { return callExecutorServer(ref.getHost(), ref.getPort(), action, ref.getExecId(), user, params); } catch (IOException e) { throw new ExecutorManagerException(e); } } private Map<String, Object> callExecutorServer(String host, int port, String action, Integer executionId, String user, Pair<String, String>... params) throws IOException { List<Pair<String, String>> paramList = new ArrayList<Pair<String,String>>(); // if params = null if(params != null) { paramList.addAll(Arrays.asList(params)); } paramList .add(new Pair<String, String>(ConnectorParams.ACTION_PARAM, action)); paramList.add(new Pair<String, String>(ConnectorParams.EXECID_PARAM, String .valueOf(executionId))); paramList.add(new Pair<String, String>(ConnectorParams.USER_PARAM, user)); Map<String, Object> jsonResponse = callExecutorForJsonObject(host, port, "/executor", paramList); return jsonResponse; } /* * Helper method used by ExecutorManager to call executor and return json * object map */ private Map<String, Object> callExecutorForJsonObject(String host, int port, String path, List<Pair<String, String>> paramList) throws IOException { String responseString = callExecutorForJsonString(host, port, path, paramList); @SuppressWarnings("unchecked") Map<String, Object> jsonResponse = (Map<String, Object>) JSONUtils.parseJSONFromString(responseString); String error = (String) jsonResponse.get(ConnectorParams.RESPONSE_ERROR); if (error != null) { throw new IOException(error); } return jsonResponse; } /* * Helper method used by ExecutorManager to call executor and return raw json * string */ private String callExecutorForJsonString(String host, int port, String path, List<Pair<String, String>> paramList) throws IOException { if (paramList == null) { paramList = new ArrayList<Pair<String, String>>(); } ExecutorApiClient apiclient = ExecutorApiClient.getInstance(); @SuppressWarnings("unchecked") URI uri = ExecutorApiClient.buildUri(host, port, path, true, paramList.toArray(new Pair[0])); return apiclient.httpGet(uri, null); } /** * Manage servlet call for stats servlet in Azkaban execution server * {@inheritDoc} * * @throws ExecutorManagerException * * @see azkaban.executor.ExecutorManagerAdapter#callExecutorStats(java.lang.String, * azkaban.utils.Pair[]) */ 
@Override public Map<String, Object> callExecutorStats(int executorId, String action, Pair<String, String>... params) throws IOException, ExecutorManagerException { Executor executor = fetchExecutor(executorId); List<Pair<String, String>> paramList = new ArrayList<Pair<String, String>>(); // if params = null if (params != null) { paramList.addAll(Arrays.asList(params)); } paramList .add(new Pair<String, String>(ConnectorParams.ACTION_PARAM, action)); return callExecutorForJsonObject(executor.getHost(), executor.getPort(), "/stats", paramList); } @Override public Map<String, Object> callExecutorJMX(String hostPort, String action, String mBean) throws IOException { List<Pair<String, String>> paramList = new ArrayList<Pair<String, String>>(); paramList.add(new Pair<String, String>(action, "")); if(mBean != null) { paramList.add(new Pair<String, String>(ConnectorParams.JMX_MBEAN, mBean)); } String[] hostPortSplit = hostPort.split(":"); return callExecutorForJsonObject(hostPortSplit[0], Integer.valueOf(hostPortSplit[1]), "/jmx", paramList); } @Override public void shutdown() { if (isMultiExecutorMode()) { queueProcessor.shutdown(); } executingManager.shutdown(); } private class ExecutingManagerUpdaterThread extends Thread { private boolean shutdown = false; public ExecutingManagerUpdaterThread() { this.setName("ExecutorManagerUpdaterThread"); } // 10 mins recently finished threshold. private long recentlyFinishedLifetimeMs = 600000; private int waitTimeIdleMs = 2000; private int waitTimeMs = 500; // When we have an http error, for that flow, we'll check every 10 secs, 6 // times (1 mins) before we evict. private int numErrors = 6; private long errorThreshold = 10000; private void shutdown() { shutdown = true; } @SuppressWarnings("unchecked") public void run() { while (!shutdown) { try { lastThreadCheckTime = System.currentTimeMillis(); updaterStage = "Starting update all flows."; Map<Executor, List<ExecutableFlow>> exFlowMap = getFlowToExecutorMap(); ArrayList<ExecutableFlow> finishedFlows = new ArrayList<ExecutableFlow>(); ArrayList<ExecutableFlow> finalizeFlows = new ArrayList<ExecutableFlow>(); if (exFlowMap.size() > 0) { for (Map.Entry<Executor, List<ExecutableFlow>> entry : exFlowMap .entrySet()) { List<Long> updateTimesList = new ArrayList<Long>(); List<Integer> executionIdsList = new ArrayList<Integer>(); Executor executor = entry.getKey(); updaterStage = "Starting update flows on " + executor.getHost() + ":" + executor.getPort(); // We pack the parameters of the same host together before we // query. fillUpdateTimeAndExecId(entry.getValue(), executionIdsList, updateTimesList); Pair<String, String> updateTimes = new Pair<String, String>( ConnectorParams.UPDATE_TIME_LIST_PARAM, JSONUtils.toJSON(updateTimesList)); Pair<String, String> executionIds = new Pair<String, String>(ConnectorParams.EXEC_ID_LIST_PARAM, JSONUtils.toJSON(executionIdsList)); Map<String, Object> results = null; try { results = callExecutorServer(executor.getHost(), executor.getPort(), ConnectorParams.UPDATE_ACTION, null, null, executionIds, updateTimes); } catch (IOException e) { logger.error(e); for (ExecutableFlow flow : entry.getValue()) { Pair<ExecutionReference, ExecutableFlow> pair = runningFlows.get(flow.getExecutionId()); updaterStage = "Failed to get update. 
Doing some clean up for flow " + pair.getSecond().getExecutionId(); if (pair != null) { ExecutionReference ref = pair.getFirst(); int numErrors = ref.getNumErrors(); if (ref.getNumErrors() < this.numErrors) { ref.setNextCheckTime(System.currentTimeMillis() + errorThreshold); ref.setNumErrors(++numErrors); } else { logger.error("Evicting flow " + flow.getExecutionId() + ". The executor is unresponsive."); // TODO should send out an unresponsive email here. finalizeFlows.add(pair.getSecond()); } } } } // We gets results if (results != null) { List<Map<String, Object>> executionUpdates = (List<Map<String, Object>>) results .get(ConnectorParams.RESPONSE_UPDATED_FLOWS); for (Map<String, Object> updateMap : executionUpdates) { try { ExecutableFlow flow = updateExecution(updateMap); updaterStage = "Updated flow " + flow.getExecutionId(); if (isFinished(flow)) { finishedFlows.add(flow); finalizeFlows.add(flow); } } catch (ExecutorManagerException e) { ExecutableFlow flow = e.getExecutableFlow(); logger.error(e); if (flow != null) { logger.error("Finalizing flow " + flow.getExecutionId()); finalizeFlows.add(flow); } } } } } updaterStage = "Evicting old recently finished flows."; evictOldRecentlyFinished(recentlyFinishedLifetimeMs); // Add new finished for (ExecutableFlow flow : finishedFlows) { if (flow.getScheduleId() >= 0 && flow.getStatus() == Status.SUCCEEDED) { ScheduleStatisticManager.invalidateCache(flow.getScheduleId(), cacheDir); } fireEventListeners(Event.create(flow, Type.FLOW_FINISHED, new EventData(flow.getStatus()))); recentlyFinished.put(flow.getExecutionId(), flow); } updaterStage = "Finalizing " + finalizeFlows.size() + " error flows."; // Kill error flows for (ExecutableFlow flow : finalizeFlows) { finalizeFlows(flow); } } updaterStage = "Updated all active flows. Waiting for next round."; synchronized (this) { try { if (runningFlows.size() > 0) { this.wait(waitTimeMs); } else { this.wait(waitTimeIdleMs); } } catch (InterruptedException e) { } } } catch (Exception e) { logger.error(e); } } } } private void finalizeFlows(ExecutableFlow flow) { int execId = flow.getExecutionId(); boolean alertUser = true; updaterStage = "finalizing flow " + execId; // First we check if the execution in the datastore is complete try { ExecutableFlow dsFlow; if (isFinished(flow)) { dsFlow = flow; } else { updaterStage = "finalizing flow " + execId + " loading from db"; dsFlow = executorLoader.fetchExecutableFlow(execId); // If it's marked finished, we're good. If not, we fail everything and // then mark it finished. if (!isFinished(dsFlow)) { updaterStage = "finalizing flow " + execId + " failing the flow"; failEverything(dsFlow); executorLoader.updateExecutableFlow(dsFlow); } } updaterStage = "finalizing flow " + execId + " deleting active reference"; // Delete the executing reference. if (flow.getEndTime() == -1) { flow.setEndTime(System.currentTimeMillis()); executorLoader.updateExecutableFlow(dsFlow); } executorLoader.removeActiveExecutableReference(execId); updaterStage = "finalizing flow " + execId + " cleaning from memory"; runningFlows.remove(execId); fireEventListeners(Event.create(dsFlow, Type.FLOW_FINISHED, new EventData(dsFlow.getStatus()))); recentlyFinished.put(execId, dsFlow); } catch (ExecutorManagerException e) { alertUser = false; // failed due to azkaban internal error, not to alert user logger.error(e); } // TODO append to the flow log that we forced killed this flow because the // target no longer had // the reference. 
updaterStage = "finalizing flow " + execId + " alerting and emailing"; if(alertUser) { ExecutionOptions options = flow.getExecutionOptions(); // But we can definitely email them. Alerter mailAlerter = alerters.get("email"); if (flow.getStatus() == Status.FAILED || flow.getStatus() == Status.KILLED) { if (options.getFailureEmails() != null && !options.getFailureEmails().isEmpty()) { try { mailAlerter.alertOnError(flow); } catch (Exception e) { logger.error(e); } } if (options.getFlowParameters().containsKey("alert.type")) { String alertType = options.getFlowParameters().get("alert.type"); Alerter alerter = alerters.get(alertType); if (alerter != null) { try { alerter.alertOnError(flow); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); logger.error("Failed to alert by " + alertType); } } else { logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert."); } } } else { if (options.getSuccessEmails() != null && !options.getSuccessEmails().isEmpty()) { try { mailAlerter.alertOnSuccess(flow); } catch (Exception e) { logger.error(e); } } if (options.getFlowParameters().containsKey("alert.type")) { String alertType = options.getFlowParameters().get("alert.type"); Alerter alerter = alerters.get(alertType); if (alerter != null) { try { alerter.alertOnSuccess(flow); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); logger.error("Failed to alert by " + alertType); } } else { logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert."); } } } } } private void failEverything(ExecutableFlow exFlow) { long time = System.currentTimeMillis(); for (ExecutableNode node : exFlow.getExecutableNodes()) { switch (node.getStatus()) { case SUCCEEDED: case FAILED: case KILLED: case SKIPPED: case DISABLED: continue; // case UNKNOWN: case READY: node.setStatus(Status.KILLED); break; default: node.setStatus(Status.FAILED); break; } if (node.getStartTime() == -1) { node.setStartTime(time); } if (node.getEndTime() == -1) { node.setEndTime(time); } } if (exFlow.getEndTime() == -1) { exFlow.setEndTime(time); } exFlow.setStatus(Status.FAILED); } private void evictOldRecentlyFinished(long ageMs) { ArrayList<Integer> recentlyFinishedKeys = new ArrayList<Integer>(recentlyFinished.keySet()); long oldAgeThreshold = System.currentTimeMillis() - ageMs; for (Integer key : recentlyFinishedKeys) { ExecutableFlow flow = recentlyFinished.get(key); if (flow.getEndTime() < oldAgeThreshold) { // Evict recentlyFinished.remove(key); } } } private ExecutableFlow updateExecution(Map<String, Object> updateData) throws ExecutorManagerException { Integer execId = (Integer) updateData.get(ConnectorParams.UPDATE_MAP_EXEC_ID); if (execId == null) { throw new ExecutorManagerException( "Response is malformed. Need exec id to update."); } Pair<ExecutionReference, ExecutableFlow> refPair = this.runningFlows.get(execId); if (refPair == null) { throw new ExecutorManagerException( "No running flow found with the execution id. Removing " + execId); } ExecutionReference ref = refPair.getFirst(); ExecutableFlow flow = refPair.getSecond(); if (updateData.containsKey("error")) { // The flow should be finished here. throw new ExecutorManagerException((String) updateData.get("error"), flow); } // Reset errors. 
ref.setNextCheckTime(0); ref.setNumErrors(0); Status oldStatus = flow.getStatus(); flow.applyUpdateObject(updateData); Status newStatus = flow.getStatus(); if(oldStatus != newStatus && newStatus == Status.FAILED) { CommonMetrics.INSTANCE.markFlowFail(); } ExecutionOptions options = flow.getExecutionOptions(); if (oldStatus != newStatus && newStatus.equals(Status.FAILED_FINISHING)) { // We want to see if we should give an email status on first failure. if (options.getNotifyOnFirstFailure()) { Alerter mailAlerter = alerters.get("email"); try { mailAlerter.alertOnFirstError(flow); } catch (Exception e) { e.printStackTrace(); logger.error("Failed to send first error email." + e.getMessage()); } } if (options.getFlowParameters().containsKey("alert.type")) { String alertType = options.getFlowParameters().get("alert.type"); Alerter alerter = alerters.get(alertType); if (alerter != null) { try { alerter.alertOnFirstError(flow); } catch (Exception e) { // TODO Auto-generated catch block e.printStackTrace(); logger.error("Failed to alert by " + alertType); } } else { logger.error("Alerter type " + alertType + " doesn't exist. Failed to alert."); } } } return flow; } public boolean isFinished(ExecutableFlow flow) { switch (flow.getStatus()) { case SUCCEEDED: case FAILED: case KILLED: return true; default: return false; } } private void fillUpdateTimeAndExecId(List<ExecutableFlow> flows, List<Integer> executionIds, List<Long> updateTimes) { for (ExecutableFlow flow : flows) { executionIds.add(flow.getExecutionId()); updateTimes.add(flow.getUpdateTime()); } } /* Group Executable flow by Executors to reduce number of REST calls */ private Map<Executor, List<ExecutableFlow>> getFlowToExecutorMap() { HashMap<Executor, List<ExecutableFlow>> exFlowMap = new HashMap<Executor, List<ExecutableFlow>>(); for (Pair<ExecutionReference, ExecutableFlow> runningFlow : runningFlows .values()) { ExecutionReference ref = runningFlow.getFirst(); ExecutableFlow flow = runningFlow.getSecond(); Executor executor = ref.getExecutor(); // We can set the next check time to prevent the checking of certain // flows. if (ref.getNextCheckTime() >= System.currentTimeMillis()) { continue; } List<ExecutableFlow> flows = exFlowMap.get(executor); if (flows == null) { flows = new ArrayList<ExecutableFlow>(); exFlowMap.put(executor, flows); } flows.add(flow); } return exFlowMap; } @Override public int getExecutableFlows(int projectId, String flowId, int from, int length, List<ExecutableFlow> outputList) throws ExecutorManagerException { List<ExecutableFlow> flows = executorLoader.fetchFlowHistory(projectId, flowId, from, length); outputList.addAll(flows); return executorLoader.fetchNumExecutableFlows(projectId, flowId); } @Override public List<ExecutableFlow> getExecutableFlows(int projectId, String flowId, int from, int length, Status status) throws ExecutorManagerException { return executorLoader.fetchFlowHistory(projectId, flowId, from, length, status); } /* * cleaner thread to clean up execution_logs, etc in DB. Runs every day. */ private class CleanerThread extends Thread { // log file retention is 1 month. 
// check every day private static final long CLEANER_THREAD_WAIT_INTERVAL_MS = 24 * 60 * 60 * 1000; private final long executionLogsRetentionMs; private boolean shutdown = false; private long lastLogCleanTime = -1; public CleanerThread(long executionLogsRetentionMs) { this.executionLogsRetentionMs = executionLogsRetentionMs; this.setName("AzkabanWebServer-Cleaner-Thread"); } @SuppressWarnings("unused") public void shutdown() { shutdown = true; this.interrupt(); } public void run() { while (!shutdown) { synchronized (this) { try { lastCleanerThreadCheckTime = System.currentTimeMillis(); // Cleanup old stuff. long currentTime = System.currentTimeMillis(); if (currentTime - CLEANER_THREAD_WAIT_INTERVAL_MS > lastLogCleanTime) { cleanExecutionLogs(); lastLogCleanTime = currentTime; } wait(CLEANER_THREAD_WAIT_INTERVAL_MS); } catch (InterruptedException e) { logger.info("Interrupted. Probably to shut down."); } } } } private void cleanExecutionLogs() { logger.info("Cleaning old logs from execution_logs"); long cutoff = DateTime.now().getMillis() - executionLogsRetentionMs; logger.info("Cleaning old log files before " + new DateTime(cutoff).toString()); cleanOldExecutionLogs(DateTime.now().getMillis() - executionLogsRetentionMs); } } /** * Calls executor to dispatch the flow, update db to assign the executor and * in-memory state of executableFlow */ private void dispatch(ExecutionReference reference, ExecutableFlow exflow, Executor choosenExecutor) throws ExecutorManagerException { exflow.setUpdateTime(System.currentTimeMillis()); executorLoader.assignExecutor(choosenExecutor.getId(), exflow.getExecutionId()); try { callExecutorServer(exflow, choosenExecutor, ConnectorParams.EXECUTE_ACTION); } catch (ExecutorManagerException ex) { logger.error("Rolling back executor assignment for execution id:" + exflow.getExecutionId(), ex); executorLoader.unassignExecutor(exflow.getExecutionId()); throw new ExecutorManagerException(ex); } reference.setExecutor(choosenExecutor); // move from flow to running flows runningFlows.put(exflow.getExecutionId(), new Pair<ExecutionReference, ExecutableFlow>(reference, exflow)); logger.info(String.format( "Successfully dispatched exec %d with error count %d", exflow.getExecutionId(), reference.getNumErrors())); } /* * This thread is responsible for processing queued flows using dispatcher and * making rest api calls to executor server */ private class QueueProcessorThread extends Thread { private static final long QUEUE_PROCESSOR_WAIT_IN_MS = 1000; private final int maxDispatchingErrors; private final long activeExecutorRefreshWindowInMilisec; private final int activeExecutorRefreshWindowInFlows; private volatile boolean shutdown = false; private volatile boolean isActive = true; public QueueProcessorThread(boolean isActive, long activeExecutorRefreshWindowInTime, int activeExecutorRefreshWindowInFlows, int maxDispatchingErrors) { setActive(isActive); this.maxDispatchingErrors = maxDispatchingErrors; this.activeExecutorRefreshWindowInFlows = activeExecutorRefreshWindowInFlows; this.activeExecutorRefreshWindowInMilisec = activeExecutorRefreshWindowInTime; this.setName("AzkabanWebServer-QueueProcessor-Thread"); } public void setActive(boolean isActive) { this.isActive = isActive; logger.info("QueueProcessorThread active turned " + this.isActive); } public boolean isActive() { return isActive; } public void shutdown() { shutdown = true; this.interrupt(); } public void run() { // Loops till QueueProcessorThread is shutdown while (!shutdown) { synchronized (this) { try { // 
start processing queue if active, other wait for sometime if (isActive) { processQueuedFlows(activeExecutorRefreshWindowInMilisec, activeExecutorRefreshWindowInFlows); } wait(QUEUE_PROCESSOR_WAIT_IN_MS); } catch (Exception e) { logger.error( "QueueProcessorThread Interrupted. Probably to shut down.", e); } } } } /* Method responsible for processing the non-dispatched flows */ private void processQueuedFlows(long activeExecutorsRefreshWindow, int maxContinuousFlowProcessed) throws InterruptedException, ExecutorManagerException { long lastExecutorRefreshTime = 0; Pair<ExecutionReference, ExecutableFlow> runningCandidate; int currentContinuousFlowProcessed = 0; while (isActive() && (runningCandidate = queuedFlows.fetchHead()) != null) { ExecutionReference reference = runningCandidate.getFirst(); ExecutableFlow exflow = runningCandidate.getSecond(); long currentTime = System.currentTimeMillis(); // if we have dispatched more than maxContinuousFlowProcessed or // It has been more then activeExecutorsRefreshWindow millisec since we // refreshed if (currentTime - lastExecutorRefreshTime > activeExecutorsRefreshWindow || currentContinuousFlowProcessed >= maxContinuousFlowProcessed) { // Refresh executorInfo for all activeExecutors refreshExecutors(); lastExecutorRefreshTime = currentTime; currentContinuousFlowProcessed = 0; } /** * <pre> * TODO: Work around till we improve Filters to have a notion of GlobalSystemState. * Currently we try each queued flow once to infer a global busy state * Possible improvements:- * 1. Move system level filters in refreshExecutors and sleep if we have all executors busy after refresh * 2. Implement GlobalSystemState in selector or in a third place to manage system filters. Basically * taking out all the filters which do not depend on the flow but are still being part of Selector. * Assumptions:- * 1. no one else except QueueProcessor is updating ExecutableFlow update time * 2. 
re-attempting a flow (which has been tried before) is considered as all executors are busy * </pre> */ if(exflow.getUpdateTime() > lastExecutorRefreshTime) { // put back in the queue queuedFlows.enqueue(exflow, reference); long sleepInterval = activeExecutorsRefreshWindow - (currentTime - lastExecutorRefreshTime); // wait till next executor refresh sleep(sleepInterval); } else { exflow.setUpdateTime(currentTime); // process flow with current snapshot of activeExecutors selectExecutorAndDispatchFlow(reference, exflow, new HashSet<Executor>(activeExecutors)); } // do not count failed flow processsing (flows still in queue) if(queuedFlows.getFlow(exflow.getExecutionId()) == null) { currentContinuousFlowProcessed++; } } } /* process flow with a snapshot of available Executors */ private void selectExecutorAndDispatchFlow(ExecutionReference reference, ExecutableFlow exflow, Set<Executor> availableExecutors) throws ExecutorManagerException { synchronized (exflow) { Executor selectedExecutor = selectExecutor(exflow, availableExecutors); if (selectedExecutor != null) { try { dispatch(reference, exflow, selectedExecutor); } catch (ExecutorManagerException e) { logger.warn(String.format( "Executor %s responded with exception for exec: %d", selectedExecutor, exflow.getExecutionId()), e); handleDispatchExceptionCase(reference, exflow, selectedExecutor, availableExecutors); } } else { handleNoExecutorSelectedCase(reference, exflow); } } } /* Helper method to fetch overriding Executor, if a valid user has specifed otherwise return null */ private Executor getUserSpecifiedExecutor(ExecutionOptions options, int executionId) { Executor executor = null; if (options != null && options.getFlowParameters() != null && options.getFlowParameters().containsKey( ExecutionOptions.USE_EXECUTOR)) { try { int executorId = Integer.valueOf(options.getFlowParameters().get( ExecutionOptions.USE_EXECUTOR)); executor = fetchExecutor(executorId); if (executor == null) { logger .warn(String .format( "User specified executor id: %d for execution id: %d is not active, Looking up db.", executorId, executionId)); executor = executorLoader.fetchExecutor(executorId); if (executor == null) { logger .warn(String .format( "User specified executor id: %d for execution id: %d is missing from db. 
Defaulting to availableExecutors", executorId, executionId)); } } } catch (ExecutorManagerException ex) { logger.error("Failed to fetch user specified executor for exec_id = " + executionId, ex); } } return executor; } /* Choose Executor for exflow among the available executors */ private Executor selectExecutor(ExecutableFlow exflow, Set<Executor> availableExecutors) { Executor choosenExecutor = getUserSpecifiedExecutor(exflow.getExecutionOptions(), exflow.getExecutionId()); // If no executor was specified by admin if (choosenExecutor == null) { logger.info("Using dispatcher for execution id :" + exflow.getExecutionId()); ExecutorSelector selector = new ExecutorSelector(filterList, comparatorWeightsMap); choosenExecutor = selector.getBest(availableExecutors, exflow); } return choosenExecutor; } private void handleDispatchExceptionCase(ExecutionReference reference, ExecutableFlow exflow, Executor lastSelectedExecutor, Set<Executor> remainingExecutors) throws ExecutorManagerException { logger .info(String .format( "Reached handleDispatchExceptionCase stage for exec %d with error count %d", exflow.getExecutionId(), reference.getNumErrors())); reference.setNumErrors(reference.getNumErrors() + 1); if (reference.getNumErrors() > this.maxDispatchingErrors || remainingExecutors.size() <= 1) { logger.error("Failed to process queued flow"); finalizeFlows(exflow); } else { remainingExecutors.remove(lastSelectedExecutor); // try other executors except chosenExecutor selectExecutorAndDispatchFlow(reference, exflow, remainingExecutors); } } private void handleNoExecutorSelectedCase(ExecutionReference reference, ExecutableFlow exflow) throws ExecutorManagerException { logger .info(String .format( "Reached handleNoExecutorSelectedCase stage for exec %d with error count %d", exflow.getExecutionId(), reference.getNumErrors())); // TODO: handle scenario where a high priority flow failing to get // schedule can starve all others queuedFlows.enqueue(exflow, reference); } } }
1
12,605
This is smart but hacky! I would probably prefer an alternative solution that is more obvious to read and understand.
azkaban-azkaban
java
@@ -115,7 +115,7 @@ type Client interface { S3StreamingGet(ctx context.Context, region string, bucket string, key string) (io.ReadCloser, error) DescribeTable(ctx context.Context, region string, tableName string) (*dynamodbv1.Table, error) - UpdateTableCapacity(ctx context.Context, region string, tableName string, targetIndexRcu int64, targetIndexWcu int64) error + UpdateTableCapacity(ctx context.Context, region string, tableName string, targetTableRcu int64, targetTableWcu int64) (*dynamodbv1.Status, error) Regions() []string }
1
package aws // <!-- START clutchdoc --> // description: Multi-region client for Amazon Web Services. // <!-- END clutchdoc --> import ( "context" "io" "net/http" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/autoscaling" astypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/aws/aws-sdk-go-v2/service/dynamodb" "github.com/aws/aws-sdk-go-v2/service/ec2" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/aws-sdk-go-v2/service/kinesis" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/iancoleman/strcase" "github.com/uber-go/tally" "go.uber.org/zap" "golang.org/x/sync/semaphore" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/anypb" dynamodbv1 "github.com/lyft/clutch/backend/api/aws/dynamodb/v1" ec2v1 "github.com/lyft/clutch/backend/api/aws/ec2/v1" kinesisv1 "github.com/lyft/clutch/backend/api/aws/kinesis/v1" awsv1 "github.com/lyft/clutch/backend/api/config/service/aws/v1" topologyv1 "github.com/lyft/clutch/backend/api/topology/v1" "github.com/lyft/clutch/backend/service" ) const ( Name = "clutch.service.aws" ) func New(cfg *anypb.Any, logger *zap.Logger, scope tally.Scope) (service.Service, error) { ac := &awsv1.Config{} err := cfg.UnmarshalTo(ac) if err != nil { return nil, err } c := &client{ clients: make(map[string]*regionalClient, len(ac.Regions)), topologyObjectChan: make(chan *topologyv1.UpdateCacheRequest, topologyObjectChanBufferSize), topologyLock: semaphore.NewWeighted(1), log: logger, scope: scope, } clientRetries := 0 if ac.ClientConfig != nil && ac.ClientConfig.Retries >= 0 { clientRetries = int(ac.ClientConfig.Retries) } ds := getScalingLimits(ac) awsHTTPClient := &http.Client{} for _, region := range ac.Regions { regionCfg, err := config.LoadDefaultConfig(context.TODO(), config.WithHTTPClient(awsHTTPClient), config.WithRegion(region), config.WithRetryer(func() aws.Retryer { customRetryer := retry.NewStandard(func(so *retry.StandardOptions) { so.MaxAttempts = clientRetries }) return customRetryer }), ) if err != nil { return nil, err } c.clients[region] = &regionalClient{ region: region, dynamodbCfg: &awsv1.DynamodbConfig{ ScalingLimits: &awsv1.ScalingLimits{ MaxReadCapacityUnits: ds.MaxReadCapacityUnits, MaxWriteCapacityUnits: ds.MaxWriteCapacityUnits, MaxScaleFactor: ds.MaxScaleFactor, EnableOverride: ds.EnableOverride, }, }, s3: s3.NewFromConfig(regionCfg), kinesis: kinesis.NewFromConfig(regionCfg), ec2: ec2.NewFromConfig(regionCfg), autoscaling: autoscaling.NewFromConfig(regionCfg), dynamodb: dynamodb.NewFromConfig(regionCfg), } } return c, nil } type Client interface { DescribeInstances(ctx context.Context, region string, ids []string) ([]*ec2v1.Instance, error) TerminateInstances(ctx context.Context, region string, ids []string) error RebootInstances(ctx context.Context, region string, ids []string) error DescribeAutoscalingGroups(ctx context.Context, region string, names []string) ([]*ec2v1.AutoscalingGroup, error) ResizeAutoscalingGroup(ctx context.Context, region string, name string, size *ec2v1.AutoscalingGroupSize) error DescribeKinesisStream(ctx context.Context, region string, streamName string) (*kinesisv1.Stream, error) UpdateKinesisShardCount(ctx context.Context, region string, streamName string, targetShardCount int32) error S3StreamingGet(ctx context.Context, region string, bucket string, key string) (io.ReadCloser, error) DescribeTable(ctx 
context.Context, region string, tableName string) (*dynamodbv1.Table, error) UpdateTableCapacity(ctx context.Context, region string, tableName string, targetIndexRcu int64, targetIndexWcu int64) error Regions() []string } type client struct { clients map[string]*regionalClient topologyObjectChan chan *topologyv1.UpdateCacheRequest topologyLock *semaphore.Weighted log *zap.Logger scope tally.Scope } type regionalClient struct { region string dynamodbCfg *awsv1.DynamodbConfig s3 s3Client kinesis kinesisClient ec2 ec2Client autoscaling autoscalingClient dynamodb dynamodbClient } // Implement the interface provided by errorintercept, so errors are caught at middleware and converted to gRPC status. func (c *client) InterceptError(e error) error { return ConvertError(e) } func (c *client) getRegionalClient(region string) (*regionalClient, error) { rc, ok := c.clients[region] if !ok { return nil, status.Errorf(codes.NotFound, "no client found for region '%s'", region) } return rc, nil } func (c *client) ResizeAutoscalingGroup(ctx context.Context, region string, name string, size *ec2v1.AutoscalingGroupSize) error { rc, err := c.getRegionalClient(region) if err != nil { return err } input := &autoscaling.UpdateAutoScalingGroupInput{ AutoScalingGroupName: aws.String(name), DesiredCapacity: aws.Int32(int32(size.Desired)), MaxSize: aws.Int32(int32(size.Max)), MinSize: aws.Int32(int32(size.Min)), } _, err = rc.autoscaling.UpdateAutoScalingGroup(ctx, input) return err } func (c *client) DescribeAutoscalingGroups(ctx context.Context, region string, names []string) ([]*ec2v1.AutoscalingGroup, error) { cl, err := c.getRegionalClient(region) if err != nil { return nil, err } input := &autoscaling.DescribeAutoScalingGroupsInput{ AutoScalingGroupNames: names, } result, err := cl.autoscaling.DescribeAutoScalingGroups(ctx, input) if err != nil { return nil, err } ret := make([]*ec2v1.AutoscalingGroup, len(result.AutoScalingGroups)) for idx, group := range result.AutoScalingGroups { ret[idx] = newProtoForAutoscalingGroup(group) } return ret, nil } // Shave off the trailing zone identifier to get the region func zoneToRegion(zone string) string { if zone == "" { return "UNKNOWN" } return zone[:len(zone)-1] } func protoForTerminationPolicy(policy string) ec2v1.AutoscalingGroup_TerminationPolicy { policy = strcase.ToScreamingSnake(policy) val, ok := ec2v1.AutoscalingGroup_TerminationPolicy_value[policy] if !ok { return ec2v1.AutoscalingGroup_UNKNOWN } return ec2v1.AutoscalingGroup_TerminationPolicy(val) } func protoForAutoscalingGroupInstanceLifecycleState(state string) ec2v1.AutoscalingGroup_Instance_LifecycleState { state = strcase.ToScreamingSnake(strings.ReplaceAll(state, ":", "")) val, ok := ec2v1.AutoscalingGroup_Instance_LifecycleState_value[state] if !ok { return ec2v1.AutoscalingGroup_Instance_UNKNOWN } return ec2v1.AutoscalingGroup_Instance_LifecycleState(val) } func newProtoForAutoscalingGroupInstance(instance astypes.Instance) *ec2v1.AutoscalingGroup_Instance { return &ec2v1.AutoscalingGroup_Instance{ Id: aws.ToString(instance.InstanceId), Zone: aws.ToString(instance.AvailabilityZone), LaunchConfigurationName: aws.ToString(instance.LaunchConfigurationName), Healthy: aws.ToString(instance.HealthStatus) == "HEALTHY", LifecycleState: protoForAutoscalingGroupInstanceLifecycleState(string(instance.LifecycleState)), } } func newProtoForAutoscalingGroup(group astypes.AutoScalingGroup) *ec2v1.AutoscalingGroup { pb := &ec2v1.AutoscalingGroup{ Name: aws.ToString(group.AutoScalingGroupName), Zones: 
group.AvailabilityZones, Size: &ec2v1.AutoscalingGroupSize{ Min: uint32(aws.ToInt32(group.MinSize)), Max: uint32(aws.ToInt32(group.MaxSize)), Desired: uint32(aws.ToInt32(group.DesiredCapacity)), }, } if len(pb.Zones) > 0 { pb.Region = zoneToRegion(pb.Zones[0]) } pb.TerminationPolicies = make([]ec2v1.AutoscalingGroup_TerminationPolicy, len(group.TerminationPolicies)) for idx, p := range group.TerminationPolicies { pb.TerminationPolicies[idx] = protoForTerminationPolicy(p) } pb.Instances = make([]*ec2v1.AutoscalingGroup_Instance, len(group.Instances)) for idx, i := range group.Instances { pb.Instances[idx] = newProtoForAutoscalingGroupInstance(i) } return pb } func (c *client) Regions() []string { regions := make([]string, 0, len(c.clients)) for region := range c.clients { regions = append(regions, region) } return regions } func (c *client) DescribeInstances(ctx context.Context, region string, ids []string) ([]*ec2v1.Instance, error) { cl, err := c.getRegionalClient(region) if err != nil { return nil, err } input := &ec2.DescribeInstancesInput{InstanceIds: ids} result, err := cl.ec2.DescribeInstances(ctx, input) if err != nil { return nil, err } var ret []*ec2v1.Instance for _, r := range result.Reservations { for _, i := range r.Instances { ret = append(ret, newProtoForInstance(i)) } } return ret, nil } func (c *client) TerminateInstances(ctx context.Context, region string, ids []string) error { cl, err := c.getRegionalClient(region) if err != nil { return err } input := &ec2.TerminateInstancesInput{InstanceIds: ids} _, err = cl.ec2.TerminateInstances(ctx, input) return err } func (c *client) RebootInstances(ctx context.Context, region string, ids []string) error { cl, err := c.getRegionalClient(region) if err != nil { return err } input := &ec2.RebootInstancesInput{InstanceIds: ids} _, err = cl.ec2.RebootInstances(ctx, input) return err } func protoForInstanceState(state string) ec2v1.Instance_State { // Transform kebab case 'shutting-down' to upper snake case 'SHUTTING_DOWN'. state = strings.ReplaceAll(strings.ToUpper(state), "-", "_") // Look up value in generated enum map. val, ok := ec2v1.Instance_State_value[state] if !ok { return ec2v1.Instance_UNKNOWN } return ec2v1.Instance_State(val) } func newProtoForInstance(i ec2types.Instance) *ec2v1.Instance { ret := &ec2v1.Instance{ InstanceId: aws.ToString(i.InstanceId), State: protoForInstanceState(string(i.State.Name)), InstanceType: string(i.InstanceType), PublicIpAddress: aws.ToString(i.PublicIpAddress), PrivateIpAddress: aws.ToString(i.PrivateIpAddress), AvailabilityZone: aws.ToString(i.Placement.AvailabilityZone), } ret.Region = zoneToRegion(ret.AvailabilityZone) // Transform tag list to map. ret.Tags = make(map[string]string, len(i.Tags)) for _, tag := range i.Tags { ret.Tags[aws.ToString(tag.Key)] = aws.ToString(tag.Value) } return ret }
1
11,691
I would not return a pointer for the status here, since it is just an int.
lyft-clutch
go
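For the lyft-clutch record above, the reviewer suggests returning the status by value rather than as a pointer, since a protobuf enum is just an int32. Below is a minimal sketch of that alternative signature; the Status type is a hypothetical stand-in, because the generated dynamodbv1.Status definition is not part of this record.

package main

import (
	"context"
	"fmt"
)

// Stand-in for a generated protobuf status enum (an int32 under the hood).
type Status int32

const (
	StatusUnknown  Status = 0
	StatusUpdating Status = 1
)

// Returning the enum by value: the zero value already means "unknown",
// so callers need no nil check and no pointer is allocated.
func UpdateTableCapacity(ctx context.Context, region, tableName string, targetTableRcu, targetTableWcu int64) (Status, error) {
	// A real implementation would call the DynamoDB UpdateTable API here;
	// this sketch only demonstrates the signature the reviewer asks for.
	return StatusUpdating, nil
}

func main() {
	s, err := UpdateTableCapacity(context.Background(), "us-east-1", "example-table", 100, 100)
	fmt.Println(s, err)
}

Compared with returning *dynamodbv1.Status, the value return avoids an allocation and a nil check for what is effectively an integer; callers branch on err first and then read the status directly.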
@@ -67,6 +67,10 @@ exclude = ["AuthalicMatrixCoefficients", "vnl_file_matrix", "vnl_file_vector", "vnl_fortran_copy", + "CosineWindowFunction", + "HammingWindowFunction", + "LanczosWindowFunction", + "WelchWindowFunction", ] total = 0
1
#========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #==========================================================================*/ from __future__ import print_function import itk import sys from itkTemplate import itkTemplate itk.auto_progress(2) itk.force_load() def isEmpty(o): for i in dir(o): if i[0].isupper(): return False return True exclude = ["AuthalicMatrixCoefficients", "MatrixCoefficients", "OnesMatrixCoefficients", "IntrinsicMatrixCoefficients", "HarmonicMatrixCoefficients", "ConformalMatrixCoefficients", "InverseEuclideanDistanceMatrixCoefficients", "BandNode", "NormalBandNode", "CellTraitsInfo", "DefaultDynamicMeshTraits", "DefaultStaticMeshTraits", "ParallelSparseFieldLevelSetNode", "SparseFieldLevelSetNode", "QuadEdgeMeshCellTraitsInfo", "QuadEdgeMeshTraits", "complex", "list", "map", "numeric_limits", "set", "vector", "vnl_c_vector", "vnl_diag_matrix", "vnl_matrix", "vnl_matrix_fixed", "vnl_matrix_fixed_ref", "vnl_matrix_fixed_ref_const", "vnl_matrix_ref", "vnl_vector", "vnl_vector_ref", "vnl_file_matrix", "vnl_file_vector", "vnl_fortran_copy", ] total = 0 empty = 0 for t in dir(itk): if t not in exclude: T = itk.__dict__[t] if isinstance(T, itkTemplate): for I in T.values(): total += 1 if isEmpty(I): empty += 1 print("%s: empty class" % I) print("%s classes checked." % total) if empty: print("%s empty classes found" % empty, file=sys.stderr) sys.exit(1)
1
10,418
Those functions are not currently wrapped, so I don't think it is necessary to exclude them (at least for now).
InsightSoftwareConsortium-ITK
cpp
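For the InsightSoftwareConsortium-ITK record above, the reviewer notes that the newly excluded window functions are not wrapped, so listing them in exclude has no effect: the test only iterates over names that dir(itk) actually exposes. A small sketch, assuming an ITK build with Python wrapping installed, of how one might check the candidate names before growing the exclude list (the names come from the patch; the rest is illustrative):

import itk

candidates = [
    "CosineWindowFunction",
    "HammingWindowFunction",
    "LanczosWindowFunction",
    "WelchWindowFunction",
]

# Names that never show up in dir(itk) are never visited by the test loop,
# so excluding them is a no-op.
wrapped = set(dir(itk))
for name in candidates:
    print(name, "wrapped" if name in wrapped else "not wrapped")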
@@ -89,6 +89,7 @@ class Uppy { enterTextToSearch: 'Enter text to search for images', backToSearch: 'Back to Search', emptyFolderAdded: 'No files were added from empty folder', + folderAlreadyAdded: 'The folder was already added', folderAdded: { 0: 'Added %{smart_count} file from %{folder}', 1: 'Added %{smart_count} files from %{folder}',
1
/* global AggregateError */ const Translator = require('@uppy/utils/lib/Translator') const ee = require('namespace-emitter') const cuid = require('cuid') const throttle = require('lodash.throttle') const prettierBytes = require('@transloadit/prettier-bytes') const match = require('mime-match') const DefaultStore = require('@uppy/store-default') const getFileType = require('@uppy/utils/lib/getFileType') const getFileNameAndExtension = require('@uppy/utils/lib/getFileNameAndExtension') const generateFileID = require('@uppy/utils/lib/generateFileID') const findIndex = require('@uppy/utils/lib/findIndex') const supportsUploadProgress = require('./supportsUploadProgress') const { justErrorsLogger, debugLogger } = require('./loggers') const UIPlugin = require('./UIPlugin') const BasePlugin = require('./BasePlugin') const { version } = require('../package.json') // Exported from here. class RestrictionError extends Error { constructor (...args) { super(...args) this.isRestriction = true } } /** * Uppy Core module. * Manages plugins, state updates, acts as an event bus, * adds/removes files and metadata. */ class Uppy { static VERSION = version /** * Instantiate Uppy * * @param {object} opts — Uppy options */ constructor (opts) { this.defaultLocale = { strings: { addBulkFilesFailed: { 0: 'Failed to add %{smart_count} file due to an internal error', 1: 'Failed to add %{smart_count} files due to internal errors', }, youCanOnlyUploadX: { 0: 'You can only upload %{smart_count} file', 1: 'You can only upload %{smart_count} files', }, youHaveToAtLeastSelectX: { 0: 'You have to select at least %{smart_count} file', 1: 'You have to select at least %{smart_count} files', }, // The default `exceedsSize2` string only combines the `exceedsSize` string (%{backwardsCompat}) with the size. // Locales can override `exceedsSize2` to specify a different word order. This is for backwards compat with // Uppy 1.9.x and below which did a naive concatenation of `exceedsSize2 + size` instead of using a locale-specific // substitution. // TODO: In 2.0 `exceedsSize2` should be removed in and `exceedsSize` updated to use substitution. 
exceedsSize2: '%{backwardsCompat} %{size}', exceedsSize: '%{file} exceeds maximum allowed size of', inferiorSize: 'This file is smaller than the allowed size of %{size}', youCanOnlyUploadFileTypes: 'You can only upload: %{types}', noNewAlreadyUploading: 'Cannot add new files: already uploading', noDuplicates: 'Cannot add the duplicate file \'%{fileName}\', it already exists', companionError: 'Connection with Companion failed', companionUnauthorizeHint: 'To unauthorize to your %{provider} account, please go to %{url}', failedToUpload: 'Failed to upload %{file}', noInternetConnection: 'No Internet connection', connectedToInternet: 'Connected to the Internet', // Strings for remote providers noFilesFound: 'You have no files or folders here', selectX: { 0: 'Select %{smart_count}', 1: 'Select %{smart_count}', }, allFilesFromFolderNamed: 'All files from folder %{name}', selectFileNamed: 'Select file %{name}', unselectFileNamed: 'Unselect file %{name}', openFolderNamed: 'Open folder %{name}', cancel: 'Cancel', logOut: 'Log out', filter: 'Filter', resetFilter: 'Reset filter', loading: 'Loading...', authenticateWithTitle: 'Please authenticate with %{pluginName} to select files', authenticateWith: 'Connect to %{pluginName}', searchImages: 'Search for images', enterTextToSearch: 'Enter text to search for images', backToSearch: 'Back to Search', emptyFolderAdded: 'No files were added from empty folder', folderAdded: { 0: 'Added %{smart_count} file from %{folder}', 1: 'Added %{smart_count} files from %{folder}', }, }, } const defaultOptions = { id: 'uppy', autoProceed: false, allowMultipleUploads: true, debug: false, restrictions: { maxFileSize: null, minFileSize: null, maxTotalFileSize: null, maxNumberOfFiles: null, minNumberOfFiles: null, allowedFileTypes: null, }, meta: {}, onBeforeFileAdded: (currentFile) => currentFile, onBeforeUpload: (files) => files, store: DefaultStore(), logger: justErrorsLogger, infoTimeout: 5000, } // Merge default options with the ones set by user, // making sure to merge restrictions too this.opts = { ...defaultOptions, ...opts, restrictions: { ...defaultOptions.restrictions, ...(opts && opts.restrictions), }, } // Support debug: true for backwards-compatability, unless logger is set in opts // opts instead of this.opts to avoid comparing objects — we set logger: justErrorsLogger in defaultOptions if (opts && opts.logger && opts.debug) { this.log('You are using a custom `logger`, but also set `debug: true`, which uses built-in logger to output logs to console. Ignoring `debug: true` and using your custom `logger`.', 'warning') } else if (opts && opts.debug) { this.opts.logger = debugLogger } this.log(`Using Core v${this.constructor.VERSION}`) if (this.opts.restrictions.allowedFileTypes && this.opts.restrictions.allowedFileTypes !== null && !Array.isArray(this.opts.restrictions.allowedFileTypes)) { throw new TypeError('`restrictions.allowedFileTypes` must be an array') } this.i18nInit() // Container for different types of plugins this.plugins = {} this.getState = this.getState.bind(this) this.getPlugin = this.getPlugin.bind(this) this.setFileMeta = this.setFileMeta.bind(this) this.setFileState = this.setFileState.bind(this) this.log = this.log.bind(this) this.info = this.info.bind(this) this.hideInfo = this.hideInfo.bind(this) this.addFile = this.addFile.bind(this) this.removeFile = this.removeFile.bind(this) this.pauseResume = this.pauseResume.bind(this) this.validateRestrictions = this.validateRestrictions.bind(this) // ___Why throttle at 500ms? 
// - We must throttle at >250ms for superfocus in Dashboard to work well // (because animation takes 0.25s, and we want to wait for all animations to be over before refocusing). // [Practical Check]: if thottle is at 100ms, then if you are uploading a file, // and click 'ADD MORE FILES', - focus won't activate in Firefox. // - We must throttle at around >500ms to avoid performance lags. // [Practical Check] Firefox, try to upload a big file for a prolonged period of time. Laptop will start to heat up. this.calculateProgress = throttle(this.calculateProgress.bind(this), 500, { leading: true, trailing: true }) this.updateOnlineStatus = this.updateOnlineStatus.bind(this) this.resetProgress = this.resetProgress.bind(this) this.pauseAll = this.pauseAll.bind(this) this.resumeAll = this.resumeAll.bind(this) this.retryAll = this.retryAll.bind(this) this.cancelAll = this.cancelAll.bind(this) this.retryUpload = this.retryUpload.bind(this) this.upload = this.upload.bind(this) this.emitter = ee() this.on = this.on.bind(this) this.off = this.off.bind(this) this.once = this.emitter.once.bind(this.emitter) this.emit = this.emitter.emit.bind(this.emitter) this.preProcessors = [] this.uploaders = [] this.postProcessors = [] this.store = this.opts.store this.setState({ plugins: {}, files: {}, currentUploads: {}, allowNewUpload: true, capabilities: { uploadProgress: supportsUploadProgress(), individualCancellation: true, resumableUploads: false, }, totalProgress: 0, meta: { ...this.opts.meta }, info: { isHidden: true, type: 'info', message: '', }, recoveredState: null, }) this.storeUnsubscribe = this.store.subscribe((prevState, nextState, patch) => { this.emit('state-update', prevState, nextState, patch) this.updateAll(nextState) }) // Exposing uppy object on window for debugging and testing if (this.opts.debug && typeof window !== 'undefined') { window[this.opts.id] = this } this.addListeners() } on (event, callback) { this.emitter.on(event, callback) return this } off (event, callback) { this.emitter.off(event, callback) return this } /** * Iterate on all plugins and run `update` on them. * Called each time state changes. * */ updateAll (state) { this.iteratePlugins(plugin => { plugin.update(state) }) } /** * Updates state with a patch * * @param {object} patch {foo: 'bar'} */ setState (patch) { this.store.setState(patch) } /** * Returns current state. * * @returns {object} */ getState () { return this.store.getState() } /** * Back compat for when uppy.state is used instead of uppy.getState(). */ get state () { return this.getState() } /** * Shorthand to set state for a specific file. */ setFileState (fileID, state) { if (!this.getState().files[fileID]) { throw new Error(`Can’t set state for ${fileID} (the file could have been removed)`) } this.setState({ files: { ...this.getState().files, [fileID]: { ...this.getState().files[fileID], ...state } }, }) } i18nInit () { this.translator = new Translator([this.defaultLocale, this.opts.locale]) this.locale = this.translator.locale this.i18n = this.translator.translate.bind(this.translator) this.i18nArray = this.translator.translateArray.bind(this.translator) } setOptions (newOpts) { this.opts = { ...this.opts, ...newOpts, restrictions: { ...this.opts.restrictions, ...(newOpts && newOpts.restrictions), }, } if (newOpts.meta) { this.setMeta(newOpts.meta) } this.i18nInit() if (newOpts.locale) { this.iteratePlugins((plugin) => { plugin.setOptions() }) } // Note: this is not the preact `setState`, it's an internal function that has the same name. 
this.setState() // so that UI re-renders with new options } resetProgress () { const defaultProgress = { percentage: 0, bytesUploaded: 0, uploadComplete: false, uploadStarted: null, } const files = { ...this.getState().files } const updatedFiles = {} Object.keys(files).forEach(fileID => { const updatedFile = { ...files[fileID] } updatedFile.progress = { ...updatedFile.progress, ...defaultProgress } updatedFiles[fileID] = updatedFile }) this.setState({ files: updatedFiles, totalProgress: 0, }) this.emit('reset-progress') } addPreProcessor (fn) { this.preProcessors.push(fn) } removePreProcessor (fn) { const i = this.preProcessors.indexOf(fn) if (i !== -1) { this.preProcessors.splice(i, 1) } } addPostProcessor (fn) { this.postProcessors.push(fn) } removePostProcessor (fn) { const i = this.postProcessors.indexOf(fn) if (i !== -1) { this.postProcessors.splice(i, 1) } } addUploader (fn) { this.uploaders.push(fn) } removeUploader (fn) { const i = this.uploaders.indexOf(fn) if (i !== -1) { this.uploaders.splice(i, 1) } } setMeta (data) { const updatedMeta = { ...this.getState().meta, ...data } const updatedFiles = { ...this.getState().files } Object.keys(updatedFiles).forEach((fileID) => { updatedFiles[fileID] = { ...updatedFiles[fileID], meta: { ...updatedFiles[fileID].meta, ...data } } }) this.log('Adding metadata:') this.log(data) this.setState({ meta: updatedMeta, files: updatedFiles, }) } setFileMeta (fileID, data) { const updatedFiles = { ...this.getState().files } if (!updatedFiles[fileID]) { this.log('Was trying to set metadata for a file that has been removed: ', fileID) return } const newMeta = { ...updatedFiles[fileID].meta, ...data } updatedFiles[fileID] = { ...updatedFiles[fileID], meta: newMeta } this.setState({ files: updatedFiles }) } /** * Get a file object. * * @param {string} fileID The ID of the file object to return. */ getFile (fileID) { return this.getState().files[fileID] } /** * Get all files in an array. 
*/ getFiles () { const { files } = this.getState() return Object.values(files) } getObjectOfFilesPerState () { const { files: filesObject, totalProgress, error } = this.getState() const files = Object.values(filesObject) const inProgressFiles = files.filter(({ progress }) => !progress.uploadComplete && progress.uploadStarted) const newFiles = files.filter((file) => !file.progress.uploadStarted) const startedFiles = files.filter( file => file.progress.uploadStarted || file.progress.preprocess || file.progress.postprocess ) const uploadStartedFiles = files.filter((file) => file.progress.uploadStarted) const pausedFiles = files.filter((file) => file.isPaused) const completeFiles = files.filter((file) => file.progress.uploadComplete) const erroredFiles = files.filter((file) => file.error) const inProgressNotPausedFiles = inProgressFiles.filter((file) => !file.isPaused) const processingFiles = files.filter((file) => file.progress.preprocess || file.progress.postprocess) return { newFiles, startedFiles, uploadStartedFiles, pausedFiles, completeFiles, erroredFiles, inProgressFiles, inProgressNotPausedFiles, processingFiles, isUploadStarted: uploadStartedFiles.length > 0, isAllComplete: totalProgress === 100 && completeFiles.length === files.length && processingFiles.length === 0, isAllErrored: !!error && erroredFiles.length === files.length, isAllPaused: inProgressFiles.length !== 0 && pausedFiles.length === inProgressFiles.length, isUploadInProgress: inProgressFiles.length > 0, isSomeGhost: files.some(file => file.isGhost), } } /** * A public wrapper for _checkRestrictions — checks if a file passes a set of restrictions. * For use in UI pluigins (like Providers), to disallow selecting files that won’t pass restrictions. * * @param {object} file object to check * @param {Array} [files] array to check maxNumberOfFiles and maxTotalFileSize * @returns {object} { result: true/false, reason: why file didn’t pass restrictions } */ validateRestrictions (file, files) { try { this.checkRestrictions(file, files) return { result: true, } } catch (err) { return { result: false, reason: err.message, } } } /** * Check if file passes a set of restrictions set in options: maxFileSize, minFileSize, * maxNumberOfFiles and allowedFileTypes. * * @param {object} file object to check * @param {Array} [files] array to check maxNumberOfFiles and maxTotalFileSize * @private */ checkRestrictions (file, files = this.getFiles()) { const { maxFileSize, minFileSize, maxTotalFileSize, maxNumberOfFiles, allowedFileTypes } = this.opts.restrictions if (maxNumberOfFiles) { if (files.length + 1 > maxNumberOfFiles) { throw new RestrictionError(`${this.i18n('youCanOnlyUploadX', { smart_count: maxNumberOfFiles })}`) } } if (allowedFileTypes) { const isCorrectFileType = allowedFileTypes.some((type) => { // check if this is a mime-type if (type.indexOf('/') > -1) { if (!file.type) return false return match(file.type.replace(/;.*?$/, ''), type) } // otherwise this is likely an extension if (type[0] === '.' && file.extension) { return file.extension.toLowerCase() === type.substr(1).toLowerCase() } return false }) if (!isCorrectFileType) { const allowedFileTypesString = allowedFileTypes.join(', ') throw new RestrictionError(this.i18n('youCanOnlyUploadFileTypes', { types: allowedFileTypesString })) } } // We can't check maxTotalFileSize if the size is unknown. 
if (maxTotalFileSize && file.size != null) { let totalFilesSize = 0 totalFilesSize += file.size files.forEach((f) => { totalFilesSize += f.size }) if (totalFilesSize > maxTotalFileSize) { throw new RestrictionError(this.i18n('exceedsSize2', { backwardsCompat: this.i18n('exceedsSize'), size: prettierBytes(maxTotalFileSize), file: file.name, })) } } // We can't check maxFileSize if the size is unknown. if (maxFileSize && file.size != null) { if (file.size > maxFileSize) { throw new RestrictionError(this.i18n('exceedsSize2', { backwardsCompat: this.i18n('exceedsSize'), size: prettierBytes(maxFileSize), file: file.name, })) } } // We can't check minFileSize if the size is unknown. if (minFileSize && file.size != null) { if (file.size < minFileSize) { throw new RestrictionError(this.i18n('inferiorSize', { size: prettierBytes(minFileSize), })) } } } /** * Check if minNumberOfFiles restriction is reached before uploading. * * @private */ checkMinNumberOfFiles (files) { const { minNumberOfFiles } = this.opts.restrictions if (Object.keys(files).length < minNumberOfFiles) { throw new RestrictionError(`${this.i18n('youHaveToAtLeastSelectX', { smart_count: minNumberOfFiles })}`) } } /** * Logs an error, sets Informer message, then throws the error. * Emits a 'restriction-failed' event if it’s a restriction error * * @param {object | string} err — Error object or plain string message * @param {object} [options] * @param {boolean} [options.showInformer=true] — Sometimes developer might want to show Informer manually * @param {object} [options.file=null] — File object used to emit the restriction error * @param {boolean} [options.throwErr=true] — Errors shouldn’t be thrown, for example, in `upload-error` event * @private */ showOrLogErrorAndThrow (err, { showInformer = true, file = null, throwErr = true } = {}) { const message = typeof err === 'object' ? err.message : err const details = (typeof err === 'object' && err.details) ? err.details : '' // Restriction errors should be logged, but not as errors, // as they are expected and shown in the UI. let logMessageWithDetails = message if (details) { logMessageWithDetails += ` ${details}` } if (err.isRestriction) { this.log(logMessageWithDetails) this.emit('restriction-failed', file, err) } else { this.log(logMessageWithDetails, 'error') } // Sometimes informer has to be shown manually by the developer, // for example, in `onBeforeFileAdded`. if (showInformer) { this.info({ message, details }, 'error', this.opts.infoTimeout) } if (throwErr) { throw (typeof err === 'object' ? err : new Error(err)) } } assertNewUploadAllowed (file) { const { allowNewUpload } = this.getState() if (allowNewUpload === false) { this.showOrLogErrorAndThrow(new RestrictionError(this.i18n('noNewAlreadyUploading')), { file }) } } /** * Create a file state object based on user-provided `addFile()` options. * * Note this is extremely side-effectful and should only be done when a file state object will be added to state immediately afterward! * * The `files` value is passed in because it may be updated by the caller without updating the store. 
*/ checkAndCreateFileStateObject (files, fileDescriptor) { const fileType = getFileType(fileDescriptor) let fileName if (fileDescriptor.name) { fileName = fileDescriptor.name } else if (fileType.split('/')[0] === 'image') { fileName = `${fileType.split('/')[0]}.${fileType.split('/')[1]}` } else { fileName = 'noname' } const fileExtension = getFileNameAndExtension(fileName).extension const isRemote = Boolean(fileDescriptor.isRemote) const fileID = generateFileID({ ...fileDescriptor, type: fileType, }) if (files[fileID] && !files[fileID].isGhost) { this.showOrLogErrorAndThrow( new RestrictionError(this.i18n('noDuplicates', { fileName })), { fileDescriptor } ) } const meta = fileDescriptor.meta || {} meta.name = fileName meta.type = fileType // `null` means the size is unknown. const size = Number.isFinite(fileDescriptor.data.size) ? fileDescriptor.data.size : null let newFile = { source: fileDescriptor.source || '', id: fileID, name: fileName, extension: fileExtension || '', meta: { ...this.getState().meta, ...meta, }, type: fileType, data: fileDescriptor.data, progress: { percentage: 0, bytesUploaded: 0, bytesTotal: size, uploadComplete: false, uploadStarted: null, }, size, isRemote, remote: fileDescriptor.remote || '', preview: fileDescriptor.preview, } const onBeforeFileAddedResult = this.opts.onBeforeFileAdded(newFile, files) if (onBeforeFileAddedResult === false) { // Don’t show UI info for this error, as it should be done by the developer this.showOrLogErrorAndThrow(new RestrictionError('Cannot add the file because onBeforeFileAdded returned false.'), { showInformer: false, fileDescriptor }) } else if (typeof onBeforeFileAddedResult === 'object' && onBeforeFileAddedResult !== null) { newFile = onBeforeFileAddedResult } try { const filesArray = Object.keys(files).map(i => files[i]) this.checkRestrictions(newFile, filesArray) } catch (err) { this.showOrLogErrorAndThrow(err, { file: newFile }) } return newFile } // Schedule an upload if `autoProceed` is enabled. startIfAutoProceed () { if (this.opts.autoProceed && !this.scheduledAutoProceed) { this.scheduledAutoProceed = setTimeout(() => { this.scheduledAutoProceed = null this.upload().catch((err) => { if (!err.isRestriction) { this.log(err.stack || err.message || err) } }) }, 4) } } /** * Add a new file to `state.files`. This will run `onBeforeFileAdded`, * try to guess file type in a clever way, check file against restrictions, * and start an upload if `autoProceed === true`. * * @param {object} file object to add * @returns {string} id for the added file */ addFile (file) { this.assertNewUploadAllowed(file) const { files } = this.getState() let newFile = this.checkAndCreateFileStateObject(files, file) // Users are asked to re-select recovered files without data, // and to keep the progress, meta and everthing else, we only replace said data if (files[newFile.id] && files[newFile.id].isGhost) { newFile = { ...files[newFile.id], data: file.data, isGhost: false, } this.log(`Replaced the blob in the restored ghost file: ${newFile.name}, ${newFile.id}`) } this.setState({ files: { ...files, [newFile.id]: newFile, }, }) this.emit('file-added', newFile) this.emit('files-added', [newFile]) this.log(`Added file: ${newFile.name}, ${newFile.id}, mime type: ${newFile.type}`) this.startIfAutoProceed() return newFile.id } /** * Add multiple files to `state.files`. See the `addFile()` documentation. * * If an error occurs while adding a file, it is logged and the user is notified. * This is good for UI plugins, but not for programmatic use. 
* Programmatic users should usually still use `addFile()` on individual files. */ addFiles (fileDescriptors) { this.assertNewUploadAllowed() // create a copy of the files object only once const files = { ...this.getState().files } const newFiles = [] const errors = [] for (let i = 0; i < fileDescriptors.length; i++) { try { let newFile = this.checkAndCreateFileStateObject(files, fileDescriptors[i]) // Users are asked to re-select recovered files without data, // and to keep the progress, meta and everthing else, we only replace said data if (files[newFile.id] && files[newFile.id].isGhost) { newFile = { ...files[newFile.id], data: fileDescriptors[i].data, isGhost: false, } this.log(`Replaced blob in a ghost file: ${newFile.name}, ${newFile.id}`) } files[newFile.id] = newFile newFiles.push(newFile) } catch (err) { if (!err.isRestriction) { errors.push(err) } } } this.setState({ files }) newFiles.forEach((newFile) => { this.emit('file-added', newFile) }) this.emit('files-added', newFiles) if (newFiles.length > 5) { this.log(`Added batch of ${newFiles.length} files`) } else { Object.keys(newFiles).forEach(fileID => { this.log(`Added file: ${newFiles[fileID].name}\n id: ${newFiles[fileID].id}\n type: ${newFiles[fileID].type}`) }) } if (newFiles.length > 0) { this.startIfAutoProceed() } if (errors.length > 0) { let message = 'Multiple errors occurred while adding files:\n' errors.forEach((subError) => { message += `\n * ${subError.message}` }) this.info({ message: this.i18n('addBulkFilesFailed', { smart_count: errors.length }), details: message, }, 'error', this.opts.infoTimeout) if (typeof AggregateError === 'function') { throw new AggregateError(errors, message) } else { const err = new Error(message) err.errors = errors throw err } } } removeFiles (fileIDs, reason) { const { files, currentUploads } = this.getState() const updatedFiles = { ...files } const updatedUploads = { ...currentUploads } const removedFiles = Object.create(null) fileIDs.forEach((fileID) => { if (files[fileID]) { removedFiles[fileID] = files[fileID] delete updatedFiles[fileID] } }) // Remove files from the `fileIDs` list in each upload. function fileIsNotRemoved (uploadFileID) { return removedFiles[uploadFileID] === undefined } Object.keys(updatedUploads).forEach((uploadID) => { const newFileIDs = currentUploads[uploadID].fileIDs.filter(fileIsNotRemoved) // Remove the upload if no files are associated with it anymore. 
if (newFileIDs.length === 0) { delete updatedUploads[uploadID] return } updatedUploads[uploadID] = { ...currentUploads[uploadID], fileIDs: newFileIDs, } }) const stateUpdate = { currentUploads: updatedUploads, files: updatedFiles, } // If all files were removed - allow new uploads, // and clear recoveredState if (Object.keys(updatedFiles).length === 0) { stateUpdate.allowNewUpload = true stateUpdate.error = null stateUpdate.recoveredState = null } this.setState(stateUpdate) this.calculateTotalProgress() const removedFileIDs = Object.keys(removedFiles) removedFileIDs.forEach((fileID) => { this.emit('file-removed', removedFiles[fileID], reason) }) if (removedFileIDs.length > 5) { this.log(`Removed ${removedFileIDs.length} files`) } else { this.log(`Removed files: ${removedFileIDs.join(', ')}`) } } removeFile (fileID, reason = null) { this.removeFiles([fileID], reason) } pauseResume (fileID) { if (!this.getState().capabilities.resumableUploads || this.getFile(fileID).uploadComplete) { return undefined } const wasPaused = this.getFile(fileID).isPaused || false const isPaused = !wasPaused this.setFileState(fileID, { isPaused, }) this.emit('upload-pause', fileID, isPaused) return isPaused } pauseAll () { const updatedFiles = { ...this.getState().files } const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => { return !updatedFiles[file].progress.uploadComplete && updatedFiles[file].progress.uploadStarted }) inProgressUpdatedFiles.forEach((file) => { const updatedFile = { ...updatedFiles[file], isPaused: true } updatedFiles[file] = updatedFile }) this.setState({ files: updatedFiles }) this.emit('pause-all') } resumeAll () { const updatedFiles = { ...this.getState().files } const inProgressUpdatedFiles = Object.keys(updatedFiles).filter((file) => { return !updatedFiles[file].progress.uploadComplete && updatedFiles[file].progress.uploadStarted }) inProgressUpdatedFiles.forEach((file) => { const updatedFile = { ...updatedFiles[file], isPaused: false, error: null, } updatedFiles[file] = updatedFile }) this.setState({ files: updatedFiles }) this.emit('resume-all') } retryAll () { const updatedFiles = { ...this.getState().files } const filesToRetry = Object.keys(updatedFiles).filter(file => { return updatedFiles[file].error }) filesToRetry.forEach((file) => { const updatedFile = { ...updatedFiles[file], isPaused: false, error: null, } updatedFiles[file] = updatedFile }) this.setState({ files: updatedFiles, error: null, }) this.emit('retry-all', filesToRetry) if (filesToRetry.length === 0) { return Promise.resolve({ successful: [], failed: [], }) } const uploadID = this.createUpload(filesToRetry, { forceAllowNewUpload: true, // create new upload even if allowNewUpload: false }) return this.runUpload(uploadID) } cancelAll () { this.emit('cancel-all') const { files } = this.getState() const fileIDs = Object.keys(files) if (fileIDs.length) { this.removeFiles(fileIDs, 'cancel-all') } this.setState({ totalProgress: 0, error: null, recoveredState: null, }) } retryUpload (fileID) { this.setFileState(fileID, { error: null, isPaused: false, }) this.emit('upload-retry', fileID) const uploadID = this.createUpload([fileID], { forceAllowNewUpload: true, // create new upload even if allowNewUpload: false }) return this.runUpload(uploadID) } reset () { this.cancelAll() } logout () { this.iteratePlugins(plugin => { if (plugin.provider && plugin.provider.logout) { plugin.provider.logout() } }) } calculateProgress (file, data) { if (!this.getFile(file.id)) { this.log(`Not setting progress for a file 
that has been removed: ${file.id}`) return } // bytesTotal may be null or zero; in that case we can't divide by it const canHavePercentage = Number.isFinite(data.bytesTotal) && data.bytesTotal > 0 this.setFileState(file.id, { progress: { ...this.getFile(file.id).progress, bytesUploaded: data.bytesUploaded, bytesTotal: data.bytesTotal, percentage: canHavePercentage ? Math.round((data.bytesUploaded / data.bytesTotal) * 100) : 0, }, }) this.calculateTotalProgress() } calculateTotalProgress () { // calculate total progress, using the number of files currently uploading, // multiplied by 100 and the summ of individual progress of each file const files = this.getFiles() const inProgress = files.filter((file) => { return file.progress.uploadStarted || file.progress.preprocess || file.progress.postprocess }) if (inProgress.length === 0) { this.emit('progress', 0) this.setState({ totalProgress: 0 }) return } const sizedFiles = inProgress.filter((file) => file.progress.bytesTotal != null) const unsizedFiles = inProgress.filter((file) => file.progress.bytesTotal == null) if (sizedFiles.length === 0) { const progressMax = inProgress.length * 100 const currentProgress = unsizedFiles.reduce((acc, file) => { return acc + file.progress.percentage }, 0) const totalProgress = Math.round((currentProgress / progressMax) * 100) this.setState({ totalProgress }) return } let totalSize = sizedFiles.reduce((acc, file) => { return acc + file.progress.bytesTotal }, 0) const averageSize = totalSize / sizedFiles.length totalSize += averageSize * unsizedFiles.length let uploadedSize = 0 sizedFiles.forEach((file) => { uploadedSize += file.progress.bytesUploaded }) unsizedFiles.forEach((file) => { uploadedSize += (averageSize * (file.progress.percentage || 0)) / 100 }) let totalProgress = totalSize === 0 ? 
0 : Math.round((uploadedSize / totalSize) * 100) // hot fix, because: // uploadedSize ended up larger than totalSize, resulting in 1325% total if (totalProgress > 100) { totalProgress = 100 } this.setState({ totalProgress }) this.emit('progress', totalProgress) } /** * Registers listeners for all global actions, like: * `error`, `file-removed`, `upload-progress` */ addListeners () { /** * @param {Error} error * @param {object} [file] * @param {object} [response] */ const errorHandler = (error, file, response) => { let errorMsg = error.message || 'Unknown error' if (error.details) { errorMsg += ` ${error.details}` } this.setState({ error: errorMsg }) if (file != null && file.id in this.getState().files) { this.setFileState(file.id, { error: errorMsg, response, }) } } this.on('error', errorHandler) this.on('upload-error', (file, error, response) => { errorHandler(error, file, response) if (typeof error === 'object' && error.message) { const newError = new Error(error.message) newError.details = error.message if (error.details) { newError.details += ` ${error.details}` } newError.message = this.i18n('failedToUpload', { file: file.name }) this.showOrLogErrorAndThrow(newError, { throwErr: false, }) } else { this.showOrLogErrorAndThrow(error, { throwErr: false, }) } }) this.on('upload', () => { this.setState({ error: null }) }) this.on('upload-started', (file) => { if (!this.getFile(file.id)) { this.log(`Not setting progress for a file that has been removed: ${file.id}`) return } this.setFileState(file.id, { progress: { uploadStarted: Date.now(), uploadComplete: false, percentage: 0, bytesUploaded: 0, bytesTotal: file.size, }, }) }) this.on('upload-progress', this.calculateProgress) this.on('upload-success', (file, uploadResp) => { if (!this.getFile(file.id)) { this.log(`Not setting progress for a file that has been removed: ${file.id}`) return } const currentProgress = this.getFile(file.id).progress this.setFileState(file.id, { progress: { ...currentProgress, postprocess: this.postProcessors.length > 0 ? 
{ mode: 'indeterminate', } : null, uploadComplete: true, percentage: 100, bytesUploaded: currentProgress.bytesTotal, }, response: uploadResp, uploadURL: uploadResp.uploadURL, isPaused: false, }) this.calculateTotalProgress() }) this.on('preprocess-progress', (file, progress) => { if (!this.getFile(file.id)) { this.log(`Not setting progress for a file that has been removed: ${file.id}`) return } this.setFileState(file.id, { progress: { ...this.getFile(file.id).progress, preprocess: progress }, }) }) this.on('preprocess-complete', (file) => { if (!this.getFile(file.id)) { this.log(`Not setting progress for a file that has been removed: ${file.id}`) return } const files = { ...this.getState().files } files[file.id] = { ...files[file.id], progress: { ...files[file.id].progress } } delete files[file.id].progress.preprocess this.setState({ files }) }) this.on('postprocess-progress', (file, progress) => { if (!this.getFile(file.id)) { this.log(`Not setting progress for a file that has been removed: ${file.id}`) return } this.setFileState(file.id, { progress: { ...this.getState().files[file.id].progress, postprocess: progress }, }) }) this.on('postprocess-complete', (file) => { if (!this.getFile(file.id)) { this.log(`Not setting progress for a file that has been removed: ${file.id}`) return } const files = { ...this.getState().files, } files[file.id] = { ...files[file.id], progress: { ...files[file.id].progress, }, } delete files[file.id].progress.postprocess this.setState({ files }) }) this.on('restored', () => { // Files may have changed--ensure progress is still accurate. this.calculateTotalProgress() }) // show informer if offline if (typeof window !== 'undefined' && window.addEventListener) { window.addEventListener('online', () => this.updateOnlineStatus()) window.addEventListener('offline', () => this.updateOnlineStatus()) setTimeout(() => this.updateOnlineStatus(), 3000) } } updateOnlineStatus () { const online = typeof window.navigator.onLine !== 'undefined' ? window.navigator.onLine : true if (!online) { this.emit('is-offline') this.info(this.i18n('noInternetConnection'), 'error', 0) this.wasOffline = true } else { this.emit('is-online') if (this.wasOffline) { this.emit('back-online') this.info(this.i18n('connectedToInternet'), 'success', 3000) this.wasOffline = false } } } getID () { return this.opts.id } /** * Registers a plugin with Core. * * @param {object} Plugin object * @param {object} [opts] object with options to be passed to Plugin * @returns {object} self for chaining */ // eslint-disable-next-line no-shadow use (Plugin, opts) { if (typeof Plugin !== 'function') { const msg = `Expected a plugin class, but got ${Plugin === null ? 'null' : typeof Plugin}.` + ' Please verify that the plugin was imported and spelled correctly.' throw new TypeError(msg) } // Instantiate const plugin = new Plugin(this, opts) const pluginId = plugin.id this.plugins[plugin.type] = this.plugins[plugin.type] || [] if (!pluginId) { throw new Error('Your plugin must have an id') } if (!plugin.type) { throw new Error('Your plugin must have a type') } const existsPluginAlready = this.getPlugin(pluginId) if (existsPluginAlready) { const msg = `Already found a plugin named '${existsPluginAlready.id}'. ` + `Tried to use: '${pluginId}'.\n` + 'Uppy plugins must have unique `id` options. See https://uppy.io/docs/plugins/#id.' 
throw new Error(msg) } if (Plugin.VERSION) { this.log(`Using ${pluginId} v${Plugin.VERSION}`) } this.plugins[plugin.type].push(plugin) plugin.install() return this } /** * Find one Plugin by name. * * @param {string} id plugin id * @returns {object|boolean} */ getPlugin (id) { let foundPlugin = null this.iteratePlugins((plugin) => { if (plugin.id === id) { foundPlugin = plugin return false } }) return foundPlugin } /** * Iterate through all `use`d plugins. * * @param {Function} method that will be run on each plugin */ iteratePlugins (method) { Object.keys(this.plugins).forEach(pluginType => { this.plugins[pluginType].forEach(method) }) } /** * Uninstall and remove a plugin. * * @param {object} instance The plugin instance to remove. */ removePlugin (instance) { this.log(`Removing plugin ${instance.id}`) this.emit('plugin-remove', instance) if (instance.uninstall) { instance.uninstall() } const list = this.plugins[instance.type].slice() // list.indexOf failed here, because Vue3 converted the plugin instance // to a Proxy object, which failed the strict comparison test: // obj !== objProxy const index = findIndex(list, item => item.id === instance.id) if (index !== -1) { list.splice(index, 1) this.plugins[instance.type] = list } const state = this.getState() const updatedState = { plugins: { ...state.plugins, [instance.id]: undefined, }, } this.setState(updatedState) } /** * Uninstall all plugins and close down this Uppy instance. */ close () { this.log(`Closing Uppy instance ${this.opts.id}: removing all files and uninstalling plugins`) this.reset() this.storeUnsubscribe() this.iteratePlugins((plugin) => { this.removePlugin(plugin) }) } /** * Set info message in `state.info`, so that UI plugins like `Informer` * can display the message. * * @param {string | object} message Message to be displayed by the informer * @param {string} [type] * @param {number} [duration] */ info (message, type = 'info', duration = 3000) { const isComplexMessage = typeof message === 'object' this.setState({ info: { isHidden: false, type, message: isComplexMessage ? message.message : message, details: isComplexMessage ? message.details : null, }, }) this.emit('info-visible') clearTimeout(this.infoTimeoutID) if (duration === 0) { this.infoTimeoutID = undefined return } // hide the informer after `duration` milliseconds this.infoTimeoutID = setTimeout(this.hideInfo, duration) } hideInfo () { const newInfo = { ...this.getState().info, isHidden: true } this.setState({ info: newInfo, }) this.emit('info-hidden') } /** * Passes messages to a function, provided in `opts.logger`. * If `opts.logger: Uppy.debugLogger` or `opts.debug: true`, logs to the browser console. * * @param {string|object} message to log * @param {string} [type] optional `error` or `warning` */ log (message, type) { const { logger } = this.opts switch (type) { case 'error': logger.error(message); break case 'warning': logger.warn(message); break default: logger.debug(message); break } } /** * Restore an upload by its ID. */ restore (uploadID) { this.log(`Core: attempting to restore upload "${uploadID}"`) if (!this.getState().currentUploads[uploadID]) { this.removeUpload(uploadID) return Promise.reject(new Error('Nonexistent upload')) } return this.runUpload(uploadID) } /** * Create an upload for a bunch of files. * * @param {Array<string>} fileIDs File IDs to include in this upload. * @returns {string} ID of this upload. 
*/ createUpload (fileIDs, opts = {}) { // uppy.retryAll sets this to true — when retrying we want to ignore `allowNewUpload: false` const { forceAllowNewUpload = false } = opts const { allowNewUpload, currentUploads } = this.getState() if (!allowNewUpload && !forceAllowNewUpload) { throw new Error('Cannot create a new upload: already uploading.') } const uploadID = cuid() this.emit('upload', { id: uploadID, fileIDs, }) this.setState({ allowNewUpload: this.opts.allowMultipleUploads !== false, currentUploads: { ...currentUploads, [uploadID]: { fileIDs, step: 0, result: {}, }, }, }) return uploadID } getUpload (uploadID) { const { currentUploads } = this.getState() return currentUploads[uploadID] } /** * Add data to an upload's result object. * * @param {string} uploadID The ID of the upload. * @param {object} data Data properties to add to the result object. */ addResultData (uploadID, data) { if (!this.getUpload(uploadID)) { this.log(`Not setting result for an upload that has been removed: ${uploadID}`) return } const { currentUploads } = this.getState() const currentUpload = { ...currentUploads[uploadID], result: { ...currentUploads[uploadID].result, ...data } } this.setState({ currentUploads: { ...currentUploads, [uploadID]: currentUpload }, }) } /** * Remove an upload, eg. if it has been canceled or completed. * * @param {string} uploadID The ID of the upload. */ removeUpload (uploadID) { const currentUploads = { ...this.getState().currentUploads } delete currentUploads[uploadID] this.setState({ currentUploads, }) } /** * Run an upload. This picks up where it left off in case the upload is being restored. * * @private */ runUpload (uploadID) { const uploadData = this.getState().currentUploads[uploadID] const restoreStep = uploadData.step const steps = [ ...this.preProcessors, ...this.uploaders, ...this.postProcessors, ] let lastStep = Promise.resolve() steps.forEach((fn, step) => { // Skip this step if we are restoring and have already completed this step before. if (step < restoreStep) { return } lastStep = lastStep.then(() => { const { currentUploads } = this.getState() const currentUpload = currentUploads[uploadID] if (!currentUpload) { return } const updatedUpload = { ...currentUpload, step, } this.setState({ currentUploads: { ...currentUploads, [uploadID]: updatedUpload, }, }) // TODO give this the `updatedUpload` object as its only parameter maybe? // Otherwise when more metadata may be added to the upload this would keep getting more parameters // eslint-disable-next-line consistent-return return fn(updatedUpload.fileIDs, uploadID) }).then(() => { return null }) }) // Not returning the `catch`ed promise, because we still want to return a rejected // promise from this method if the upload failed. lastStep.catch((err) => { this.emit('error', err, uploadID) this.removeUpload(uploadID) }) return lastStep.then(() => { // Set result data. const { currentUploads } = this.getState() const currentUpload = currentUploads[uploadID] if (!currentUpload) { return } // Mark postprocessing step as complete if necessary; this addresses a case where we might get // stuck in the postprocessing UI while the upload is fully complete. // If the postprocessing steps do not do any work, they may not emit postprocessing events at // all, and never mark the postprocessing as complete. This is fine on its own but we // introduced code in the @uppy/core upload-success handler to prepare postprocessing progress // state if any postprocessors are registered. 
That is to avoid a "flash of completed state" // before the postprocessing plugins can emit events. // // So, just in case an upload with postprocessing plugins *has* completed *without* emitting // postprocessing completion, we do it instead. currentUpload.fileIDs.forEach((fileID) => { const file = this.getFile(fileID) if (file && file.progress.postprocess) { this.emit('postprocess-complete', file) } }) const files = currentUpload.fileIDs.map((fileID) => this.getFile(fileID)) const successful = files.filter((file) => !file.error) const failed = files.filter((file) => file.error) this.addResultData(uploadID, { successful, failed, uploadID }) }).then(() => { // Emit completion events. // This is in a separate function so that the `currentUploads` variable // always refers to the latest state. In the handler right above it refers // to an outdated object without the `.result` property. const { currentUploads } = this.getState() if (!currentUploads[uploadID]) { return } const currentUpload = currentUploads[uploadID] const { result } = currentUpload this.emit('complete', result) this.removeUpload(uploadID) // eslint-disable-next-line consistent-return return result }).then((result) => { if (result == null) { this.log(`Not setting result for an upload that has been removed: ${uploadID}`) } return result }) } /** * Start an upload for all the files that are not currently being uploaded. * * @returns {Promise} */ upload () { if (!this.plugins.uploader) { this.log('No uploader type plugins are used', 'warning') } let { files } = this.getState() const onBeforeUploadResult = this.opts.onBeforeUpload(files) if (onBeforeUploadResult === false) { return Promise.reject(new Error('Not starting the upload because onBeforeUpload returned false')) } if (onBeforeUploadResult && typeof onBeforeUploadResult === 'object') { files = onBeforeUploadResult // Updating files in state, because uploader plugins receive file IDs, // and then fetch the actual file object from state this.setState({ files, }) } return Promise.resolve() .then(() => this.checkMinNumberOfFiles(files)) .catch((err) => { this.showOrLogErrorAndThrow(err) }) .then(() => { const { currentUploads } = this.getState() // get a list of files that are currently assigned to uploads const currentlyUploadingFiles = Object.keys(currentUploads) .reduce((prev, curr) => prev.concat(currentUploads[curr].fileIDs), []) const waitingFileIDs = [] Object.keys(files).forEach((fileID) => { const file = this.getFile(fileID) // if the file hasn't started uploading and hasn't already been assigned to an upload.. if ((!file.progress.uploadStarted) && (currentlyUploadingFiles.indexOf(fileID) === -1)) { waitingFileIDs.push(file.id) } }) const uploadID = this.createUpload(waitingFileIDs) return this.runUpload(uploadID) }) .catch((err) => { this.showOrLogErrorAndThrow(err, { showInformer: false, }) }) } } // Expose class constructor. module.exports = Uppy module.exports.Uppy = Uppy module.exports.UIPlugin = UIPlugin module.exports.BasePlugin = BasePlugin module.exports.debugLogger = debugLogger
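The class above exposes the public Uppy API: addFile(), upload(), the event emitter, and the restriction options read from this.opts.restrictions. Below is a minimal usage sketch, assuming the @uppy/core require path, a browser Blob payload, and illustrative option values; none of these specifics come from the file itself, only the method names, event names, restriction keys, and the addFile() descriptor shape do.

const Uppy = require('@uppy/core') // assumed entry point for the class exported above

const uppy = new Uppy({
  autoProceed: false, // upload() is called manually below instead of relying on startIfAutoProceed()
  restrictions: {
    maxNumberOfFiles: 3,
    allowedFileTypes: ['image/*', '.pdf'], // mime patterns and extensions, as checkRestrictions() expects
  },
})

// Events emitted by the code above
uppy.on('restriction-failed', (file, error) => console.warn(error.message))
uppy.on('complete', (result) => {
  console.log(`uploaded: ${result.successful.length}, failed: ${result.failed.length}`)
})

// addFile() takes a file descriptor with name, type, data and source
uppy.addFile({
  name: 'photo.jpg',
  type: 'image/jpeg',
  data: new Blob(['...'], { type: 'image/jpeg' }), // browser context assumed
  source: 'Local',
})

// upload() resolves with { successful, failed, uploadID }
uppy.upload().then((result) => console.log(result.uploadID))

Note the design choice visible in the source: restrictions are validated in checkRestrictions() before a file ever reaches state, so a failing addFile() throws a RestrictionError and emits 'restriction-failed' rather than silently dropping the file.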
idx: 1
id: 14,321
msg: Should we specify the name of the folder?
proj: transloadit-uppy
lang: js
@@ -14,7 +14,7 @@ from rdkit import RDConfig, rdBase from rdkit import DataStructs from rdkit import Chem import rdkit.Chem.rdDepictor -from rdkit.Chem import rdqueries +from rdkit.Chem import rdqueries, rdmolops from rdkit import __version__
y: 1
# # Copyright (C) 2003-2019 Greg Landrum and Rational Discovery LLC # All Rights Reserved # """ This is a rough coverage test of the python wrapper it's intended to be shallow, but broad """ import os, sys, tempfile, gzip, gc import unittest, doctest from rdkit import RDConfig, rdBase from rdkit import DataStructs from rdkit import Chem import rdkit.Chem.rdDepictor from rdkit.Chem import rdqueries from rdkit import __version__ # Boost functions are NOT found by doctest, this "fixes" them # by adding the doctests to a fake module import importlib.util spec = importlib.util.spec_from_loader("TestReplaceCore", loader=None) TestReplaceCore = importlib.util.module_from_spec(spec) code = """ from rdkit.Chem import ReplaceCore def ReplaceCore(*a, **kw): '''%s ''' return Chem.ReplaceCore(*a, **kw) """ % "\n".join([x.lstrip() for x in Chem.ReplaceCore.__doc__.split("\n")]) exec(code, TestReplaceCore.__dict__) def load_tests(loader, tests, ignore): tests.addTests(doctest.DocTestSuite(TestReplaceCore)) return tests def feq(v1, v2, tol2=1e-4): return abs(v1 - v2) <= tol2 def getTotalFormalCharge(mol): totalFormalCharge = 0 for atom in mol.GetAtoms(): totalFormalCharge += atom.GetFormalCharge() return totalFormalCharge def cmpFormalChargeBondOrder(self, mol1, mol2): self.assertEqual(mol1.GetNumAtoms(), mol2.GetNumAtoms()) self.assertEqual(mol1.GetNumBonds(), mol2.GetNumBonds()) for i in range(mol1.GetNumAtoms()): self.assertEqual( mol1.GetAtomWithIdx(i).GetFormalCharge(), mol2.GetAtomWithIdx(i).GetFormalCharge()) for i in range(mol1.GetNumBonds()): self.assertEqual(mol1.GetBondWithIdx(i).GetBondType(), mol2.GetBondWithIdx(i).GetBondType()) def setResidueFormalCharge(mol, res, fc): for query in res: matches = mol.GetSubstructMatches(query) for match in matches: mol.GetAtomWithIdx(match[-1]).SetFormalCharge(fc) def getBtList2(resMolSuppl): btList2 = [] while (not resMolSuppl.atEnd()): resMol = next(resMolSuppl) bt = [] for bond in resMol.GetBonds(): bt.append(int(bond.GetBondTypeAsDouble())) btList2.append(bt) for i in range(len(btList2)): same = True for j in range(len(btList2[i])): if (not i): continue if (same): same = (btList2[i][j] == btList2[i - 1][j]) if (i and same): return None return btList2 class TestCase(unittest.TestCase): def test0Except(self): with self.assertRaises(IndexError): Chem.tossit() def test1Table(self): tbl = Chem.GetPeriodicTable() self.assertTrue(tbl) self.assertTrue(feq(tbl.GetAtomicWeight(6), 12.011)) self.assertTrue(feq(tbl.GetAtomicWeight("C"), 12.011)) self.assertTrue(tbl.GetAtomicNumber('C') == 6) self.assertTrue(feq(tbl.GetRvdw(6), 1.7)) self.assertTrue(feq(tbl.GetRvdw("C"), 1.7)) self.assertTrue(feq(tbl.GetRcovalent(6), 0.680)) self.assertTrue(feq(tbl.GetRcovalent("C"), 0.680)) self.assertTrue(tbl.GetDefaultValence(6) == 4) self.assertTrue(tbl.GetDefaultValence("C") == 4) self.assertTrue(tuple(tbl.GetValenceList(6)) == (4, )) self.assertTrue(tuple(tbl.GetValenceList("C")) == (4, )) self.assertTrue(tuple(tbl.GetValenceList(16)) == (2, 4, 6)) self.assertTrue(tuple(tbl.GetValenceList("S")) == (2, 4, 6)) self.assertTrue(tbl.GetNOuterElecs(6) == 4) self.assertTrue(tbl.GetNOuterElecs("C") == 4) self.assertTrue(tbl.GetMostCommonIsotope(6) == 12) self.assertTrue(tbl.GetMostCommonIsotope('C') == 12) self.assertTrue(tbl.GetMostCommonIsotopeMass(6) == 12.0) self.assertTrue(tbl.GetMostCommonIsotopeMass('C') == 12.0) self.assertTrue(tbl.GetAbundanceForIsotope(6, 12) == 98.93) self.assertTrue(tbl.GetAbundanceForIsotope('C', 12) == 98.93) self.assertTrue(feq(tbl.GetRb0(6), 0.77)) 
self.assertTrue(feq(tbl.GetRb0("C"), 0.77)) self.assertTrue(tbl.GetElementSymbol(6) == 'C') def test2Atom(self): atom = Chem.Atom(6) self.assertTrue(atom) self.assertTrue(atom.GetAtomicNum() == 6) atom.SetAtomicNum(8) self.assertTrue(atom.GetAtomicNum() == 8) atom = Chem.Atom("C") self.assertTrue(atom) self.assertTrue(atom.GetAtomicNum() == 6) def test3Bond(self): # No longer relevant, bonds are not constructible from Python pass def test4Mol(self): mol = Chem.Mol() self.assertTrue(mol) def test5Smiles(self): mol = Chem.MolFromSmiles('n1ccccc1') self.assertTrue(mol) self.assertTrue(mol.GetNumAtoms() == 6) self.assertTrue(mol.GetNumAtoms(1) == 6) self.assertTrue(mol.GetNumAtoms(0) == 11) at = mol.GetAtomWithIdx(2) self.assertTrue(at.GetAtomicNum() == 6) at = mol.GetAtomWithIdx(0) self.assertTrue(at.GetAtomicNum() == 7) def _test6Bookmarks(self): mol = Chem.MolFromSmiles('n1ccccc1') self.assertTrue(mol) self.assertTrue(not mol.HasAtomBookmark(0)) mol.SetAtomBookmark(mol.GetAtomWithIdx(0), 0) mol.SetAtomBookmark(mol.GetAtomWithIdx(1), 1) self.assertTrue(mol.HasAtomBookmark(0)) self.assertTrue(mol.HasAtomBookmark(1)) if 1: self.assertTrue(not mol.HasBondBookmark(0)) self.assertTrue(not mol.HasBondBookmark(1)) mol.SetBondBookmark(mol.GetBondWithIdx(0), 0) mol.SetBondBookmark(mol.GetBondWithIdx(1), 1) self.assertTrue(mol.HasBondBookmark(0)) self.assertTrue(mol.HasBondBookmark(1)) at = mol.GetAtomWithBookmark(0) self.assertTrue(at) self.assertTrue(at.GetAtomicNum() == 7) mol.ClearAtomBookmark(0) self.assertTrue(not mol.HasAtomBookmark(0)) self.assertTrue(mol.HasAtomBookmark(1)) mol.ClearAllAtomBookmarks() self.assertTrue(not mol.HasAtomBookmark(0)) self.assertTrue(not mol.HasAtomBookmark(1)) mol.SetAtomBookmark(mol.GetAtomWithIdx(1), 1) if 1: self.assertTrue(mol.HasBondBookmark(0)) self.assertTrue(mol.HasBondBookmark(1)) bond = mol.GetBondWithBookmark(0) self.assertTrue(bond) mol.ClearBondBookmark(0) self.assertTrue(not mol.HasBondBookmark(0)) self.assertTrue(mol.HasBondBookmark(1)) mol.ClearAllBondBookmarks() self.assertTrue(not mol.HasBondBookmark(0)) self.assertTrue(not mol.HasBondBookmark(1)) self.assertTrue(mol.HasAtomBookmark(1)) def test7Atom(self): mol = Chem.MolFromSmiles('n1ccccc1C[CH2-]') self.assertTrue(mol) Chem.SanitizeMol(mol) a0 = mol.GetAtomWithIdx(0) a1 = mol.GetAtomWithIdx(1) a6 = mol.GetAtomWithIdx(6) a7 = mol.GetAtomWithIdx(7) self.assertTrue(a0.GetAtomicNum() == 7) self.assertTrue(a0.GetSymbol() == 'N') self.assertTrue(a0.GetIdx() == 0) aList = [a0, a1, a6, a7] self.assertTrue(a0.GetDegree() == 2) self.assertTrue(a1.GetDegree() == 2) self.assertTrue(a6.GetDegree() == 2) self.assertTrue(a7.GetDegree() == 1) self.assertTrue([x.GetDegree() for x in aList] == [2, 2, 2, 1]) self.assertTrue([x.GetTotalNumHs() for x in aList] == [0, 1, 2, 2]) self.assertTrue([x.GetNumImplicitHs() for x in aList] == [0, 1, 2, 0]) self.assertTrue([x.GetExplicitValence() for x in aList] == [3, 3, 2, 3]) self.assertTrue([x.GetImplicitValence() for x in aList] == [0, 1, 2, 0]) self.assertTrue([x.GetFormalCharge() for x in aList] == [0, 0, 0, -1]) self.assertTrue([x.GetNoImplicit() for x in aList] == [0, 0, 0, 1]) self.assertTrue([x.GetNumExplicitHs() for x in aList] == [0, 0, 0, 2]) self.assertTrue([x.GetIsAromatic() for x in aList] == [1, 1, 0, 0]) self.assertTrue([x.GetHybridization() for x in aList]==[Chem.HybridizationType.SP2,Chem.HybridizationType.SP2, Chem.HybridizationType.SP3,Chem.HybridizationType.SP3],\ [x.GetHybridization() for x in aList]) def test8Bond(self): mol = 
Chem.MolFromSmiles('n1ccccc1CC(=O)O') self.assertTrue(mol) Chem.SanitizeMol(mol) # note bond numbering is funny because of ring closure b0 = mol.GetBondWithIdx(0) b6 = mol.GetBondWithIdx(6) b7 = mol.GetBondWithIdx(7) b8 = mol.GetBondWithIdx(8) bList = [b0, b6, b7, b8] self.assertTrue( [x.GetBondType() for x in bList] == [Chem.BondType.AROMATIC, Chem.BondType.SINGLE, Chem.BondType.DOUBLE, Chem.BondType.SINGLE]) self.assertTrue([x.GetIsAromatic() for x in bList] == [1, 0, 0, 0]) self.assertEqual(bList[0].GetBondTypeAsDouble(), 1.5) self.assertEqual(bList[1].GetBondTypeAsDouble(), 1.0) self.assertEqual(bList[2].GetBondTypeAsDouble(), 2.0) self.assertTrue([x.GetIsConjugated() != 0 for x in bList] == [1, 0, 1, 1], [x.GetIsConjugated() != 0 for x in bList]) self.assertTrue([x.GetBeginAtomIdx() for x in bList] == [0, 6, 7, 7], [x.GetBeginAtomIdx() for x in bList]) self.assertTrue([x.GetBeginAtom().GetIdx() for x in bList] == [0, 6, 7, 7]) self.assertTrue([x.GetEndAtomIdx() for x in bList] == [1, 7, 8, 9]) self.assertTrue([x.GetEndAtom().GetIdx() for x in bList] == [1, 7, 8, 9]) def test9Smarts(self): query1 = Chem.MolFromSmarts('C(=O)O') self.assertTrue(query1) query2 = Chem.MolFromSmarts('C(=O)[O,N]') self.assertTrue(query2) query3 = Chem.MolFromSmarts('[$(C(=O)O)]') self.assertTrue(query3) mol = Chem.MolFromSmiles('CCC(=O)O') self.assertTrue(mol) self.assertTrue(mol.HasSubstructMatch(query1)) self.assertTrue(mol.HasSubstructMatch(query2)) self.assertTrue(mol.HasSubstructMatch(query3)) mol = Chem.MolFromSmiles('CCC(=O)N') self.assertTrue(mol) self.assertTrue(not mol.HasSubstructMatch(query1)) self.assertTrue(mol.HasSubstructMatch(query2)) self.assertTrue(not mol.HasSubstructMatch(query3)) def test10Iterators(self): mol = Chem.MolFromSmiles('CCOC') self.assertTrue(mol) for atom in mol.GetAtoms(): self.assertTrue(atom) ats = mol.GetAtoms() ats[1] with self.assertRaisesRegex(IndexError, ""): ats[12] for bond in mol.GetBonds(): self.assertTrue(bond) bonds = mol.GetBonds() bonds[1] with self.assertRaisesRegex(IndexError, ""): bonds[12] def test11MolOps(self): mol = Chem.MolFromSmiles('C1=CC=C(C=C1)P(C2=CC=CC=C2)C3=CC=CC=C3') self.assertTrue(mol) smi = Chem.MolToSmiles(mol) Chem.SanitizeMol(mol) nr = Chem.GetSymmSSSR(mol) self.assertTrue((len(nr) == 3)) def test12Smarts(self): query1 = Chem.MolFromSmarts('C(=O)O') self.assertTrue(query1) query2 = Chem.MolFromSmarts('C(=O)[O,N]') self.assertTrue(query2) query3 = Chem.MolFromSmarts('[$(C(=O)O)]') self.assertTrue(query3) mol = Chem.MolFromSmiles('CCC(=O)O') self.assertTrue(mol) self.assertTrue(mol.HasSubstructMatch(query1)) self.assertTrue(mol.GetSubstructMatch(query1) == (2, 3, 4)) self.assertTrue(mol.HasSubstructMatch(query2)) self.assertTrue(mol.GetSubstructMatch(query2) == (2, 3, 4)) self.assertTrue(mol.HasSubstructMatch(query3)) self.assertTrue(mol.GetSubstructMatch(query3) == (2, )) mol = Chem.MolFromSmiles('CCC(=O)N') self.assertTrue(mol) self.assertTrue(not mol.HasSubstructMatch(query1)) self.assertTrue(not mol.GetSubstructMatch(query1)) self.assertTrue(mol.HasSubstructMatch(query2)) self.assertTrue(mol.GetSubstructMatch(query2) == (2, 3, 4)) self.assertTrue(not mol.HasSubstructMatch(query3)) mol = Chem.MolFromSmiles('OC(=O)CC(=O)O') self.assertTrue(mol) self.assertTrue(mol.HasSubstructMatch(query1)) self.assertTrue(mol.GetSubstructMatch(query1) == (1, 2, 0)) self.assertTrue(mol.GetSubstructMatches(query1) == ((1, 2, 0), (4, 5, 6))) self.assertTrue(mol.HasSubstructMatch(query2)) self.assertTrue(mol.GetSubstructMatch(query2) == (1, 2, 0)) 
self.assertTrue(mol.GetSubstructMatches(query2) == ((1, 2, 0), (4, 5, 6))) self.assertTrue(mol.HasSubstructMatch(query3)) self.assertTrue(mol.GetSubstructMatches(query3) == ((1, ), (4, ))) def test13Smarts(self): # previous smarts problems: query = Chem.MolFromSmarts('N(=,-C)') self.assertTrue(query) mol = Chem.MolFromSmiles('N#C') self.assertTrue(not mol.HasSubstructMatch(query)) mol = Chem.MolFromSmiles('N=C') self.assertTrue(mol.HasSubstructMatch(query)) mol = Chem.MolFromSmiles('NC') self.assertTrue(mol.HasSubstructMatch(query)) query = Chem.MolFromSmarts('[Cl,$(O)]') mol = Chem.MolFromSmiles('C(=O)O') self.assertTrue(len(mol.GetSubstructMatches(query)) == 2) mol = Chem.MolFromSmiles('C(=N)N') self.assertTrue(len(mol.GetSubstructMatches(query)) == 0) query = Chem.MolFromSmarts('[$([O,S]-[!$(*=O)])]') mol = Chem.MolFromSmiles('CC(S)C(=O)O') self.assertTrue(len(mol.GetSubstructMatches(query)) == 1) mol = Chem.MolFromSmiles('C(=O)O') self.assertTrue(len(mol.GetSubstructMatches(query)) == 0) def test14Hs(self): m = Chem.MolFromSmiles('CC(=O)[OH]') self.assertEqual(m.GetNumAtoms(), 4) m2 = Chem.AddHs(m) self.assertEqual(m2.GetNumAtoms(), 8) m2 = Chem.RemoveHs(m2) self.assertEqual(m2.GetNumAtoms(), 4) m = Chem.MolFromSmiles('CC[H]', False) self.assertEqual(m.GetNumAtoms(), 3) m2 = Chem.MergeQueryHs(m) self.assertEqual(m2.GetNumAtoms(), 2) self.assertTrue(m2.GetAtomWithIdx(1).HasQuery()) m = Chem.MolFromSmiles('CC[H]', False) self.assertEqual(m.GetNumAtoms(), 3) m1 = Chem.RemoveHs(m) self.assertEqual(m1.GetNumAtoms(), 2) self.assertEqual(m1.GetAtomWithIdx(1).GetNumExplicitHs(), 0) m1 = Chem.RemoveHs(m, updateExplicitCount=True) self.assertEqual(m1.GetNumAtoms(), 2) self.assertEqual(m1.GetAtomWithIdx(1).GetNumExplicitHs(), 1) # test merging of mapped hydrogens m = Chem.MolFromSmiles('CC[H]', False) m.GetAtomWithIdx(2).SetProp("molAtomMapNumber", "1") self.assertEqual(m.GetNumAtoms(), 3) m2 = Chem.MergeQueryHs(m, mergeUnmappedOnly=True) self.assertTrue(m2 is not None) self.assertEqual(m2.GetNumAtoms(), 3) self.assertFalse(m2.GetAtomWithIdx(1).HasQuery()) # here the hydrogen is unmapped # should be the same as merging all hydrogens m = Chem.MolFromSmiles('CC[H]', False) m.GetAtomWithIdx(1).SetProp("molAtomMapNumber", "1") self.assertEqual(m.GetNumAtoms(), 3) m2 = Chem.MergeQueryHs(m, mergeUnmappedOnly=True) self.assertTrue(m2 is not None) self.assertEqual(m2.GetNumAtoms(), 2) self.assertTrue(m2.GetAtomWithIdx(1).HasQuery()) # test github758 m = Chem.MolFromSmiles('CCC') self.assertEqual(m.GetNumAtoms(), 3) m = Chem.AddHs(m, onlyOnAtoms=(0, 2)) self.assertEqual(m.GetNumAtoms(), 9) self.assertEqual(m.GetAtomWithIdx(0).GetDegree(), 4) self.assertEqual(m.GetAtomWithIdx(2).GetDegree(), 4) self.assertEqual(m.GetAtomWithIdx(1).GetDegree(), 2) def test15Neighbors(self): m = Chem.MolFromSmiles('CC(=O)[OH]') self.assertTrue(m.GetNumAtoms() == 4) a = m.GetAtomWithIdx(1) ns = a.GetNeighbors() self.assertTrue(len(ns) == 3) bs = a.GetBonds() self.assertTrue(len(bs) == 3) for b in bs: try: a2 = b.GetOtherAtom(a) except Exception: a2 = None self.assertTrue(a2) self.assertTrue(len(bs) == 3) def test16Pickle(self): import pickle m = Chem.MolFromSmiles('C1=CN=CC=C1') pkl = pickle.dumps(m) m2 = pickle.loads(pkl) self.assertTrue(type(m2) == Chem.Mol) smi1 = Chem.MolToSmiles(m) smi2 = Chem.MolToSmiles(m2) self.assertTrue(smi1 == smi2) pkl = pickle.dumps(Chem.RWMol(m)) m2 = pickle.loads(pkl) self.assertTrue(type(m2) == Chem.RWMol) smi1 = Chem.MolToSmiles(m) smi2 = Chem.MolToSmiles(m2) self.assertTrue(smi1 == smi2) def 
test16Props(self): m = Chem.MolFromSmiles('C1=CN=CC=C1') self.assertTrue(not m.HasProp('prop1')) self.assertTrue(not m.HasProp('prop2')) self.assertTrue(not m.HasProp('prop2')) m.SetProp('prop1', 'foob') self.assertTrue(not m.HasProp('prop2')) self.assertTrue(m.HasProp('prop1')) self.assertTrue(m.GetProp('prop1') == 'foob') self.assertTrue(not m.HasProp('propo')) try: m.GetProp('prop2') except KeyError: ok = 1 else: ok = 0 self.assertTrue(ok) # test computed properties m.SetProp('cprop1', 'foo', 1) m.SetProp('cprop2', 'foo2', 1) m.ClearComputedProps() self.assertTrue(not m.HasProp('cprop1')) self.assertTrue(not m.HasProp('cprop2')) m.SetDoubleProp("a", 2.0) self.assertTrue(m.GetDoubleProp("a") == 2.0) try: self.assertTrue(m.GetIntProp("a") == 2.0) raise Exception("Expected runtime exception") except ValueError: pass try: self.assertTrue(m.GetUnsignedProp("a") == 2.0) raise Exception("Expected runtime exception") except ValueError: pass m.SetDoubleProp("a", -2) self.assertTrue(m.GetDoubleProp("a") == -2.0) m.SetIntProp("a", -2) self.assertTrue(m.GetIntProp("a") == -2) try: m.SetUnsignedProp("a", -2) raise Exception("Expected failure with negative unsigned number") except OverflowError: pass m.SetBoolProp("a", False) self.assertTrue(m.GetBoolProp("a") == False) self.assertEqual(m.GetPropsAsDict(), {'a': False, 'prop1': 'foob'}) m.SetDoubleProp("b", 1000.0) m.SetUnsignedProp("c", 2000) m.SetIntProp("d", -2) m.SetUnsignedProp("e", 2, True) self.assertEqual( m.GetPropsAsDict(False, True), { 'a': False, 'c': 2000, 'b': 1000.0, 'e': 2, 'd': -2, 'prop1': 'foob' }) m = Chem.MolFromSmiles('C1=CN=CC=C1') m.SetProp("int", "1000") m.SetProp("double", "10000.123") self.assertEqual(m.GetPropsAsDict(), {"int": 1000, "double": 10000.123}) self.assertEqual(type(m.GetPropsAsDict()['int']), int) self.assertEqual(type(m.GetPropsAsDict()['double']), float) def test17Kekulize(self): m = Chem.MolFromSmiles('c1ccccc1') smi = Chem.MolToSmiles(m) self.assertTrue(smi == 'c1ccccc1') Chem.Kekulize(m) smi = Chem.MolToSmiles(m) self.assertTrue(smi == 'c1ccccc1') m = Chem.MolFromSmiles('c1ccccc1') smi = Chem.MolToSmiles(m) self.assertTrue(smi == 'c1ccccc1') Chem.Kekulize(m, 1) smi = Chem.MolToSmiles(m) self.assertTrue(smi == 'C1=CC=CC=C1', smi) def test18Paths(self): m = Chem.MolFromSmiles("C1CC2C1CC2") #self.assertTrue(len(Chem.FindAllPathsOfLengthN(m,1,useBonds=1))==7) #print(Chem.FindAllPathsOfLengthN(m,3,useBonds=0)) self.assertTrue( len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 10, Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 14) m = Chem.MolFromSmiles('C1CC1C') self.assertTrue(m) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 4) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 5) self.assertTrue( len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 3, Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) self.assertTrue( len(Chem.FindAllPathsOfLengthN(m, 4, useBonds=1)) == 1, Chem.FindAllPathsOfLengthN(m, 4, useBonds=1)) self.assertTrue( len(Chem.FindAllPathsOfLengthN(m, 5, useBonds=1)) == 0, Chem.FindAllPathsOfLengthN(m, 5, useBonds=1)) # # Hexane example from Hall-Kier Rev.Comp.Chem. paper # Rev. Comp. Chem. 
vol 2, 367-422, (1991) # m = Chem.MolFromSmiles("CCCCCC") self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 5) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 4) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 3) m = Chem.MolFromSmiles("CCC(C)CC") self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 5) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 5) self.assertTrue( len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 4, Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) m = Chem.MolFromSmiles("CCCC(C)C") self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 5) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 5) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 3) m = Chem.MolFromSmiles("CC(C)C(C)C") self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 5) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 6) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 4) m = Chem.MolFromSmiles("CC(C)(C)CC") self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 5) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 7) self.assertTrue( len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 3, Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) m = Chem.MolFromSmiles("C1CCCCC1") self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 6) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 6) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 6) m = Chem.MolFromSmiles("C1CC2C1CC2") self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 7) self.assertTrue( len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 10, Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 14) m = Chem.MolFromSmiles("CC2C1CCC12") self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 1, useBonds=1)) == 7) self.assertTrue(len(Chem.FindAllPathsOfLengthN(m, 2, useBonds=1)) == 11) # FIX: this result disagrees with the paper (which says 13), # but it seems right self.assertTrue( len(Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) == 15, Chem.FindAllPathsOfLengthN(m, 3, useBonds=1)) def test19Subgraphs(self): m = Chem.MolFromSmiles('C1CC1C') self.assertTrue(m) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1, 0)) == 4) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 5) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 4) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 4)) == 1) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 5)) == 0) # # Hexane example from Hall-Kier Rev.Comp.Chem. paper # Rev. Comp. Chem. 
vol 2, 367-422, (1991) # m = Chem.MolFromSmiles("CCCCCC") self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1)) == 5) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 4) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 3) l = Chem.FindAllSubgraphsOfLengthMToN(m, 1, 3) self.assertEqual(len(l), 3) self.assertEqual(len(l[0]), 5) self.assertEqual(len(l[1]), 4) self.assertEqual(len(l[2]), 3) self.assertRaises(ValueError, lambda: Chem.FindAllSubgraphsOfLengthMToN(m, 4, 3)) m = Chem.MolFromSmiles("CCC(C)CC") self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1)) == 5) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 5) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 5) m = Chem.MolFromSmiles("CCCC(C)C") self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1)) == 5) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 5) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 4) m = Chem.MolFromSmiles("CC(C)C(C)C") self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1)) == 5) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 6) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 6) m = Chem.MolFromSmiles("CC(C)(C)CC") self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1)) == 5) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 7) self.assertTrue( len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 7, Chem.FindAllSubgraphsOfLengthN(m, 3)) m = Chem.MolFromSmiles("C1CCCCC1") self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1)) == 6) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 6) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 6) #self.assertTrue(len(Chem.FindUniqueSubgraphsOfLengthN(m,1))==1) self.assertTrue(len(Chem.FindUniqueSubgraphsOfLengthN(m, 2)) == 1) self.assertTrue(len(Chem.FindUniqueSubgraphsOfLengthN(m, 3)) == 1) m = Chem.MolFromSmiles("C1CC2C1CC2") self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1)) == 7) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 10) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 16) m = Chem.MolFromSmiles("CC2C1CCC12") self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 1)) == 7) self.assertTrue(len(Chem.FindAllSubgraphsOfLengthN(m, 2)) == 11) self.assertTrue( len(Chem.FindAllSubgraphsOfLengthN(m, 3)) == 18, len(Chem.FindAllSubgraphsOfLengthN(m, 3))) def test20IsInRing(self): m = Chem.MolFromSmiles('C1CCC1C') self.assertTrue(m) self.assertTrue(m.GetAtomWithIdx(0).IsInRingSize(4)) self.assertTrue(m.GetAtomWithIdx(1).IsInRingSize(4)) self.assertTrue(m.GetAtomWithIdx(2).IsInRingSize(4)) self.assertTrue(m.GetAtomWithIdx(3).IsInRingSize(4)) self.assertTrue(not m.GetAtomWithIdx(4).IsInRingSize(4)) self.assertTrue(not m.GetAtomWithIdx(0).IsInRingSize(3)) self.assertTrue(not m.GetAtomWithIdx(1).IsInRingSize(3)) self.assertTrue(not m.GetAtomWithIdx(2).IsInRingSize(3)) self.assertTrue(not m.GetAtomWithIdx(3).IsInRingSize(3)) self.assertTrue(not m.GetAtomWithIdx(4).IsInRingSize(3)) self.assertTrue(m.GetBondWithIdx(0).IsInRingSize(4)) self.assertTrue(not m.GetBondWithIdx(3).IsInRingSize(4)) self.assertTrue(not m.GetBondWithIdx(0).IsInRingSize(3)) self.assertTrue(not m.GetBondWithIdx(3).IsInRingSize(3)) def test21Robustification(self): ok = False # FIX: at the moment I can't figure out how to catch the # actual exception that BPL is throwing when it gets # invalid arguments (Boost.Python.ArgumentError) try: Chem.MolFromSmiles('C=O').HasSubstructMatch(Chem.MolFromSmarts('fiib')) #except ValueError: # ok=True 
except Exception: ok = True self.assertTrue(ok) def test22DeleteSubstruct(self): query = Chem.MolFromSmarts('C(=O)O') mol = Chem.MolFromSmiles('CCC(=O)O') nmol = Chem.DeleteSubstructs(mol, query) self.assertTrue(Chem.MolToSmiles(nmol) == 'CC') mol = Chem.MolFromSmiles('CCC(=O)O.O=CO') # now delete only fragments nmol = Chem.DeleteSubstructs(mol, query, 1) self.assertTrue(Chem.MolToSmiles(nmol) == 'CCC(=O)O', Chem.MolToSmiles(nmol)) mol = Chem.MolFromSmiles('CCC(=O)O.O=CO') nmol = Chem.DeleteSubstructs(mol, query, 0) self.assertTrue(Chem.MolToSmiles(nmol) == 'CC') mol = Chem.MolFromSmiles('CCCO') nmol = Chem.DeleteSubstructs(mol, query, 0) self.assertTrue(Chem.MolToSmiles(nmol) == 'CCCO') # Issue 96 prevented this from working: mol = Chem.MolFromSmiles('CCC(=O)O.O=CO') nmol = Chem.DeleteSubstructs(mol, query, 1) self.assertTrue(Chem.MolToSmiles(nmol) == 'CCC(=O)O') nmol = Chem.DeleteSubstructs(nmol, query, 1) self.assertTrue(Chem.MolToSmiles(nmol) == 'CCC(=O)O') nmol = Chem.DeleteSubstructs(nmol, query, 0) self.assertTrue(Chem.MolToSmiles(nmol) == 'CC') def test23MolFileParsing(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'triazine.mol') #fileN = "../FileParsers/test_data/triazine.mol" with open(fileN, 'r') as inF: inD = inF.read() m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 9) m1 = Chem.MolFromMolFile(fileN) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 9) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'triazine.mof') self.assertRaises(IOError, lambda: Chem.MolFromMolFile(fileN)) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'list-query.mol') query = Chem.MolFromMolFile(fileN) smi = Chem.MolToSmiles(query) self.assertEqual(smi, 'c1ccccc1') smi = Chem.MolToSmarts(query) self.assertEqual(smi, '[#6]1:[#6]:[#6]:[#6]:[#6]:[#6,#7,#15]:1', smi) query = Chem.MolFromMolFile(fileN, sanitize=False) smi = Chem.MolToSmiles(query) self.assertEqual(smi, 'C1=CC=CC=C1') query.UpdatePropertyCache() smi = Chem.MolToSmarts(query) self.assertEqual(smi, '[#6]1=[#6]-[#6]=[#6]-[#6]=[#6,#7,#15]-1') smi = "C1=CC=CC=C1" mol = Chem.MolFromSmiles(smi, 0) self.assertTrue(mol.HasSubstructMatch(query)) Chem.SanitizeMol(mol) self.assertTrue(not mol.HasSubstructMatch(query)) mol = Chem.MolFromSmiles('N1=CC=CC=C1', 0) self.assertTrue(mol.HasSubstructMatch(query)) mol = Chem.MolFromSmiles('S1=CC=CC=C1', 0) self.assertTrue(not mol.HasSubstructMatch(query)) mol = Chem.MolFromSmiles('P1=CC=CC=C1', 0) self.assertTrue(mol.HasSubstructMatch(query)) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'issue123.mol') mol = Chem.MolFromMolFile(fileN) self.assertTrue(mol) self.assertEqual(mol.GetNumAtoms(), 23) mol = Chem.MolFromMolFile(fileN, removeHs=False) self.assertTrue(mol) self.assertEqual(mol.GetNumAtoms(), 39) # test23 was for Chem.DaylightFingerprint, which is deprecated def test24RDKFingerprint(self): from rdkit import DataStructs m1 = Chem.MolFromSmiles('C1=CC=CC=C1') fp1 = Chem.RDKFingerprint(m1) self.assertTrue(len(fp1) == 2048) m2 = Chem.MolFromSmiles('C1=CC=CC=C1') fp2 = Chem.RDKFingerprint(m2) tmp = DataStructs.TanimotoSimilarity(fp1, fp2) self.assertTrue(tmp == 1.0, tmp) m2 = Chem.MolFromSmiles('C1=CC=CC=N1') fp2 = Chem.RDKFingerprint(m2) self.assertTrue(len(fp2) == 2048) tmp = DataStructs.TanimotoSimilarity(fp1, fp2) self.assertTrue(tmp < 1.0, tmp) self.assertTrue(tmp > 
0.0, tmp) fp3 = Chem.RDKFingerprint(m1, tgtDensity=0.3) self.assertTrue(len(fp3) < 2048) m1 = Chem.MolFromSmiles('C1=CC=CC=C1') fp1 = Chem.RDKFingerprint(m1) m2 = Chem.MolFromSmiles('C1=CC=CC=N1') fp2 = Chem.RDKFingerprint(m2) self.assertNotEqual(fp1, fp2) atomInvariants = [1] * 6 fp1 = Chem.RDKFingerprint(m1, atomInvariants=atomInvariants) fp2 = Chem.RDKFingerprint(m2, atomInvariants=atomInvariants) self.assertEqual(fp1, fp2) m2 = Chem.MolFromSmiles('C1CCCCN1') fp1 = Chem.RDKFingerprint(m1, atomInvariants=atomInvariants, useBondOrder=False) fp2 = Chem.RDKFingerprint(m2, atomInvariants=atomInvariants, useBondOrder=False) self.assertEqual(fp1, fp2) # rooted at atom m1 = Chem.MolFromSmiles('CCCCCO') fp1 = Chem.RDKFingerprint(m1, 1, 4, nBitsPerHash=1, fromAtoms=[0]) self.assertEqual(fp1.GetNumOnBits(), 4) m1 = Chem.MolFromSmiles('CCCCCO') fp1 = Chem.RDKFingerprint(m1, 1, 4, nBitsPerHash=1, fromAtoms=[0, 5]) self.assertEqual(fp1.GetNumOnBits(), 8) # test sf.net issue 270: fp1 = Chem.RDKFingerprint(m1, atomInvariants=[x.GetAtomicNum() + 10 for x in m1.GetAtoms()]) # atomBits m1 = Chem.MolFromSmiles('CCCO') l = [] fp1 = Chem.RDKFingerprint(m1, minPath=1, maxPath=2, nBitsPerHash=1, atomBits=l) self.assertEqual(fp1.GetNumOnBits(), 4) self.assertEqual(len(l), m1.GetNumAtoms()) self.assertEqual(len(l[0]), 2) self.assertEqual(len(l[1]), 3) self.assertEqual(len(l[2]), 4) self.assertEqual(len(l[3]), 2) def test25SDMolSupplier(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf') #fileN = "../FileParsers/test_data/NCI_aids_few.sdf" sdSup = Chem.SDMolSupplier(fileN) molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] chgs192 = {8: 1, 11: 1, 15: -1, 18: -1, 20: 1, 21: 1, 23: -1, 25: -1} i = 0 for mol in sdSup: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 if (mol.GetProp("_Name") == "192"): # test parsed charges on one of the molecules for id in chgs192.keys(): self.assertTrue(mol.GetAtomWithIdx(id).GetFormalCharge() == chgs192[id]) self.assertRaises(StopIteration, lambda: next(sdSup)) sdSup.reset() ns = [mol.GetProp("_Name") for mol in sdSup] self.assertTrue(ns == molNames) sdSup = Chem.SDMolSupplier(fileN, 0) for mol in sdSup: self.assertTrue(not mol.HasProp("numArom")) sdSup = Chem.SDMolSupplier(fileN) self.assertTrue(len(sdSup) == 16) mol = sdSup[5] self.assertTrue(mol.GetProp("_Name") == "170") # test handling of H removal: fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'withHs.sdf') sdSup = Chem.SDMolSupplier(fileN) m = next(sdSup) self.assertTrue(m) self.assertTrue(m.GetNumAtoms() == 23) m = next(sdSup) self.assertTrue(m) self.assertTrue(m.GetNumAtoms() == 28) sdSup = Chem.SDMolSupplier(fileN, removeHs=False) m = next(sdSup) self.assertTrue(m) self.assertTrue(m.GetNumAtoms() == 39) m = next(sdSup) self.assertTrue(m) self.assertTrue(m.GetNumAtoms() == 30) with open(fileN, 'rb') as dFile: d = dFile.read() sdSup.SetData(d) m = next(sdSup) self.assertTrue(m) self.assertTrue(m.GetNumAtoms() == 23) m = next(sdSup) self.assertTrue(m) self.assertTrue(m.GetNumAtoms() == 28) sdSup.SetData(d, removeHs=False) m = next(sdSup) self.assertTrue(m) self.assertTrue(m.GetNumAtoms() == 39) m = next(sdSup) self.assertTrue(m) self.assertTrue(m.GetNumAtoms() == 30) # test strictParsing1: fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'strictLax1.sdf') #strict from file 
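# (added commentary, not part of the original test) strictParsing toggles how
# forgiving the SD property-block parser is: the assertions below show that with
# strictParsing=True a malformed data header causes the remaining properties of
# that record to be dropped, while strictParsing=False recovers whatever fields
# it can. A minimal, hedged sketch of the same idea on hypothetical in-memory
# data (the variable sd_text is illustrative, not taken from strictLax1.sdf):
#
#   lax = Chem.SDMolSupplier()
#   lax.SetData(sd_text, strictParsing=False)   # sd_text: some SDF string
#   recovered = [m.GetPropsAsDict() for m in lax if m is not None]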
sdSup = Chem.SDMolSupplier(fileN, strictParsing=True) i = 0 for mol in sdSup: self.assertTrue(mol.HasProp("_Name")) if (i == 0): self.assertTrue(not mol.HasProp("ID")) self.assertTrue(not mol.HasProp("ANOTHER_PROPERTY")) i += 1 self.assertTrue(i == 2) #lax from file sdSup = Chem.SDMolSupplier(fileN, strictParsing=False) i = 0 for mol in sdSup: self.assertTrue(mol.HasProp("_Name")) self.assertTrue(mol.HasProp("ID")) self.assertTrue(mol.HasProp("ANOTHER_PROPERTY")) i += 1 self.assertTrue(i == 2) #strict from text with open(fileN, 'rb') as dFile: d = dFile.read() sdSup = Chem.SDMolSupplier() sdSup.SetData(d, strictParsing=True) i = 0 for mol in sdSup: self.assertTrue(mol.HasProp("_Name")) if (i == 0): self.assertTrue(not mol.HasProp("ID")) self.assertTrue(not mol.HasProp("ANOTHER_PROPERTY")) i += 1 self.assertTrue(i == 2) #lax from text sdSup = Chem.SDMolSupplier() sdSup.SetData(d, strictParsing=False) i = 0 for mol in sdSup: self.assertTrue(mol.HasProp("_Name")) self.assertTrue(mol.HasProp("ID")) self.assertTrue(mol.HasProp("ANOTHER_PROPERTY")) i += 1 self.assertTrue(i == 2) # test strictParsing2: fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'strictLax2.sdf') #strict from file sdSup = Chem.SDMolSupplier(fileN, strictParsing=True) i = 0 for mol in sdSup: self.assertTrue(mol.HasProp("_Name")) self.assertTrue(mol.HasProp("ID")) self.assertTrue(mol.GetProp("ID") == "Lig1") self.assertTrue(mol.HasProp("ANOTHER_PROPERTY")) self.assertTrue(mol.GetProp("ANOTHER_PROPERTY") == \ "No blank line before dollars\n" \ "$$$$\n" \ "Structure1\n" \ "csChFnd70/05230312262D") i += 1 self.assertTrue(i == 1) #lax from file sdSup = Chem.SDMolSupplier(fileN, strictParsing=False) i = 0 for mol in sdSup: self.assertTrue(mol.HasProp("_Name")) self.assertTrue(mol.HasProp("ID")) self.assertTrue(mol.GetProp("ID") == "Lig2") self.assertTrue(mol.HasProp("ANOTHER_PROPERTY")) self.assertTrue(mol.GetProp("ANOTHER_PROPERTY") == "Value2") i += 1 self.assertTrue(i == 1) #strict from text with open(fileN, 'rb') as dFile: d = dFile.read() sdSup = Chem.SDMolSupplier() sdSup.SetData(d, strictParsing=True) i = 0 for mol in sdSup: self.assertTrue(mol.HasProp("_Name")) self.assertTrue(mol.HasProp("ID")) self.assertTrue(mol.GetProp("ID") == "Lig1") self.assertTrue(mol.HasProp("ANOTHER_PROPERTY")) self.assertTrue(mol.GetProp("ANOTHER_PROPERTY") == \ "No blank line before dollars\n" \ "$$$$\n" \ "Structure1\n" \ "csChFnd70/05230312262D") i += 1 self.assertTrue(i == 1) #lax from text sdSup = Chem.SDMolSupplier() sdSup.SetData(d, strictParsing=False) i = 0 for mol in sdSup: self.assertTrue(mol.HasProp("_Name")) self.assertTrue(mol.HasProp("ID")) self.assertTrue(mol.GetProp("ID") == "Lig2") self.assertTrue(mol.HasProp("ANOTHER_PROPERTY")) self.assertTrue(mol.GetProp("ANOTHER_PROPERTY") == "Value2") i += 1 self.assertTrue(i == 1) def test26SmiMolSupplier(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'first_200.tpsa.csv') #fileN = "../FileParsers/test_data/first_200.tpsa.csv" smiSup = Chem.SmilesMolSupplier(fileN, ",", 0, -1) mol = smiSup[16] self.assertTrue(mol.GetProp("TPSA") == "46.25") mol = smiSup[8] self.assertTrue(mol.GetProp("TPSA") == "65.18") self.assertTrue(len(smiSup) == 200) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'fewSmi.csv') #fileN = "../FileParsers/test_data/fewSmi.csv" smiSup = Chem.SmilesMolSupplier(fileN, delimiter=",", smilesColumn=1, nameColumn=0, titleLine=0) names = ["1", "2", 
"3", "4", "5", "6", "7", "8", "9", "10"] i = 0 for mol in smiSup: self.assertTrue(mol.GetProp("_Name") == names[i]) i += 1 mol = smiSup[3] self.assertTrue(mol.GetProp("_Name") == "4") self.assertTrue(mol.GetProp("Column_2") == "82.78") # and test doing a supplier from text: with open(fileN, 'r') as inF: inD = inF.read() smiSup.SetData(inD, delimiter=",", smilesColumn=1, nameColumn=0, titleLine=0) names = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"] i = 0 # iteration interface: for mol in smiSup: self.assertTrue(mol.GetProp("_Name") == names[i]) i += 1 self.assertTrue(i == 10) # random access: mol = smiSup[3] self.assertTrue(len(smiSup) == 10) self.assertTrue(mol.GetProp("_Name") == "4") self.assertTrue(mol.GetProp("Column_2") == "82.78") # issue 113: smiSup.SetData(inD, delimiter=",", smilesColumn=1, nameColumn=0, titleLine=0) self.assertTrue(len(smiSup) == 10) # and test failure handling: inD = """mol-1,CCC mol-2,CCCC mol-3,fail mol-4,CCOC """ smiSup.SetData(inD, delimiter=",", smilesColumn=1, nameColumn=0, titleLine=0) # there are 4 entries in the supplier: self.assertTrue(len(smiSup) == 4) # but the 3rd is a None: self.assertTrue(smiSup[2] is None) text="Id SMILES Column_2\n"+\ "mol-1 C 1.0\n"+\ "mol-2 CC 4.0\n"+\ "mol-4 CCCC 16.0" smiSup.SetData(text, delimiter=" ", smilesColumn=1, nameColumn=0, titleLine=1) self.assertTrue(len(smiSup) == 3) self.assertTrue(smiSup[0]) self.assertTrue(smiSup[1]) self.assertTrue(smiSup[2]) m = [x for x in smiSup] self.assertTrue(smiSup[2]) self.assertTrue(len(m) == 3) self.assertTrue(m[0].GetProp("Column_2") == "1.0") # test simple parsing and Issue 114: smis = ['CC', 'CCC', 'CCOC', 'CCCOCC', 'CCCOCCC'] inD = '\n'.join(smis) smiSup.SetData(inD, delimiter=",", smilesColumn=0, nameColumn=-1, titleLine=0) self.assertTrue(len(smiSup) == 5) m = [x for x in smiSup] self.assertTrue(smiSup[4]) self.assertTrue(len(m) == 5) # order dependence: smiSup.SetData(inD, delimiter=",", smilesColumn=0, nameColumn=-1, titleLine=0) self.assertTrue(smiSup[4]) self.assertTrue(len(smiSup) == 5) # this was a nasty BC: # asking for a particular entry with a higher index than what we've # already seen resulted in a duplicate: smis = ['CC', 'CCC', 'CCOC', 'CCCCOC'] inD = '\n'.join(smis) smiSup.SetData(inD, delimiter=",", smilesColumn=0, nameColumn=-1, titleLine=0) m = next(smiSup) m = smiSup[3] self.assertTrue(len(smiSup) == 4) with self.assertRaisesRegex(Exception, ""): smiSup[4] smiSup.SetData(inD, delimiter=",", smilesColumn=0, nameColumn=-1, titleLine=0) with self.assertRaisesRegex(Exception, ""): smiSup[4] sys.stderr.write( '>>> This may result in an infinite loop. 
It should finish almost instantly\n') self.assertEqual(len(smiSup), 4) sys.stderr.write('<<< OK, it finished.\n') def test27SmilesWriter(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'fewSmi.csv') #fileN = "../FileParsers/test_data/fewSmi.csv" smiSup = Chem.SmilesMolSupplier(fileN, delimiter=",", smilesColumn=1, nameColumn=0, titleLine=0) propNames = [] propNames.append("Column_2") ofile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Wrap', 'test_data', 'outSmiles.txt') writer = Chem.SmilesWriter(ofile) writer.SetProps(propNames) for mol in smiSup: writer.write(mol) writer.flush() def test28SmilesReverse(self): names = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10"] props = [ "34.14", "25.78", "106.51", "82.78", "60.16", "87.74", "37.38", "77.28", "65.18", "0.00" ] ofile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Wrap', 'test_data', 'outSmiles.txt') #ofile = "test_data/outSmiles.csv" smiSup = Chem.SmilesMolSupplier(ofile) i = 0 for mol in smiSup: #print([repr(x) for x in mol.GetPropNames()]) self.assertTrue(mol.GetProp("_Name") == names[i]) self.assertTrue(mol.GetProp("Column_2") == props[i]) i += 1 def writerSDFile(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf') #fileN = "../FileParsers/test_data/NCI_aids_few.sdf" ofile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Wrap', 'test_data', 'outNCI_few.sdf') writer = Chem.SDWriter(ofile) sdSup = Chem.SDMolSupplier(fileN) for mol in sdSup: writer.write(mol) writer.flush() def test29SDWriterLoop(self): self.writerSDFile() fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Wrap', 'test_data', 'outNCI_few.sdf') sdSup = Chem.SDMolSupplier(fileN) molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] chgs192 = {8: 1, 11: 1, 15: -1, 18: -1, 20: 1, 21: 1, 23: -1, 25: -1} i = 0 for mol in sdSup: #print('mol:',mol) #print('\t',molNames[i]) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 if (mol.GetProp("_Name") == "192"): # test parsed charges on one of the molecules for id in chgs192.keys(): self.assertTrue(mol.GetAtomWithIdx(id).GetFormalCharge() == chgs192[id]) def test30Issues109and110(self): """ issues 110 and 109 were both related to handling of explicit Hs in SMILES input. 
""" m1 = Chem.MolFromSmiles('N12[CH](SC(C)(C)[CH]1C(O)=O)[CH](C2=O)NC(=O)[CH](N)c3ccccc3') self.assertTrue(m1.GetNumAtoms() == 24) m2 = Chem.MolFromSmiles( 'C1C=C([CH](N)C(=O)N[C]2([H])[C]3([H])SC(C)(C)[CH](C(=O)O)N3C(=O)2)C=CC=1') self.assertTrue(m2.GetNumAtoms() == 24) smi1 = Chem.MolToSmiles(m1) smi2 = Chem.MolToSmiles(m2) self.assertTrue(smi1 == smi2) m1 = Chem.MolFromSmiles('[H]CCl') self.assertTrue(m1.GetNumAtoms() == 2) self.assertTrue(m1.GetAtomWithIdx(0).GetNumExplicitHs() == 1) m1 = Chem.MolFromSmiles('[H][CH2]Cl') self.assertTrue(m1.GetNumAtoms() == 2) self.assertTrue(m1.GetAtomWithIdx(0).GetNumExplicitHs() == 3) m2 = Chem.AddHs(m1) self.assertTrue(m2.GetNumAtoms() == 5) m2 = Chem.RemoveHs(m2) self.assertTrue(m2.GetNumAtoms() == 2) def test31ChiralitySmiles(self): m1 = Chem.MolFromSmiles('F[C@](Br)(I)Cl') self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 5) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'F[C@](Cl)(Br)I', Chem.MolToSmiles(m1, 1)) m1 = Chem.MolFromSmiles('CC1C[C@@]1(Cl)F') self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 6) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'CC1C[C@]1(F)Cl', Chem.MolToSmiles(m1, 1)) m1 = Chem.MolFromSmiles('CC1C[C@]1(Cl)F') self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 6) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'CC1C[C@@]1(F)Cl', Chem.MolToSmiles(m1, 1)) def test31aChiralitySubstructs(self): m1 = Chem.MolFromSmiles('CC1C[C@@]1(Cl)F') self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 6) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'CC1C[C@]1(F)Cl', Chem.MolToSmiles(m1, 1)) m2 = Chem.MolFromSmiles('CC1C[C@]1(Cl)F') self.assertTrue(m2 is not None) self.assertTrue(m2.GetNumAtoms() == 6) self.assertTrue(Chem.MolToSmiles(m2, 1) == 'CC1C[C@@]1(F)Cl', Chem.MolToSmiles(m2, 1)) self.assertTrue(m1.HasSubstructMatch(m1)) self.assertTrue(m1.HasSubstructMatch(m2)) self.assertTrue(m1.HasSubstructMatch(m1, useChirality=True)) self.assertTrue(not m1.HasSubstructMatch(m2, useChirality=True)) def _test32MolFilesWithChirality(self): inD = """chiral1.mol ChemDraw10160313232D 5 4 0 0 0 0 0 0 0 0999 V2000 0.0553 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.0553 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.7697 -0.6188 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0 -0.6592 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 -0.7697 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 0 2 4 1 1 2 5 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 5) self.assertTrue(smi == 'F[C@](Cl)(Br)I', smi) inD = """chiral2.cdxml ChemDraw10160314052D 5 4 0 0 0 0 0 0 0 0999 V2000 0.0553 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.0553 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.7697 -0.6188 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0 -0.6592 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 -0.7697 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 0 2 4 1 6 2 5 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 5) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'F[C@@](Cl)(Br)I') inD = """chiral1.mol ChemDraw10160313232D 5 4 0 0 0 0 0 0 0 0999 V2000 0.0553 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.0553 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.7697 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 -0.6592 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 0.7697 -0.6188 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 0 2 4 1 1 2 5 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 
5) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'F[C@](Cl)(Br)I') inD = """chiral1.mol ChemDraw10160313232D 5 4 0 0 0 0 0 0 0 0999 V2000 0.0553 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.7697 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 -0.6592 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 0.7697 -0.6188 0.0000 I 0 0 0 0 0 0 0 0 0 0 0 0 0.0553 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 1 3 1 1 1 4 1 0 1 5 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 5) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'F[C@](Cl)(Br)I') inD = """chiral3.mol ChemDraw10160314362D 4 3 0 0 0 0 0 0 0 0999 V2000 0.4125 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.4125 -0.2062 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.3020 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 -0.4125 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 1 2 4 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 4) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'F[C@H](Cl)Br') inD = """chiral4.mol ChemDraw10160314362D 4 3 0 0 0 0 0 0 0 0999 V2000 0.4125 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.4125 -0.2062 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0 -0.3020 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 -0.4125 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 1 2 4 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 4) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'FN(Cl)Br') inD = """chiral5.mol ChemDraw10160314362D 4 3 0 0 0 0 0 0 0 0999 V2000 0.4125 0.6188 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.4125 -0.2062 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0 -0.3020 -0.6188 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 -0.4125 -0.2062 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 1 2 4 1 0 M CHG 1 2 1 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 4) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'F[N@H+](Cl)Br') inD = """Case 10-14-3 ChemDraw10140308512D 4 3 0 0 0 0 0 0 0 0999 V2000 -0.8250 -0.4125 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.0000 -0.4125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.8250 -0.4125 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 0.0000 0.4125 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 0 2 4 1 1 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 4) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'F[C@H](Cl)Br') inD = """Case 10-14-4 ChemDraw10140308512D 4 3 0 0 0 0 0 0 0 0999 V2000 -0.8250 -0.4125 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.0000 -0.4125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.8250 -0.4125 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 0.0000 0.4125 0.0000 Br 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 1 2 4 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 4) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'F[C@H](Cl)Br') inD = """chiral4.mol ChemDraw10160315172D 6 6 0 0 0 0 0 0 0 0999 V2000 -0.4422 0.1402 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.4422 -0.6848 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.2723 -0.2723 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.8547 0.8547 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.6848 0.4422 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.8547 -0.8547 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 0 3 1 1 0 1 4 1 0 3 5 1 1 3 6 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 6) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'CC1C[C@@]1(F)Cl', Chem.MolToSmiles(m1, 1)) inD = """chiral4.mol ChemDraw10160315172D 6 6 0 0 
0 0 0 0 0 0999 V2000 -0.4422 0.1402 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.4422 -0.6848 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.2723 -0.2723 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.8547 0.8547 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.6848 0.4422 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 0.8547 -0.8547 0.0000 Cl 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 1 0 3 1 1 0 1 4 1 0 3 5 1 6 3 6 1 0 M END """ m1 = Chem.MolFromMolBlock(inD) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 6) self.assertTrue(Chem.MolToSmiles(m1, 1) == 'CC1C[C@]1(F)Cl', Chem.MolToSmiles(m1, 1)) def test33Issue65(self): """ issue 65 relates to handling of [H] in SMARTS """ m1 = Chem.MolFromSmiles('OC(O)(O)O') m2 = Chem.MolFromSmiles('OC(O)O') m3 = Chem.MolFromSmiles('OCO') q1 = Chem.MolFromSmarts('OC[H]', 1) q2 = Chem.MolFromSmarts('O[C;H1]', 1) q3 = Chem.MolFromSmarts('O[C;H1][H]', 1) self.assertTrue(not m1.HasSubstructMatch(q1)) self.assertTrue(not m1.HasSubstructMatch(q2)) self.assertTrue(not m1.HasSubstructMatch(q3)) self.assertTrue(m2.HasSubstructMatch(q1)) self.assertTrue(m2.HasSubstructMatch(q2)) self.assertTrue(m2.HasSubstructMatch(q3)) self.assertTrue(m3.HasSubstructMatch(q1)) self.assertTrue(not m3.HasSubstructMatch(q2)) self.assertTrue(not m3.HasSubstructMatch(q3)) m1H = Chem.AddHs(m1) m2H = Chem.AddHs(m2) m3H = Chem.AddHs(m3) q1 = Chem.MolFromSmarts('OC[H]') q2 = Chem.MolFromSmarts('O[C;H1]') q3 = Chem.MolFromSmarts('O[C;H1][H]') self.assertTrue(not m1H.HasSubstructMatch(q1)) self.assertTrue(not m1H.HasSubstructMatch(q2)) self.assertTrue(not m1H.HasSubstructMatch(q3)) #m2H.Debug() self.assertTrue(m2H.HasSubstructMatch(q1)) self.assertTrue(m2H.HasSubstructMatch(q2)) self.assertTrue(m2H.HasSubstructMatch(q3)) self.assertTrue(m3H.HasSubstructMatch(q1)) self.assertTrue(not m3H.HasSubstructMatch(q2)) self.assertTrue(not m3H.HasSubstructMatch(q3)) def test34Issue124(self): """ issue 124 relates to calculation of the distance matrix """ m = Chem.MolFromSmiles('CC=C') d = Chem.GetDistanceMatrix(m, 0) self.assertTrue(feq(d[0, 1], 1.0)) self.assertTrue(feq(d[0, 2], 2.0)) # force an update: d = Chem.GetDistanceMatrix(m, 1, 0, 1) self.assertTrue(feq(d[0, 1], 1.0)) self.assertTrue(feq(d[0, 2], 1.5)) def test35ChiralityPerception(self): """ Test perception of chirality and CIP encoding """ m = Chem.MolFromSmiles('F[C@]([C@])(Cl)Br') Chem.AssignStereochemistry(m, 1) self.assertTrue(m.GetAtomWithIdx(1).HasProp('_CIPCode')) self.assertFalse(m.GetAtomWithIdx(2).HasProp('_CIPCode')) Chem.RemoveStereochemistry(m) self.assertFalse(m.GetAtomWithIdx(1).HasProp('_CIPCode')) m = Chem.MolFromSmiles('F[C@H](C)C') Chem.AssignStereochemistry(m, 1) self.assertTrue(m.GetAtomWithIdx(1).GetChiralTag() == Chem.ChiralType.CHI_UNSPECIFIED) self.assertFalse(m.GetAtomWithIdx(1).HasProp('_CIPCode')) m = Chem.MolFromSmiles('F\\C=C/Cl') self.assertTrue(m.GetBondWithIdx(0).GetStereo() == Chem.BondStereo.STEREONONE) self.assertTrue(m.GetBondWithIdx(1).GetStereo() == Chem.BondStereo.STEREOZ) atoms = m.GetBondWithIdx(1).GetStereoAtoms() self.assertTrue(0 in atoms) self.assertTrue(3 in atoms) self.assertTrue(m.GetBondWithIdx(2).GetStereo() == Chem.BondStereo.STEREONONE) Chem.RemoveStereochemistry(m) self.assertTrue(m.GetBondWithIdx(1).GetStereo() == Chem.BondStereo.STEREONONE) m = Chem.MolFromSmiles('F\\C=CCl') self.assertTrue(m.GetBondWithIdx(1).GetStereo() == Chem.BondStereo.STEREONONE) def checkDefaultBondProperties(self, m): for bond in m.GetBonds(): self.assertIn(bond.GetBondType(), [Chem.BondType.SINGLE, Chem.BondType.DOUBLE]) self.assertEqual(bond.GetBondDir(), 
Chem.BondDir.NONE) self.assertEqual(list(bond.GetStereoAtoms()), []) self.assertEqual(bond.GetStereo(), Chem.BondStereo.STEREONONE) def assertHasDoubleBondStereo(self, smi): m = Chem.MolFromSmiles(smi) self.checkDefaultBondProperties(m) Chem.FindPotentialStereoBonds(m) for bond in m.GetBonds(): self.assertIn(bond.GetBondType(), [Chem.BondType.SINGLE, Chem.BondType.DOUBLE]) self.assertEqual(bond.GetBondDir(), Chem.BondDir.NONE) if bond.GetBondType() == Chem.BondType.DOUBLE: self.assertEqual(bond.GetStereo(), Chem.BondStereo.STEREOANY) self.assertEqual(len(list(bond.GetStereoAtoms())), 2) else: self.assertEqual(list(bond.GetStereoAtoms()), []) self.assertEqual(bond.GetStereo(), Chem.BondStereo.STEREONONE) def testFindPotentialStereoBonds(self): self.assertHasDoubleBondStereo("FC=CF") self.assertHasDoubleBondStereo("FC(Cl)=C(Br)I") self.assertHasDoubleBondStereo("FC=CC=CC=CCl") self.assertHasDoubleBondStereo("C1CCCCC1C=CC1CCCCC1") def assertDoesNotHaveDoubleBondStereo(self, smi): m = Chem.MolFromSmiles(smi) self.checkDefaultBondProperties(m) Chem.FindPotentialStereoBonds(m) self.checkDefaultBondProperties(m) def testFindPotentialStereoBondsShouldNotFindThisDoubleBondAsStereo(self): self.assertDoesNotHaveDoubleBondStereo("FC(F)=CF") self.assertDoesNotHaveDoubleBondStereo("C=C") self.assertDoesNotHaveDoubleBondStereo("C1CCCCC1C(C1CCCCC1)=CC1CCCCC1") def assertDoubleBondStereo(self, smi, stereo): mol = Chem.MolFromSmiles(smi) bond = mol.GetBondWithIdx(1) self.assertEqual(bond.GetBondType(), Chem.BondType.DOUBLE) self.assertEqual(bond.GetStereo(), stereo) self.assertEqual(list(bond.GetStereoAtoms()), [0, 3]) def allStereoBonds(self, bonds): for bond in bonds: self.assertEqual(len(list(bond.GetStereoAtoms())), 2) def testBondSetStereo(self): for testAssignStereo in [False, True]: mol = Chem.MolFromSmiles("FC=CF") Chem.FindPotentialStereoBonds(mol) for bond in mol.GetBonds(): if (bond.GetBondType() == Chem.BondType.DOUBLE and bond.GetStereo() == Chem.BondStereo.STEREOANY): break self.assertEqual(bond.GetBondType(), Chem.BondType.DOUBLE) self.assertEqual(bond.GetStereo(), Chem.BondStereo.STEREOANY) self.assertEqual(list(bond.GetStereoAtoms()), [0, 3]) bond.SetStereo(Chem.BondStereo.STEREOTRANS) self.assertEqual(bond.GetStereo(), Chem.BondStereo.STEREOTRANS) if testAssignStereo: # should be invariant of Chem.AssignStereochemistry being called Chem.AssignStereochemistry(mol, force=True) smi = Chem.MolToSmiles(mol, isomericSmiles=True) self.allStereoBonds([bond]) self.assertEqual(smi, "F/C=C/F") self.assertDoubleBondStereo(smi, Chem.BondStereo.STEREOE) bond.SetStereo(Chem.BondStereo.STEREOCIS) self.assertEqual(bond.GetStereo(), Chem.BondStereo.STEREOCIS) if testAssignStereo: Chem.AssignStereochemistry(mol, force=True) smi = Chem.MolToSmiles(mol, isomericSmiles=True) self.allStereoBonds([bond]) self.assertEqual(smi, r"F/C=C\F") self.assertDoubleBondStereo(smi, Chem.BondStereo.STEREOZ) def recursive_enumerate_stereo_bonds(self, mol, done_bonds, bonds): if not bonds: yield done_bonds, Chem.Mol(mol) return bond = bonds[0] child_bonds = bonds[1:] self.assertEqual(len(list(bond.GetStereoAtoms())), 2) bond.SetStereo(Chem.BondStereo.STEREOTRANS) for isomer in self.recursive_enumerate_stereo_bonds(mol, done_bonds + [Chem.BondStereo.STEREOE], child_bonds): yield isomer self.assertEqual(len(list(bond.GetStereoAtoms())), 2) bond.SetStereo(Chem.BondStereo.STEREOCIS) for isomer in self.recursive_enumerate_stereo_bonds(mol, done_bonds + [Chem.BondStereo.STEREOZ], child_bonds): yield isomer def 
testBondSetStereoDifficultCase(self): unspec_smiles = "CCC=CC(CO)=C(C)CC" mol = Chem.MolFromSmiles(unspec_smiles) Chem.FindPotentialStereoBonds(mol) stereo_bonds = [] for bond in mol.GetBonds(): if bond.GetStereo() == Chem.BondStereo.STEREOANY: stereo_bonds.append(bond) isomers = set() for bond_stereo, isomer in self.recursive_enumerate_stereo_bonds(mol, [], stereo_bonds): self.allStereoBonds(stereo_bonds) isosmi = Chem.MolToSmiles(isomer, isomericSmiles=True) self.allStereoBonds(stereo_bonds) self.assertNotIn(isosmi, isomers) isomers.add(isosmi) isomol = Chem.MolFromSmiles(isosmi) round_trip_stereo = [ b.GetStereo() for b in isomol.GetBonds() if b.GetStereo() != Chem.BondStereo.STEREONONE ] self.assertEqual(bond_stereo, round_trip_stereo) self.assertEqual(len(isomers), 4) def getNumUnspecifiedBondStereo(self, smi): mol = Chem.MolFromSmiles(smi) Chem.FindPotentialStereoBonds(mol) count = 0 for bond in mol.GetBonds(): if bond.GetStereo() == Chem.BondStereo.STEREOANY: count += 1 return count def testBondSetStereoReallyDifficultCase(self): # this one is much trickier because a double bond can gain and # lose it's stereochemistry based upon whether 2 other double # bonds have the same or different stereo chemistry. unspec_smiles = "CCC=CC(C=CCC)=C(CO)CC" mol = Chem.MolFromSmiles(unspec_smiles) Chem.FindPotentialStereoBonds(mol) stereo_bonds = [] for bond in mol.GetBonds(): if bond.GetStereo() == Chem.BondStereo.STEREOANY: stereo_bonds.append(bond) self.assertEqual(len(stereo_bonds), 2) isomers = set() for bond_stereo, isomer in self.recursive_enumerate_stereo_bonds(mol, [], stereo_bonds): isosmi = Chem.MolToSmiles(isomer, isomericSmiles=True) isomers.add(isosmi) self.assertEqual(len(isomers), 3) # one of these then gains a new stereo bond due to the # introduction of a new symmetry counts = {} for isosmi in isomers: num_unspecified = self.getNumUnspecifiedBondStereo(isosmi) counts[num_unspecified] = counts.get(num_unspecified, 0) + 1 # 2 of the isomers don't have any unspecified bond stereo centers # left, 1 does self.assertEqual(counts, {0: 2, 1: 1}) def assertBondSetStereoIsAlwaysEquivalent(self, all_smiles, desired_stereo, bond_idx): refSmiles = None for smi in all_smiles: mol = Chem.MolFromSmiles(smi) doubleBond = None for bond in mol.GetBonds(): if bond.GetBondType() == Chem.BondType.DOUBLE: doubleBond = bond self.assertTrue(doubleBond is not None) Chem.FindPotentialStereoBonds(mol) doubleBond.SetStereo(desired_stereo) isosmi = Chem.MolToSmiles(mol, isomericSmiles=True) if refSmiles is None: refSmiles = isosmi self.assertEqual(refSmiles, isosmi) def testBondSetStereoAllHalogens(self): # can't get much more brutal than this test from itertools import combinations, permutations halogens = ['F', 'Cl', 'Br', 'I'] # binary double bond stereo for unique_set in combinations(halogens, 2): all_smiles = [] for fmt in ['%sC=C%s', 'C(%s)=C%s']: for ordering in permutations(unique_set): all_smiles.append(fmt % ordering) #print(fmt, all_smiles) for desired_stereo in [Chem.BondStereo.STEREOTRANS, Chem.BondStereo.STEREOCIS]: self.assertBondSetStereoIsAlwaysEquivalent(all_smiles, desired_stereo, 1) # tertiary double bond stereo for unique_set in combinations(halogens, 3): for mono_side in unique_set: halogens_left = list(unique_set) halogens_left.remove(mono_side) for binary_side in combinations(halogens_left, 2): all_smiles = [] for binary_side_permutation in permutations(binary_side): all_smiles.append('%sC=C(%s)%s' % ((mono_side, ) + binary_side_permutation)) all_smiles.append('C(%s)=C(%s)%s' % 
((mono_side, ) + binary_side_permutation)) all_smiles.append('%sC(%s)=C%s' % (binary_side_permutation + (mono_side, ))) all_smiles.append('C(%s)(%s)=C%s' % (binary_side_permutation + (mono_side, ))) #print(all_smiles) for desired_stereo in [Chem.BondStereo.STEREOTRANS, Chem.BondStereo.STEREOCIS]: self.assertBondSetStereoIsAlwaysEquivalent(all_smiles, desired_stereo, 1) # quaternary double bond stereo for unique_ordering in permutations(halogens): left_side = unique_ordering[:2] rght_side = unique_ordering[2:] all_smiles = [] for left_side_permutation in permutations(left_side): for rght_side_permutation in permutations(rght_side): for smifmt in ['%sC(%s)=C(%s)%s', 'C(%s)(%s)=C(%s)%s']: all_smiles.append(smifmt % (left_side_permutation + rght_side_permutation)) #print(all_smiles) for desired_stereo in [Chem.BondStereo.STEREOTRANS, Chem.BondStereo.STEREOCIS]: self.assertBondSetStereoIsAlwaysEquivalent(all_smiles, desired_stereo, 1) def testBondSetStereoAtoms(self): # use this difficult molecule that only generates 4 isomers, but # assume all double bonds are stereo! unspec_smiles = "CCC=CC(C=CCC)=C(CO)CC" mol = Chem.MolFromSmiles(unspec_smiles) def getNbr(atom, exclude): for nbr in atom.GetNeighbors(): if nbr.GetIdx() not in exclude: return nbr raise ValueError("No neighbor found!") double_bonds = [] for bond in mol.GetBonds(): if bond.GetBondType() == 2: double_bonds.append(bond) exclude = {bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()} bgnNbr = getNbr(bond.GetBeginAtom(), exclude) endNbr = getNbr(bond.GetEndAtom(), exclude) bond.SetStereoAtoms(bgnNbr.GetIdx(), endNbr.GetIdx()) self.assertEqual(len(double_bonds), 3) import itertools stereos = [Chem.BondStereo.STEREOE, Chem.BondStereo.STEREOZ] isomers = set() for stereo_config in itertools.product(stereos, repeat=len(double_bonds)): for bond, stereo in zip(double_bonds, stereo_config): bond.SetStereo(stereo) smi = Chem.MolToSmiles(mol, True) isomers.add(smi) # the dependent double bond stereo isn't picked up by this, should it? self.assertEqual(len(isomers), 6) # round tripping them through one more time does pick up the dependency, so meh? 
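# (added commentary, not part of the original test) "Round tripping" here means
# writing each isomer out as canonical isomeric SMILES and re-parsing it, so that
# stereo perception runs again from scratch; duplicates caused by the dependent
# double bond then collapse, which is why the six strings collected above reduce
# to four below. A minimal sketch of that canonicalization step, assuming smi is
# any SMILES string:
#
#   canonical = Chem.MolToSmiles(Chem.MolFromSmiles(smi), isomericSmiles=True)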
round_trip_isomers = set() for smi in isomers: isosmi = Chem.MolToSmiles(Chem.MolFromSmiles(smi), True) round_trip_isomers.add(isosmi) self.assertEqual(len(round_trip_isomers), 4) def test36SubstructMatchStr(self): """ test the _SubstructMatchStr function """ query = Chem.MolFromSmarts('[n,p]1ccccc1') self.assertTrue(query) mol = Chem.MolFromSmiles('N1=CC=CC=C1') self.assertTrue(mol.HasSubstructMatch(query)) self.assertTrue(Chem._HasSubstructMatchStr(mol.ToBinary(), query)) mol = Chem.MolFromSmiles('S1=CC=CC=C1') self.assertTrue(not Chem._HasSubstructMatchStr(mol.ToBinary(), query)) self.assertTrue(not mol.HasSubstructMatch(query)) mol = Chem.MolFromSmiles('P1=CC=CC=C1') self.assertTrue(mol.HasSubstructMatch(query)) self.assertTrue(Chem._HasSubstructMatchStr(mol.ToBinary(), query)) def test37SanitException(self): mol = Chem.MolFromSmiles('CC(C)(C)(C)C', 0) self.assertTrue(mol) self.assertRaises(ValueError, lambda: Chem.SanitizeMol(mol)) def test38TDTSuppliers(self): data = """$SMI<Cc1nnc(N)nc1C> CAS<17584-12-2> | $SMI<Cc1n[nH]c(=O)nc1N> CAS<~> | $SMI<Cc1n[nH]c(=O)[nH]c1=O> CAS<932-53-6> | $SMI<Cc1nnc(NN)nc1O> CAS<~> |""" suppl = Chem.TDTMolSupplier() suppl.SetData(data, "CAS") i = 0 for mol in suppl: self.assertTrue(mol) self.assertTrue(mol.GetNumAtoms()) self.assertTrue(mol.HasProp("CAS")) self.assertTrue(mol.HasProp("_Name")) self.assertTrue(mol.GetProp("CAS") == mol.GetProp("_Name")) self.assertTrue(mol.GetNumConformers() == 0) i += 1 self.assertTrue(i == 4) self.assertTrue(len(suppl) == 4) def test38Issue266(self): """ test issue 266: generation of kekulized smiles""" mol = Chem.MolFromSmiles('c1ccccc1') Chem.Kekulize(mol) smi = Chem.MolToSmiles(mol) self.assertTrue(smi == 'c1ccccc1') smi = Chem.MolToSmiles(mol, kekuleSmiles=True) self.assertTrue(smi == 'C1=CC=CC=C1') def test39Issue273(self): """ test issue 273: MolFileComments and MolFileInfo props ending up in SD files """ fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Wrap', 'test_data', 'outNCI_few.sdf') suppl = Chem.SDMolSupplier(fileN) ms = [x for x in suppl] for m in ms: self.assertTrue(m.HasProp('_MolFileInfo')) self.assertTrue(m.HasProp('_MolFileComments')) fName = tempfile.NamedTemporaryFile(suffix='.sdf', delete=False).name w = Chem.SDWriter(fName) w.SetProps(ms[0].GetPropNames()) for m in ms: w.write(m) w = None with open(fName, 'r') as txtFile: txt = txtFile.read() os.unlink(fName) self.assertTrue(txt.find('MolFileInfo') == -1) self.assertTrue(txt.find('MolFileComments') == -1) def test40SmilesRootedAtAtom(self): """ test the rootAtAtom functionality """ smi = 'CN(C)C' m = Chem.MolFromSmiles(smi) self.assertTrue(Chem.MolToSmiles(m) == 'CN(C)C') self.assertTrue(Chem.MolToSmiles(m, rootedAtAtom=1) == 'N(C)(C)C') def test41SetStreamIndices(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf') allIndices = [] ifs = open(fileN, 'rb') addIndex = True line = True pos = 0 while (line): if (addIndex): pos = ifs.tell() line = ifs.readline().decode('utf-8') if (line): if (addIndex): allIndices.append(pos) addIndex = (line[:4] == '$$$$') ifs.close() indices = allIndices sdSup = Chem.SDMolSupplier(fileN) molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] sdSup._SetStreamIndices(indices) self.assertTrue(len(sdSup) == 16) mol = sdSup[5] self.assertTrue(mol.GetProp("_Name") == "170") i = 0 for mol in sdSup: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 ns 
= [mol.GetProp("_Name") for mol in sdSup] self.assertTrue(ns == molNames) # this can also be used to skip molecules in the file: indices = [allIndices[0], allIndices[2], allIndices[5]] sdSup._SetStreamIndices(indices) self.assertTrue(len(sdSup) == 3) mol = sdSup[2] self.assertTrue(mol.GetProp("_Name") == "170") # or to reorder them: indices = [allIndices[0], allIndices[5], allIndices[2]] sdSup._SetStreamIndices(indices) self.assertTrue(len(sdSup) == 3) mol = sdSup[1] self.assertTrue(mol.GetProp("_Name") == "170") def test42LifeTheUniverseAndEverything(self): self.assertTrue(True) def test43TplFileParsing(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'cmpd2.tpl') m1 = Chem.MolFromTPLFile(fileN) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 12) self.assertTrue(m1.GetNumConformers() == 2) m1 = Chem.MolFromTPLFile(fileN, skipFirstConf=True) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 12) self.assertTrue(m1.GetNumConformers() == 1) with open(fileN, 'r') as blockFile: block = blockFile.read() m1 = Chem.MolFromTPLBlock(block) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 12) self.assertTrue(m1.GetNumConformers() == 2) def test44TplFileWriting(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'cmpd2.tpl') m1 = Chem.MolFromTPLFile(fileN) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 12) self.assertTrue(m1.GetNumConformers() == 2) block = Chem.MolToTPLBlock(m1) m1 = Chem.MolFromTPLBlock(block) self.assertTrue(m1 is not None) self.assertTrue(m1.GetNumAtoms() == 12) self.assertTrue(m1.GetNumConformers() == 2) def test45RingInfo(self): """ test the RingInfo class """ smi = 'CNC' m = Chem.MolFromSmiles(smi) ri = m.GetRingInfo() self.assertTrue(ri) self.assertTrue(ri.NumRings() == 0) self.assertFalse(ri.IsAtomInRingOfSize(0, 3)) self.assertFalse(ri.IsAtomInRingOfSize(1, 3)) self.assertFalse(ri.IsAtomInRingOfSize(2, 3)) self.assertFalse(ri.IsBondInRingOfSize(1, 3)) self.assertFalse(ri.IsBondInRingOfSize(2, 3)) if hasattr(Chem,'FindRingFamilies'): self.assertEquals(ri.AtomRingFamilies(),()) smi = 'C1CC2C1C2' m = Chem.MolFromSmiles(smi) ri = m.GetRingInfo() self.assertTrue(ri) self.assertEquals(ri.NumRings(), 2) self.assertFalse(ri.IsAtomInRingOfSize(0, 3)) self.assertTrue(ri.IsAtomInRingOfSize(0, 4)) self.assertFalse(ri.IsBondInRingOfSize(0, 3)) self.assertTrue(ri.IsBondInRingOfSize(0, 4)) self.assertTrue(ri.IsAtomInRingOfSize(2, 4)) self.assertTrue(ri.IsAtomInRingOfSize(2, 3)) self.assertTrue(ri.IsBondInRingOfSize(2, 3)) self.assertTrue(ri.IsBondInRingOfSize(2, 4)) if hasattr(Chem,'FindRingFamilies'): ri = m.GetRingInfo() self.assertFalse(ri.AreRingFamiliesInitialized()) Chem.FindRingFamilies(m) ri = m.GetRingInfo() self.assertTrue(ri.AreRingFamiliesInitialized()) self.assertEquals(ri.NumRingFamilies(),2) self.assertEquals(sorted(ri.AtomRingFamilies()),[(0, 1, 2, 3), (2, 3, 4)]) def test46ReplaceCore(self): """ test the ReplaceCore functionality """ core = Chem.MolFromSmiles('C=O') smi = 'CCC=O' m = Chem.MolFromSmiles(smi) r = Chem.ReplaceCore(m, core) self.assertTrue(r) self.assertEqual(Chem.MolToSmiles(r, True), '[1*]CC') smi = 'C1CC(=O)CC1' m = Chem.MolFromSmiles(smi) r = Chem.ReplaceCore(m, core) self.assertTrue(r) self.assertEqual(Chem.MolToSmiles(r, True), '[1*]CCCC[2*]') smi = 'C1CC(=N)CC1' m = Chem.MolFromSmiles(smi) r = Chem.ReplaceCore(m, core) self.assertFalse(r) # smiles, smarts, replaceDummies, labelByIndex, 
useChirality expected = { ('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', False, False, False): '[1*]OC.[2*]NC', ('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', False, False, True): '[1*]NC.[2*]OC', ('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', False, True, False): '[3*]OC.[4*]NC', ('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', False, True, True): '[3*]NC.[4*]OC', ('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', True, False, False): '[1*]C.[2*]C', ('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', True, False, True): '[1*]C.[2*]C', ('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', True, True, False): '[3*]C.[4*]C', ('C1O[C@@]1(OC)NC', 'C1O[C@]1(*)*', True, True, True): '[3*]C.[4*]C', ('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', False, False, False): '[1*]OC.[2*]NC', ('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', False, False, True): '[1*]OC.[2*]NC', ('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', False, True, False): '[3*]OC.[4*]NC', ('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', False, True, True): '[3*]OC.[4*]NC', ('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', True, False, False): '[1*]C.[2*]C', ('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', True, False, True): '[1*]C.[2*]C', ('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', True, True, False): '[3*]C.[4*]C', ('C1O[C@]1(OC)NC', 'C1O[C@]1(*)*', True, True, True): '[3*]C.[4*]C', } for (smiles, smarts, replaceDummies, labelByIndex, useChirality), expected_smiles in expected.items(): mol = Chem.MolFromSmiles(smiles) core = Chem.MolFromSmarts(smarts) nm = Chem.ReplaceCore(mol, core, replaceDummies=replaceDummies, labelByIndex=labelByIndex, useChirality=useChirality) if Chem.MolToSmiles(nm, True) != expected_smiles: print( "ReplaceCore(%r, %r, replaceDummies=%r, labelByIndex=%r, useChirality=%r" % (smiles, smarts, replaceDummies, labelByIndex, useChirality), file=sys.stderr) print("expected: %s\ngot: %s" % (expected_smiles, Chem.MolToSmiles(nm, True)), file=sys.stderr) self.assertEqual(expected_smiles, Chem.MolToSmiles(nm, True)) matchVect = mol.GetSubstructMatch(core, useChirality=useChirality) nm = Chem.ReplaceCore(mol, core, matchVect, replaceDummies=replaceDummies, labelByIndex=labelByIndex) if Chem.MolToSmiles(nm, True) != expected_smiles: print( "ReplaceCore(%r, %r, %r, replaceDummies=%r, labelByIndex=%rr" % (smiles, smarts, matchVect, replaceDummies, labelByIndex), file=sys.stderr) print("expected: %s\ngot: %s" % (expected_smiles, Chem.MolToSmiles(nm, True)), file=sys.stderr) self.assertEqual(expected_smiles, Chem.MolToSmiles(nm, True)) mol = Chem.MolFromSmiles("C") smarts = Chem.MolFromSmarts("C") try: Chem.ReplaceCore(mol, smarts, (3, )) self.asssertFalse(True) except: pass mol = Chem.MolFromSmiles("C") smarts = Chem.MolFromSmarts("C") try: Chem.ReplaceCore(mol, smarts, (0, 0)) self.asssertFalse(True) except: pass def test47RWMols(self): """ test the RWMol class """ mol = Chem.MolFromSmiles('C1CCC1') self.assertTrue(type(mol) == Chem.Mol) for rwmol in [Chem.EditableMol(mol), Chem.RWMol(mol)]: self.assertTrue(type(rwmol) in [Chem.EditableMol, Chem.RWMol]) newAt = Chem.Atom(8) rwmol.ReplaceAtom(0, newAt) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'C1COC1') rwmol.RemoveBond(0, 1) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'CCCO') a = Chem.Atom(7) idx = rwmol.AddAtom(a) self.assertEqual(rwmol.GetMol().GetNumAtoms(), 5) self.assertEqual(idx, 4) idx = rwmol.AddBond(0, 4, order=Chem.BondType.SINGLE) self.assertEqual(idx, 4) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'CCCON') rwmol.AddBond(4, 1, order=Chem.BondType.SINGLE) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'C1CNOC1') rwmol.RemoveAtom(3) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'CCNO') # practice shooting 
ourselves in the foot: m = Chem.MolFromSmiles('c1ccccc1') em = Chem.EditableMol(m) em.RemoveAtom(0) m2 = em.GetMol() self.assertRaises(ValueError, lambda: Chem.SanitizeMol(m2)) m = Chem.MolFromSmiles('c1ccccc1') em = Chem.EditableMol(m) em.RemoveBond(0, 1) m2 = em.GetMol() self.assertRaises(ValueError, lambda: Chem.SanitizeMol(m2)) # boundary cases: # removing non-existent bonds: m = Chem.MolFromSmiles('c1ccccc1') em = Chem.EditableMol(m) em.RemoveBond(0, 2) m2 = em.GetMol() Chem.SanitizeMol(m2) self.assertTrue(Chem.MolToSmiles(m2) == 'c1ccccc1') # removing non-existent atoms: m = Chem.MolFromSmiles('c1ccccc1') em = Chem.EditableMol(m) self.assertRaises(RuntimeError, lambda: em.RemoveAtom(12)) # confirm that an RWMol can be constructed without arguments m = Chem.RWMol() # test replaceAtom/Bond preserving properties mol = Chem.MolFromSmiles('C1CCC1') mol2 = Chem.MolFromSmiles('C1CCC1') mol.GetAtomWithIdx(0).SetProp("foo", "bar") mol.GetBondWithIdx(0).SetProp("foo", "bar") newBond = mol2.GetBondWithIdx(0) self.assertTrue(type(mol) == Chem.Mol) for rwmol in [Chem.EditableMol(mol), Chem.RWMol(mol)]: newAt = Chem.Atom(8) rwmol.ReplaceAtom(0, newAt) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'C1COC1') self.assertFalse(rwmol.GetMol().GetAtomWithIdx(0).HasProp("foo")) for rwmol in [Chem.EditableMol(mol), Chem.RWMol(mol)]: newAt = Chem.Atom(8) rwmol.ReplaceAtom(0, newAt, preserveProps=True) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'C1COC1') self.assertTrue(rwmol.GetMol().GetAtomWithIdx(0).HasProp("foo")) self.assertEqual(rwmol.GetMol().GetAtomWithIdx(0).GetProp("foo"), "bar") for rwmol in [Chem.EditableMol(mol), Chem.RWMol(mol)]: rwmol.ReplaceBond(0, newBond) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'C1CCC1') self.assertFalse(rwmol.GetMol().GetBondWithIdx(0).HasProp("foo")) for rwmol in [Chem.EditableMol(mol), Chem.RWMol(mol)]: rwmol.ReplaceBond(0, newBond, preserveProps=True) self.assertTrue(Chem.MolToSmiles(rwmol.GetMol()) == 'C1CCC1') self.assertTrue(rwmol.GetMol().GetBondWithIdx(0).HasProp("foo")) self.assertEqual(rwmol.GetMol().GetBondWithIdx(0).GetProp("foo"), "bar") def test47SmartsPieces(self): """ test the GetAtomSmarts and GetBondSmarts functions """ m = Chem.MolFromSmarts("[C,N]C") self.assertTrue(m.GetAtomWithIdx(0).GetSmarts() == '[C,N]') self.assertTrue(m.GetAtomWithIdx(1).GetSmarts() == 'C') self.assertEqual(m.GetBondBetweenAtoms(0, 1).GetSmarts(), '') m = Chem.MolFromSmarts("[$(C=O)]-O") self.assertTrue(m.GetAtomWithIdx(0).GetSmarts() == '[$(C=O)]') self.assertTrue(m.GetAtomWithIdx(1).GetSmarts() == 'O') self.assertTrue(m.GetBondBetweenAtoms(0, 1).GetSmarts() == '-') m = Chem.MolFromSmiles("CO") self.assertTrue(m.GetAtomWithIdx(0).GetSmarts() == 'C') self.assertTrue(m.GetAtomWithIdx(1).GetSmarts() == 'O') self.assertTrue(m.GetBondBetweenAtoms(0, 1).GetSmarts() == '') self.assertTrue(m.GetBondBetweenAtoms(0, 1).GetSmarts(allBondsExplicit=True) == '-') m = Chem.MolFromSmiles("C=O") self.assertTrue(m.GetAtomWithIdx(0).GetSmarts() == 'C') self.assertTrue(m.GetAtomWithIdx(1).GetSmarts() == 'O') self.assertTrue(m.GetBondBetweenAtoms(0, 1).GetSmarts() == '=') m = Chem.MolFromSmiles('C[C@H](F)[15NH3+]') self.assertEqual(m.GetAtomWithIdx(0).GetSmarts(), 'C') self.assertEqual(m.GetAtomWithIdx(0).GetSmarts(allHsExplicit=True), '[CH3]') self.assertEqual(m.GetAtomWithIdx(3).GetSmarts(), '[15NH3+]') self.assertEqual(m.GetAtomWithIdx(3).GetSmarts(allHsExplicit=True), '[15NH3+]') def test48Issue1928819(self): """ test a crash involving looping directly over mol 
suppliers """ fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf') ms = [x for x in Chem.SDMolSupplier(fileN)] self.assertEqual(len(ms), 16) count = 0 for m in Chem.SDMolSupplier(fileN): count += 1 self.assertEqual(count, 16) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'fewSmi.csv') count = 0 for m in Chem.SmilesMolSupplier(fileN, titleLine=False, smilesColumn=1, delimiter=','): count += 1 self.assertEqual(count, 10) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'acd_few.tdt') count = 0 for m in Chem.TDTMolSupplier(fileN): count += 1 self.assertEqual(count, 10) def test49Issue1932365(self): """ test aromatic Se and Te from smiles/smarts """ m = Chem.MolFromSmiles('c1ccc[se]1') self.assertTrue(m) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(4).GetIsAromatic()) m = Chem.MolFromSmiles('c1ccc[te]1') self.assertTrue(m) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(4).GetIsAromatic()) m = Chem.MolFromSmiles('C1=C[Se]C=C1') self.assertTrue(m) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(2).GetIsAromatic()) m = Chem.MolFromSmiles('C1=C[Te]C=C1') self.assertTrue(m) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(2).GetIsAromatic()) p = Chem.MolFromSmarts('[se]') self.assertTrue(Chem.MolFromSmiles('c1ccc[se]1').HasSubstructMatch(p)) self.assertFalse(Chem.MolFromSmiles('C1=CCC[Se]1').HasSubstructMatch(p)) p = Chem.MolFromSmarts('[te]') self.assertTrue(Chem.MolFromSmiles('c1ccc[te]1').HasSubstructMatch(p)) self.assertFalse(Chem.MolFromSmiles('C1=CCC[Te]1').HasSubstructMatch(p)) def test50Issue1968608(self): """ test sf.net issue 1968608 """ smarts = Chem.MolFromSmarts("[r5]") mol = Chem.MolFromSmiles("N12CCC36C1CC(C(C2)=CCOC4CC5=O)C4C3N5c7ccccc76") count = len(mol.GetSubstructMatches(smarts, uniquify=0)) self.assertTrue(count == 9) def test51RadicalHandling(self): """ test handling of atoms with radicals """ mol = Chem.MolFromSmiles("[C]C") self.assertTrue(mol) atom = mol.GetAtomWithIdx(0) self.assertTrue(atom.GetNumRadicalElectrons() == 3) self.assertTrue(atom.GetNoImplicit()) atom.SetNoImplicit(False) atom.SetNumRadicalElectrons(1) mol.UpdatePropertyCache() self.assertTrue(atom.GetNumRadicalElectrons() == 1) self.assertTrue(atom.GetNumImplicitHs() == 2) mol = Chem.MolFromSmiles("[c]1ccccc1") self.assertTrue(mol) atom = mol.GetAtomWithIdx(0) self.assertTrue(atom.GetNumRadicalElectrons() == 1) self.assertTrue(atom.GetNoImplicit()) mol = Chem.MolFromSmiles("[n]1ccccc1") self.assertTrue(mol) atom = mol.GetAtomWithIdx(0) self.assertTrue(atom.GetNumRadicalElectrons() == 0) self.assertTrue(atom.GetNoImplicit()) def test52MolFrags(self): """ test GetMolFrags functionality """ mol = Chem.MolFromSmiles("C.CC") self.assertTrue(mol) fs = Chem.GetMolFrags(mol) self.assertTrue(len(fs) == 2) self.assertTrue(len(fs[0]) == 1) self.assertTrue(tuple(fs[0]) == (0, )) self.assertTrue(len(fs[1]) == 2) self.assertTrue(tuple(fs[1]) == (1, 2)) fs = Chem.GetMolFrags(mol, True) self.assertTrue(len(fs) == 2) self.assertTrue(fs[0].GetNumAtoms() == 1) self.assertTrue(fs[1].GetNumAtoms() == 2) mol = Chem.MolFromSmiles("CCC") self.assertTrue(mol) fs = Chem.GetMolFrags(mol) self.assertTrue(len(fs) == 1) self.assertTrue(len(fs[0]) == 3) self.assertTrue(tuple(fs[0]) == (0, 1, 2)) fs = Chem.GetMolFrags(mol, True) self.assertTrue(len(fs) 
== 1) self.assertTrue(fs[0].GetNumAtoms() == 3) mol = Chem.MolFromSmiles("CO") em = Chem.EditableMol(mol) em.RemoveBond(0, 1) nm = em.GetMol() fs = Chem.GetMolFrags(nm, asMols=True) self.assertEqual([x.GetNumAtoms(onlyExplicit=False) for x in fs], [5, 3]) fs = Chem.GetMolFrags(nm, asMols=True, sanitizeFrags=False) self.assertEqual([x.GetNumAtoms(onlyExplicit=False) for x in fs], [4, 2]) mol = Chem.MolFromSmiles("CC.CCC") fs = Chem.GetMolFrags(mol, asMols=True) self.assertEqual([x.GetNumAtoms() for x in fs], [2, 3]) frags = [] fragsMolAtomMapping = [] fs = Chem.GetMolFrags(mol, asMols=True, frags=frags, fragsMolAtomMapping=fragsMolAtomMapping) self.assertEqual(mol.GetNumAtoms(onlyExplicit=True), len(frags)) fragsCheck = [] for i, f in enumerate(fs): fragsCheck.extend([i] * f.GetNumAtoms(onlyExplicit=True)) self.assertEqual(frags, fragsCheck) fragsMolAtomMappingCheck = [] i = 0 for f in fs: n = f.GetNumAtoms(onlyExplicit=True) fragsMolAtomMappingCheck.append(tuple(range(i, i + n))) i += n self.assertEqual(fragsMolAtomMapping, fragsMolAtomMappingCheck) def test53Matrices(self): """ test adjacency and distance matrices """ m = Chem.MolFromSmiles('CC=C') d = Chem.GetDistanceMatrix(m, 0) self.assertTrue(feq(d[0, 1], 1.0)) self.assertTrue(feq(d[0, 2], 2.0)) self.assertTrue(feq(d[1, 0], 1.0)) self.assertTrue(feq(d[2, 0], 2.0)) a = Chem.GetAdjacencyMatrix(m, 0) self.assertTrue(a[0, 1] == 1) self.assertTrue(a[0, 2] == 0) self.assertTrue(a[1, 2] == 1) self.assertTrue(a[1, 0] == 1) self.assertTrue(a[2, 0] == 0) m = Chem.MolFromSmiles('C1CC1') d = Chem.GetDistanceMatrix(m, 0) self.assertTrue(feq(d[0, 1], 1.0)) self.assertTrue(feq(d[0, 2], 1.0)) a = Chem.GetAdjacencyMatrix(m, 0) self.assertTrue(a[0, 1] == 1) self.assertTrue(a[0, 2] == 1) self.assertTrue(a[1, 2] == 1) m = Chem.MolFromSmiles('CC.C') d = Chem.GetDistanceMatrix(m, 0) self.assertTrue(feq(d[0, 1], 1.0)) self.assertTrue(d[0, 2] > 1000) self.assertTrue(d[1, 2] > 1000) a = Chem.GetAdjacencyMatrix(m, 0) self.assertTrue(a[0, 1] == 1) self.assertTrue(a[0, 2] == 0) self.assertTrue(a[1, 2] == 0) def test54Mol2Parser(self): """ test the mol2 parser """ fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'pyrazole_pyridine.mol2') m = Chem.MolFromMol2File(fileN) self.assertTrue(m.GetNumAtoms() == 5) self.assertTrue(Chem.MolToSmiles(m) == 'c1cn[nH]c1', Chem.MolToSmiles(m)) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', '3505.mol2') m = Chem.MolFromMol2File(fileN) self.assertTrue(m.GetBondBetweenAtoms(3, 12) is not None) self.assertEqual(m.GetBondBetweenAtoms(3, 12).GetBondType(), Chem.BondType.SINGLE) self.assertEqual(m.GetAtomWithIdx(12).GetFormalCharge(), 0) m = Chem.MolFromMol2File(fileN, cleanupSubstructures=False) self.assertTrue(m.GetBondBetweenAtoms(3, 12) is not None) self.assertEqual(m.GetBondBetweenAtoms(3, 12).GetBondType(), Chem.BondType.DOUBLE) self.assertEqual(m.GetAtomWithIdx(12).GetFormalCharge(), 1) def test55LayeredFingerprint(self): m1 = Chem.MolFromSmiles('CC(C)C') fp1 = Chem.LayeredFingerprint(m1) self.assertEqual(len(fp1), 2048) atomCounts = [0] * m1.GetNumAtoms() fp2 = Chem.LayeredFingerprint(m1, atomCounts=atomCounts) self.assertEqual(fp1, fp2) self.assertEqual(atomCounts, [4, 7, 4, 4]) fp2 = Chem.LayeredFingerprint(m1, atomCounts=atomCounts) self.assertEqual(fp1, fp2) self.assertEqual(atomCounts, [8, 14, 8, 8]) pbv = DataStructs.ExplicitBitVect(2048) fp3 = Chem.LayeredFingerprint(m1, setOnlyBits=pbv) self.assertEqual(fp3.GetNumOnBits(), 0) fp3 = 
Chem.LayeredFingerprint(m1, setOnlyBits=fp2) self.assertEqual(fp3, fp2) m2 = Chem.MolFromSmiles('CC') fp4 = Chem.LayeredFingerprint(m2) atomCounts = [0] * m1.GetNumAtoms() fp3 = Chem.LayeredFingerprint(m1, setOnlyBits=fp4, atomCounts=atomCounts) self.assertEqual(atomCounts, [1, 3, 1, 1]) m2 = Chem.MolFromSmiles('CCC') fp4 = Chem.LayeredFingerprint(m2) atomCounts = [0] * m1.GetNumAtoms() fp3 = Chem.LayeredFingerprint(m1, setOnlyBits=fp4, atomCounts=atomCounts) self.assertEqual(atomCounts, [3, 6, 3, 3]) def test56LazySDMolSupplier(self): if not hasattr(Chem, 'CompressedSDMolSupplier'): return self.assertRaises(ValueError, lambda: Chem.CompressedSDMolSupplier('nosuchfile.sdf.gz')) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf.gz') sdSup = Chem.CompressedSDMolSupplier(fileN) molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] chgs192 = {8: 1, 11: 1, 15: -1, 18: -1, 20: 1, 21: 1, 23: -1, 25: -1} i = 0 for mol in sdSup: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 if (mol.GetProp("_Name") == "192"): # test parsed charges on one of the molecules for id in chgs192.keys(): self.assertTrue(mol.GetAtomWithIdx(id).GetFormalCharge() == chgs192[id]) self.assertEqual(i, 16) sdSup = Chem.CompressedSDMolSupplier(fileN) ns = [mol.GetProp("_Name") for mol in sdSup] self.assertTrue(ns == molNames) sdSup = Chem.CompressedSDMolSupplier(fileN, 0) for mol in sdSup: self.assertTrue(not mol.HasProp("numArom")) def test57AddRecursiveQuery(self): q1 = Chem.MolFromSmiles('CC') q2 = Chem.MolFromSmiles('CO') Chem.AddRecursiveQuery(q1, q2, 1) m1 = Chem.MolFromSmiles('OCC') self.assertTrue(m1.HasSubstructMatch(q2)) self.assertTrue(m1.HasSubstructMatch(q1)) self.assertTrue(m1.HasSubstructMatch(q1)) self.assertTrue(m1.GetSubstructMatch(q1) == (2, 1)) q3 = Chem.MolFromSmiles('CS') Chem.AddRecursiveQuery(q1, q3, 1) self.assertFalse(m1.HasSubstructMatch(q3)) self.assertFalse(m1.HasSubstructMatch(q1)) m2 = Chem.MolFromSmiles('OC(S)C') self.assertTrue(m2.HasSubstructMatch(q1)) self.assertTrue(m2.GetSubstructMatch(q1) == (3, 1)) m3 = Chem.MolFromSmiles('SCC') self.assertTrue(m3.HasSubstructMatch(q3)) self.assertFalse(m3.HasSubstructMatch(q1)) q1 = Chem.MolFromSmiles('CC') Chem.AddRecursiveQuery(q1, q2, 1) Chem.AddRecursiveQuery(q1, q3, 1, False) self.assertTrue(m3.HasSubstructMatch(q1)) self.assertTrue(m3.GetSubstructMatch(q1) == (2, 1)) def test58Issue2983794(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Wrap', 'test_data', 'issue2983794.sdf') m1 = Chem.MolFromMolFile(fileN) self.assertTrue(m1) em = Chem.EditableMol(m1) em.RemoveAtom(0) m2 = em.GetMol() Chem.Kekulize(m2) def test59Issue3007178(self): m = Chem.MolFromSmiles('CCC') a = m.GetAtomWithIdx(0) m = None self.assertEqual(Chem.MolToSmiles(a.GetOwningMol()), 'CCC') a = None m = Chem.MolFromSmiles('CCC') b = m.GetBondWithIdx(0) m = None self.assertEqual(Chem.MolToSmiles(b.GetOwningMol()), 'CCC') def test60SmilesWriterClose(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'fewSmi.csv') smiSup = Chem.SmilesMolSupplier(fileN, delimiter=",", smilesColumn=1, nameColumn=0, titleLine=0) ms = [x for x in smiSup] ofile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Wrap', 'test_data', 'outSmiles.txt') writer = Chem.SmilesWriter(ofile) for mol in ms: writer.write(mol) writer.close() newsup = Chem.SmilesMolSupplier(ofile) newms = [x for x in 
newsup] self.assertEqual(len(ms), len(newms)) def test61PathToSubmol(self): m = Chem.MolFromSmiles('CCCCCC1C(O)CC(O)N1C=CCO') env = Chem.FindAtomEnvironmentOfRadiusN(m, 2, 11) self.assertEqual(len(env), 8) amap = {} submol = Chem.PathToSubmol(m, env, atomMap=amap) self.assertEqual(submol.GetNumAtoms(), len(amap.keys())) self.assertEqual(submol.GetNumAtoms(), 9) smi = Chem.MolToSmiles(submol, rootedAtAtom=amap[11]) self.assertEqual(smi[0], 'N') refsmi = Chem.MolToSmiles(Chem.MolFromSmiles('N(C=C)(C(C)C)C(O)C')) csmi = Chem.MolToSmiles(Chem.MolFromSmiles(smi)) self.assertEqual(refsmi, csmi) def test62SmilesAndSmartsReplacements(self): mol = Chem.MolFromSmiles('C{branch}C', replacements={'{branch}': 'C1(CC1)'}) self.assertEqual(mol.GetNumAtoms(), 5) mol = Chem.MolFromSmarts('C{branch}C', replacements={'{branch}': 'C1(CC1)'}) self.assertEqual(mol.GetNumAtoms(), 5) mol = Chem.MolFromSmiles('C{branch}C{acid}', replacements={ '{branch}': 'C1(CC1)', '{acid}': "C(=O)O" }) self.assertEqual(mol.GetNumAtoms(), 8) def test63Issue3313539(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'rgroups1.mol') m = Chem.MolFromMolFile(fileN) self.assertTrue(m is not None) at = m.GetAtomWithIdx(3) self.assertTrue(at is not None) self.assertTrue(at.HasProp('_MolFileRLabel')) p = at.GetProp('_MolFileRLabel') self.assertEqual(p, '2') self.assertEqual(Chem.GetAtomRLabel(at), 2) at = m.GetAtomWithIdx(4) self.assertTrue(at is not None) self.assertTrue(at.HasProp('_MolFileRLabel')) p = at.GetProp('_MolFileRLabel') self.assertEqual(p, '1') self.assertEqual(Chem.GetAtomRLabel(at), 1) def test64MoleculeCleanup(self): m = Chem.MolFromSmiles('CN(=O)=O', False) self.assertTrue(m) self.assertTrue(m.GetAtomWithIdx(1).GetFormalCharge()==0 and \ m.GetAtomWithIdx(2).GetFormalCharge()==0 and \ m.GetAtomWithIdx(3).GetFormalCharge()==0) self.assertTrue(m.GetBondBetweenAtoms(1,3).GetBondType()==Chem.BondType.DOUBLE and \ m.GetBondBetweenAtoms(1,2).GetBondType()==Chem.BondType.DOUBLE ) Chem.Cleanup(m) m.UpdatePropertyCache() self.assertTrue(m.GetAtomWithIdx(1).GetFormalCharge()==1 and \ (m.GetAtomWithIdx(2).GetFormalCharge()==-1 or \ m.GetAtomWithIdx(3).GetFormalCharge()==-1)) self.assertTrue(m.GetBondBetweenAtoms(1,3).GetBondType()==Chem.BondType.SINGLE or \ m.GetBondBetweenAtoms(1,2).GetBondType()==Chem.BondType.SINGLE ) def test65StreamSupplier(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf.gz') molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] inf = gzip.open(fileN) if 0: sb = Chem.streambuf(inf) suppl = Chem.ForwardSDMolSupplier(sb) else: suppl = Chem.ForwardSDMolSupplier(inf) i = 0 while not suppl.atEnd(): mol = next(suppl) self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) # make sure we have object ownership preserved inf = gzip.open(fileN) suppl = Chem.ForwardSDMolSupplier(inf) inf = None i = 0 while not suppl.atEnd(): mol = next(suppl) self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) def testMaeStreamSupplier(self): try: MaeMolSupplier = Chem.MaeMolSupplier except AttributeError: # Built without Maestro support, return w/o testing return fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.maegz') molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", 
"211", "213", "220", "229", "256" ] inf = gzip.open(fileN) suppl = MaeMolSupplier(inf) i = 0 while not suppl.atEnd(): mol = next(suppl) self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) # make sure we have object ownership preserved inf = gzip.open(fileN) suppl = MaeMolSupplier(inf) inf = None i = 0 while not suppl.atEnd(): mol = next(suppl) self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) def testMaeFileSupplier(self): try: MaeMolSupplier = Chem.MaeMolSupplier except AttributeError: # Built without Maestro support, return w/o testing return fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.mae') molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] suppl = MaeMolSupplier(fileN) i = 0 while not suppl.atEnd(): mol = next(suppl) self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) def testMaeFileSupplierException(self): try: MaeMolSupplier = Chem.MaeMolSupplier except AttributeError: # Built without Maestro support, return w/o testing return fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'bad_ppty.mae') err_msg_substr = "Bad format for property"; ok = False suppl = MaeMolSupplier(fileN) for i in range(5): try: mol = next(suppl) except RuntimeError as e: self.assertEqual(i, 1) self.assertTrue(err_msg_substr in str(e)) ok = True break else: self.assertTrue(mol) self.assertTrue(mol.HasProp("_Name")) self.assertTrue(mol.GetNumAtoms() == 1) self.assertFalse(suppl.atEnd()) self.assertTrue(ok) def test66StreamSupplierIter(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf.gz') inf = gzip.open(fileN) if 0: sb = Chem.streambuf(inf) suppl = Chem.ForwardSDMolSupplier(sb) else: suppl = Chem.ForwardSDMolSupplier(inf) molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] i = 0 for mol in suppl: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) def test67StreamSupplierStringIO(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf.gz') from io import BytesIO sio = BytesIO(gzip.open(fileN).read()) suppl = Chem.ForwardSDMolSupplier(sio) molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] i = 0 for mol in suppl: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) def test68ForwardSupplierUsingFilename(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf') suppl = Chem.ForwardSDMolSupplier(fileN) molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] i = 0 for mol in suppl: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) self.assertRaises(IOError, lambda: Chem.ForwardSDMolSupplier('nosuchfile.sdf')) def test69StreamSupplierStreambuf(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf.gz') sb = rdBase.streambuf(gzip.open(fileN)) suppl = Chem.ForwardSDMolSupplier(sb) molNames = [ 
"48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] i = 0 for mol in suppl: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) def test70StreamSDWriter(self): from io import BytesIO, StringIO fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf.gz') inf = gzip.open(fileN) suppl = Chem.ForwardSDMolSupplier(inf) osio = StringIO() w = Chem.SDWriter(osio) molNames = [ "48", "78", "128", "163", "164", "170", "180", "186", "192", "203", "210", "211", "213", "220", "229", "256" ] i = 0 for mol in suppl: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) w.write(mol) i += 1 self.assertEqual(i, 16) w.flush() w = None txt = osio.getvalue().encode() isio = BytesIO(txt) suppl = Chem.ForwardSDMolSupplier(isio) i = 0 for mol in suppl: self.assertTrue(mol) self.assertTrue(mol.GetProp("_Name") == molNames[i]) i += 1 self.assertEqual(i, 16) def test71StreamSmilesWriter(self): from io import StringIO fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'esters.sdf') suppl = Chem.ForwardSDMolSupplier(fileN) osio = StringIO() w = Chem.SmilesWriter(osio) ms = [x for x in suppl] w.SetProps(ms[0].GetPropNames()) i = 0 for mol in ms: self.assertTrue(mol) w.write(mol) i += 1 self.assertEqual(i, 6) w.flush() w = None txt = osio.getvalue() self.assertEqual(txt.count('ID'), 1) self.assertEqual(txt.count('\n'), 7) def test72StreamTDTWriter(self): from io import StringIO fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'esters.sdf') suppl = Chem.ForwardSDMolSupplier(fileN) osio = StringIO() w = Chem.TDTWriter(osio) ms = [x for x in suppl] w.SetProps(ms[0].GetPropNames()) i = 0 for mol in ms: self.assertTrue(mol) w.write(mol) i += 1 self.assertEqual(i, 6) w.flush() w = None txt = osio.getvalue() self.assertEqual(txt.count('ID'), 6) self.assertEqual(txt.count('NAME'), 6) def test73SanitizationOptions(self): m = Chem.MolFromSmiles('c1ccccc1', sanitize=False) res = Chem.SanitizeMol(m, catchErrors=True) self.assertEqual(res, 0) m = Chem.MolFromSmiles('c1cccc1', sanitize=False) res = Chem.SanitizeMol(m, catchErrors=True) self.assertEqual(res, Chem.SanitizeFlags.SANITIZE_KEKULIZE) m = Chem.MolFromSmiles('CC(C)(C)(C)C', sanitize=False) res = Chem.SanitizeMol(m, catchErrors=True) self.assertEqual(res, Chem.SanitizeFlags.SANITIZE_PROPERTIES) m = Chem.MolFromSmiles('c1cccc1', sanitize=False) res = Chem.SanitizeMol( m, sanitizeOps=Chem.SanitizeFlags.SANITIZE_ALL ^ Chem.SanitizeFlags.SANITIZE_KEKULIZE, catchErrors=True) self.assertEqual(res, Chem.SanitizeFlags.SANITIZE_NONE) def test74Issue3510149(self): mol = Chem.MolFromSmiles("CCC1CNCC1CC") atoms = mol.GetAtoms() mol = None for atom in atoms: idx = atom.GetIdx() p = atom.GetOwningMol().GetNumAtoms() mol = Chem.MolFromSmiles("CCC1CNCC1CC") bonds = mol.GetBonds() mol = None for bond in bonds: idx = bond.GetIdx() p = atom.GetOwningMol().GetNumAtoms() mol = Chem.MolFromSmiles("CCC1CNCC1CC") bond = mol.GetBondBetweenAtoms(0, 1) mol = None idx = bond.GetBeginAtomIdx() p = bond.GetOwningMol().GetNumAtoms() fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf') sdSup = Chem.SDMolSupplier(fileN) mol = next(sdSup) nats = mol.GetNumAtoms() conf = mol.GetConformer() mol = None self.assertEqual(nats, conf.GetNumAtoms()) conf.GetOwningMol().GetProp("_Name") def 
test75AllBondsExplicit(self): m = Chem.MolFromSmiles("CCC") smi = Chem.MolToSmiles(m) self.assertEqual(smi, "CCC") smi = Chem.MolToSmiles(m, allBondsExplicit=True) self.assertEqual(smi, "C-C-C") m = Chem.MolFromSmiles("c1ccccc1") smi = Chem.MolToSmiles(m) self.assertEqual(smi, "c1ccccc1") smi = Chem.MolToSmiles(m, allBondsExplicit=True) self.assertEqual(smi, "c1:c:c:c:c:c:1") def test76VeryLargeMolecule(self): # this is sf.net issue 3524984 smi = '[C@H](F)(Cl)' + 'c1cc[nH]c1' * 500 + '[C@H](F)(Cl)' m = Chem.MolFromSmiles(smi) self.assertTrue(m) self.assertEqual(m.GetNumAtoms(), 2506) scs = Chem.FindMolChiralCenters(m) self.assertEqual(len(scs), 2) def test77MolFragmentToSmiles(self): smi = "OC1CC1CC" m = Chem.MolFromSmiles(smi) fsmi = Chem.MolFragmentToSmiles(m, [1, 2, 3]) self.assertEqual(fsmi, "C1CC1") fsmi = Chem.MolFragmentToSmiles(m, [1, 2, 3], bondsToUse=[1, 2, 5]) self.assertEqual(fsmi, "C1CC1") fsmi = Chem.MolFragmentToSmiles(m, [1, 2, 3], bondsToUse=[1, 2]) self.assertEqual(fsmi, "CCC") fsmi = Chem.MolFragmentToSmiles(m, [1, 2, 3], atomSymbols=["", "[A]", "[C]", "[B]", "", ""]) self.assertEqual(fsmi, "[A]1[B][C]1") fsmi = Chem.MolFragmentToSmiles(m, [1, 2, 3], bondSymbols=["", "%", "%", "", "", "%"]) self.assertEqual(fsmi, "C1%C%C%1") smi = "c1ccccc1C" m = Chem.MolFromSmiles(smi) fsmi = Chem.MolFragmentToSmiles(m, range(6)) self.assertEqual(fsmi, "c1ccccc1") Chem.Kekulize(m) fsmi = Chem.MolFragmentToSmiles(m, range(6), kekuleSmiles=True) self.assertEqual(fsmi, "C1=CC=CC=C1") fsmi = Chem.MolFragmentToSmiles(m, range(6), atomSymbols=["[C]"] * 7, kekuleSmiles=True) self.assertEqual(fsmi, "[C]1=[C][C]=[C][C]=[C]1") self.assertRaises(ValueError, lambda: Chem.MolFragmentToSmiles(m, [])) def test78AtomAndBondProps(self): m = Chem.MolFromSmiles('c1ccccc1') at = m.GetAtomWithIdx(0) self.assertFalse(at.HasProp('foo')) at.SetProp('foo', 'bar') self.assertTrue(at.HasProp('foo')) self.assertEqual(at.GetProp('foo'), 'bar') bond = m.GetBondWithIdx(0) self.assertFalse(bond.HasProp('foo')) bond.SetProp('foo', 'bar') self.assertTrue(bond.HasProp('foo')) self.assertEqual(bond.GetProp('foo'), 'bar') def test79AddRecursiveStructureQueries(self): qs = {'carbonyl': Chem.MolFromSmiles('CO'), 'amine': Chem.MolFromSmiles('CN')} q = Chem.MolFromSmiles('CCC') q.GetAtomWithIdx(0).SetProp('query', 'carbonyl,amine') Chem.MolAddRecursiveQueries(q, qs, 'query') m = Chem.MolFromSmiles('CCCO') self.assertTrue(m.HasSubstructMatch(q)) m = Chem.MolFromSmiles('CCCN') self.assertTrue(m.HasSubstructMatch(q)) m = Chem.MolFromSmiles('CCCC') self.assertFalse(m.HasSubstructMatch(q)) def test80ParseMolQueryDefFile(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'ChemTransforms', 'testData', 'query_file1.txt') d = Chem.ParseMolQueryDefFile(fileN, standardize=False) self.assertTrue('CarboxylicAcid' in d) m = Chem.MolFromSmiles('CC(=O)O') self.assertTrue(m.HasSubstructMatch(d['CarboxylicAcid'])) self.assertFalse(m.HasSubstructMatch(d['CarboxylicAcid.Aromatic'])) d = Chem.ParseMolQueryDefFile(fileN) self.assertTrue('carboxylicacid' in d) self.assertFalse('CarboxylicAcid' in d) def test81Issue275(self): smi = Chem.MolToSmiles( Chem.MurckoDecompose(Chem.MolFromSmiles('CCCCC[C@H]1CC[C@H](C(=O)O)CC1'))) self.assertEqual(smi, 'C1CCCCC1') def test82Issue288(self): m = Chem.MolFromSmiles('CC*') m.GetAtomWithIdx(2).SetProp('molAtomMapNumber', '30') smi = Chem.MolToSmiles(m) self.assertEqual(smi, 'CC[*:30]') # try newer api m = Chem.MolFromSmiles('CC*') m.GetAtomWithIdx(2).SetAtomMapNum(30) smi = Chem.MolToSmiles(m) 
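    # the newer SetAtomMapNum() API should give the same mapped-atom SMILES as the molAtomMapNumber property set above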
self.assertEqual(smi, 'CC[*:30]') def test83GitHubIssue19(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'empty2.sdf') sdSup = Chem.SDMolSupplier(fileN) self.assertTrue(sdSup.atEnd()) self.assertRaises(IndexError, lambda: sdSup[0]) sdSup.SetData('') self.assertTrue(sdSup.atEnd()) self.assertRaises(IndexError, lambda: sdSup[0]) sdSup = Chem.SDMolSupplier(fileN) self.assertRaises(IndexError, lambda: sdSup[0]) sdSup.SetData('') self.assertRaises(IndexError, lambda: sdSup[0]) sdSup = Chem.SDMolSupplier(fileN) self.assertEqual(len(sdSup), 0) sdSup.SetData('') self.assertEqual(len(sdSup), 0) def test84PDBBasics(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', '1CRN.pdb') m = Chem.MolFromPDBFile(fileN, proximityBonding=False) self.assertEqual(m.GetNumAtoms(), 327) self.assertEqual(m.GetNumBonds(), 3) m = Chem.MolFromPDBFile(fileN) self.assertTrue(m is not None) self.assertEqual(m.GetNumAtoms(), 327) self.assertEqual(m.GetNumBonds(), 337) self.assertTrue(m.GetAtomWithIdx(0).GetPDBResidueInfo()) self.assertEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetName(), " N ") self.assertEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetResidueName(), "THR") self.assertAlmostEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetTempFactor(), 13.79, 2) m = Chem.MolFromPDBBlock(Chem.MolToPDBBlock(m)) self.assertEqual(m.GetNumAtoms(), 327) self.assertEqual(m.GetNumBonds(), 337) self.assertTrue(m.GetAtomWithIdx(0).GetPDBResidueInfo()) self.assertEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetName(), " N ") self.assertEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetResidueName(), "THR") self.assertAlmostEqual(m.GetAtomWithIdx(0).GetPDBResidueInfo().GetTempFactor(), 13.79, 2) # test multivalent Hs fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', '2c92_hypervalentH.pdb') mol = Chem.MolFromPDBFile(fileN, sanitize=False, removeHs=False) atom = mol.GetAtomWithIdx(84) self.assertEqual(atom.GetAtomicNum(), 1) # is it H self.assertEqual(atom.GetDegree(), 1) # H should have 1 bond for n in atom.GetNeighbors(): # Check if neighbor is from the same residue self.assertEqual(atom.GetPDBResidueInfo().GetResidueName(), n.GetPDBResidueInfo().GetResidueName()) # test unbinding metals (ZN) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', '1ps3_zn.pdb') mol = Chem.MolFromPDBFile(fileN, sanitize=False, removeHs=False) atom = mol.GetAtomWithIdx(40) self.assertEqual(atom.GetAtomicNum(), 30) # is it Zn self.assertEqual(atom.GetDegree(), 4) # Zn should have 4 zero-order bonds self.assertEqual(atom.GetExplicitValence(), 0) bonds_order = [bond.GetBondType() for bond in atom.GetBonds()] self.assertEqual(bonds_order, [Chem.BondType.ZERO] * atom.GetDegree()) # test metal bonds without proximity bonding mol = Chem.MolFromPDBFile(fileN, sanitize=False, removeHs=False, proximityBonding=False) atom = mol.GetAtomWithIdx(40) self.assertEqual(atom.GetAtomicNum(), 30) # is it Zn self.assertEqual(atom.GetDegree(), 4) # Zn should have 4 zero-order bonds self.assertEqual(atom.GetExplicitValence(), 0) bonds_order = [bond.GetBondType() for bond in atom.GetBonds()] self.assertEqual(bonds_order, [Chem.BondType.ZERO] * atom.GetDegree()) # test unbinding HOHs fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', '2vnf_bindedHOH.pdb') mol = Chem.MolFromPDBFile(fileN, sanitize=False, removeHs=False) atom = mol.GetAtomWithIdx(10) 
self.assertEqual(atom.GetPDBResidueInfo().GetResidueName(), 'HOH') self.assertEqual(atom.GetDegree(), 0) # HOH should have no bonds # test metal bonding in ligand fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', '2dej_APW.pdb') mol = Chem.MolFromPDBFile(fileN, sanitize=False, removeHs=False) atom = mol.GetAtomWithIdx(6) self.assertEqual(atom.GetAtomicNum(), 12) self.assertEqual(atom.GetDegree(), 2) atom = mol.GetAtomWithIdx(35) self.assertEqual(atom.GetPDBResidueInfo().GetResidueName(), 'HOH') self.assertEqual(atom.GetDegree(), 0) def test85AtomCopying(self): """Can a copied atom be added to a molecule?""" import copy m = Chem.MolFromSmiles('C1CC1') a = m.GetAtomWithIdx(0) a_copy1 = copy.copy(a) a_copy2 = Chem.Atom(a) m = None a = None def assert_is_valid_atom(a): new_m = Chem.RWMol() new_m.AddAtom(a) # This will not match if the owning mol is unset for a_copy, # or if there has been a clean up. self.assertEqual(new_m.GetAtomWithIdx(0).GetIdx(), 0) assert_is_valid_atom(a_copy1) assert_is_valid_atom(a_copy2) def test85MolCopying(self): m = Chem.MolFromSmiles('C1CC1[C@H](F)Cl') m.SetProp('foo', 'bar') m2 = Chem.Mol(m) self.assertEqual(Chem.MolToSmiles(m, True), Chem.MolToSmiles(m2, True)) self.assertTrue(m2.HasProp('foo')) self.assertEqual(m2.GetProp('foo'), 'bar') ri = m2.GetRingInfo() self.assertTrue(ri) self.assertEqual(ri.NumRings(), 1) def test85MolCopying2(self): import copy m1 = Chem.MolFromSmiles('CC') m1.SetProp('Foo', 'bar') m1.foo = [1] m2 = copy.copy(m1) m3 = copy.copy(m2) m4 = copy.deepcopy(m1) m5 = copy.deepcopy(m2) m6 = copy.deepcopy(m4) self.assertEqual(m1.GetProp('Foo'), 'bar') self.assertEqual(m2.GetProp('Foo'), 'bar') self.assertEqual(m3.GetProp('Foo'), 'bar') self.assertEqual(m4.GetProp('Foo'), 'bar') self.assertEqual(m5.GetProp('Foo'), 'bar') self.assertEqual(m6.GetProp('Foo'), 'bar') m2.foo.append(4) self.assertEqual(m1.foo, [1, 4]) self.assertEqual(m2.foo, [1, 4]) self.assertEqual(m3.foo, [1, 4]) self.assertEqual(m4.foo, [1]) self.assertEqual(m5.foo, [1]) self.assertEqual(m6.foo, [1]) m7 = Chem.RWMol(m1) self.assertFalse(hasattr(m7, 'foo')) m7.foo = [1] m8 = copy.copy(m7) m9 = copy.deepcopy(m7) m8.foo.append(4) self.assertEqual(m7.GetProp('Foo'), 'bar') self.assertEqual(m8.GetProp('Foo'), 'bar') self.assertEqual(m9.GetProp('Foo'), 'bar') self.assertEqual(m8.foo, [1, 4]) self.assertEqual(m9.foo, [1]) def test86MolRenumbering(self): import random m = Chem.MolFromSmiles('C[C@H]1CC[C@H](C/C=C/[C@H](F)Cl)CC1') cSmi = Chem.MolToSmiles(m, True) for i in range(m.GetNumAtoms()): ans = list(range(m.GetNumAtoms())) random.shuffle(ans) m2 = Chem.RenumberAtoms(m, ans) nSmi = Chem.MolToSmiles(m2, True) self.assertEqual(cSmi, nSmi) def test87FragmentOnBonds(self): m = Chem.MolFromSmiles('CC1CC(O)C1CCC1CC1') bis = m.GetSubstructMatches(Chem.MolFromSmarts('[!R][R]')) bs = [] labels = [] for bi in bis: b = m.GetBondBetweenAtoms(bi[0], bi[1]) if b.GetBeginAtomIdx() == bi[0]: labels.append((10, 1)) else: labels.append((1, 10)) bs.append(b.GetIdx()) nm = Chem.FragmentOnBonds(m, bs) frags = Chem.GetMolFrags(nm) self.assertEqual(len(frags), 5) self.assertEqual(frags, ((0, 12), (1, 2, 3, 5, 11, 14, 16), (4, 13), (6, 7, 15, 18), (8, 9, 10, 17))) smi = Chem.MolToSmiles(nm, True) self.assertEqual(smi, '*C1CC([4*])C1[6*].[1*]C.[3*]O.[5*]CC[8*].[7*]C1CC1') nm = Chem.FragmentOnBonds(m, bs, dummyLabels=labels) frags = Chem.GetMolFrags(nm) self.assertEqual(len(frags), 5) self.assertEqual(frags, ((0, 12), (1, 2, 3, 5, 11, 14, 16), (4, 13), (6, 7, 15, 18), (8, 9, 
10, 17))) smi = Chem.MolToSmiles(nm, True) self.assertEqual(smi, '[1*]C.[1*]CC[1*].[1*]O.[10*]C1CC([10*])C1[10*].[10*]C1CC1') m = Chem.MolFromSmiles('CCC(=O)CC(=O)C') bis = m.GetSubstructMatches(Chem.MolFromSmarts('C=O')) bs = [] for bi in bis: b = m.GetBondBetweenAtoms(bi[0], bi[1]) bs.append(b.GetIdx()) bts = [Chem.BondType.DOUBLE] * len(bs) nm = Chem.FragmentOnBonds(m, bs, bondTypes=bts) frags = Chem.GetMolFrags(nm) self.assertEqual(len(frags), 3) smi = Chem.MolToSmiles(nm, True) self.assertEqual(smi, '[2*]=O.[3*]=C(CC)CC(=[6*])C.[5*]=O') # github issue 430: m = Chem.MolFromSmiles('OCCCCN') self.assertRaises(ValueError, lambda: Chem.FragmentOnBonds(m, ())) def test88QueryAtoms(self): from rdkit.Chem import rdqueries m = Chem.MolFromSmiles('c1nc(C)n(CC)c1') qa = rdqueries.ExplicitDegreeEqualsQueryAtom(3) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (2, 4)) qa.ExpandQuery(rdqueries.AtomNumEqualsQueryAtom(6, negate=True)) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (4, )) qa = rdqueries.ExplicitDegreeEqualsQueryAtom(3) qa.ExpandQuery( rdqueries.AtomNumEqualsQueryAtom(6, negate=True), how=Chem.CompositeQueryType.COMPOSITE_OR) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (1, 2, 4)) qa = rdqueries.ExplicitDegreeEqualsQueryAtom(3) qa.ExpandQuery( rdqueries.AtomNumEqualsQueryAtom(6, negate=True), how=Chem.CompositeQueryType.COMPOSITE_XOR) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (1, 2)) qa = rdqueries.ExplicitDegreeGreaterQueryAtom(2) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (2, 4)) qa = rdqueries.ExplicitDegreeLessQueryAtom(2) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (3, 6)) m = Chem.MolFromSmiles('N[CH][CH]') qa = rdqueries.NumRadicalElectronsGreaterQueryAtom(0) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (1, 2)) qa = rdqueries.NumRadicalElectronsGreaterQueryAtom(1) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (2, )) m = Chem.MolFromSmiles('F[C@H](Cl)C') qa = rdqueries.HasChiralTagQueryAtom() l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (1, )) qa = rdqueries.MissingChiralTagQueryAtom() l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, ()) m = Chem.MolFromSmiles('F[CH](Cl)C') qa = rdqueries.HasChiralTagQueryAtom() l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, ()) qa = rdqueries.MissingChiralTagQueryAtom() l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (1, )) m = Chem.MolFromSmiles('CNCON') qa = rdqueries.NumHeteroatomNeighborsEqualsQueryAtom(2) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (2, )) qa = rdqueries.NumHeteroatomNeighborsGreaterQueryAtom(0) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (0, 2, 3, 4)) def test89UnicodeInput(self): m = Chem.MolFromSmiles(u'c1ccccc1') self.assertTrue(m is not None) self.assertEqual(m.GetNumAtoms(), 6) m = Chem.MolFromSmarts(u'c1ccccc1') self.assertTrue(m is not None) self.assertEqual(m.GetNumAtoms(), 6) def test90FragmentOnSomeBonds(self): m = Chem.MolFromSmiles('OCCCCN') pieces = Chem.FragmentOnSomeBonds(m, (0, 2, 4), 2) self.assertEqual(len(pieces), 3) frags = Chem.GetMolFrags(pieces[0]) self.assertEqual(len(frags), 3) self.assertEqual(len(frags[0]), 
2) self.assertEqual(len(frags[1]), 4) self.assertEqual(len(frags[2]), 4) frags = Chem.GetMolFrags(pieces[1]) self.assertEqual(len(frags), 3) self.assertEqual(len(frags[0]), 2) self.assertEqual(len(frags[1]), 6) self.assertEqual(len(frags[2]), 2) frags = Chem.GetMolFrags(pieces[2]) self.assertEqual(len(frags), 3) self.assertEqual(len(frags[0]), 4) self.assertEqual(len(frags[1]), 4) self.assertEqual(len(frags[2]), 2) pieces, cpa = Chem.FragmentOnSomeBonds(m, (0, 2, 4), 2, returnCutsPerAtom=True) self.assertEqual(len(pieces), 3) self.assertEqual(len(cpa), 3) self.assertEqual(len(cpa[0]), m.GetNumAtoms()) # github issue 430: m = Chem.MolFromSmiles('OCCCCN') self.assertRaises(ValueError, lambda: Chem.FragmentOnSomeBonds(m, ())) pieces = Chem.FragmentOnSomeBonds(m, (0, 2, 4), 0) self.assertEqual(len(pieces), 0) def test91RankAtoms(self): m = Chem.MolFromSmiles('ONCS.ONCS') ranks = Chem.CanonicalRankAtoms(m, breakTies=False) self.assertEqual(list(ranks[0:4]), list(ranks[4:])) m = Chem.MolFromSmiles("c1ccccc1") ranks = Chem.CanonicalRankAtoms(m, breakTies=False) for x in ranks: self.assertEqual(x, 0) m = Chem.MolFromSmiles("C1NCN1") ranks = Chem.CanonicalRankAtoms(m, breakTies=False) self.assertEqual(ranks[0], ranks[2]) self.assertEqual(ranks[1], ranks[3]) def test92RankAtomsInFragment(self): m = Chem.MolFromSmiles('ONCS.ONCS') ranks = Chem.CanonicalRankAtomsInFragment(m, [0, 1, 2, 3], [0, 1, 2]) ranks2 = Chem.CanonicalRankAtomsInFragment(m, [4, 5, 6, 7], [3, 4, 5]) self.assertEqual(list(ranks[0:4]), list(ranks2[4:])) self.assertEqual(list(ranks[4:]), [-1] * 4) self.assertEqual(list(ranks2[0:4]), [-1] * 4) # doc tests mol = Chem.MolFromSmiles('C1NCN1.C1NCN1') self.assertEqual( list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(0, 4), breakTies=False)), [4, 6, 4, 6, -1, -1, -1, -1]) self.assertNotEqual( list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(0, 4), breakTies=True)), [4, 6, 4, 6, -1, -1, -1, -1]) self.assertEqual( list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(4, 8), breakTies=False)), [-1, -1, -1, -1, 4, 6, 4, 6]) self.assertNotEqual( list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(4, 8), breakTies=True)), [-1, -1, -1, -1, 4, 6, 4, 6]) def test93RWMolsAsROMol(self): """ test the RWMol class as a proper ROMol """ mol = Chem.MolFromSmiles('C1CCC1') self.assertTrue(type(mol) == Chem.Mol) rwmol = Chem.RWMol(mol) self.assertEqual(Chem.MolToSmiles(rwmol, True), Chem.MolToSmiles(rwmol.GetMol())) newAt = Chem.Atom(8) rwmol.ReplaceAtom(0, newAt) self.assertEqual(Chem.MolToSmiles(rwmol, True), Chem.MolToSmiles(rwmol.GetMol())) def test94CopyWithConfs(self): """ test copying Mols with some conformers """ fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'cmpd2.tpl') m1 = Chem.MolFromTPLFile(fileN) self.assertTrue(m1 is not None) self.assertEqual(m1.GetNumAtoms(), 12) self.assertEqual(m1.GetNumConformers(), 2) self.assertEqual(m1.GetConformer(0).GetNumAtoms(), 12) self.assertEqual(m1.GetConformer(1).GetNumAtoms(), 12) m2 = Chem.Mol(m1) self.assertEqual(m2.GetNumAtoms(), 12) self.assertEqual(m2.GetNumConformers(), 2) self.assertEqual(m2.GetConformer(0).GetNumAtoms(), 12) self.assertEqual(m2.GetConformer(1).GetNumAtoms(), 12) m2 = Chem.Mol(m1, False, 0) self.assertEqual(m2.GetNumAtoms(), 12) self.assertEqual(m2.GetNumConformers(), 1) self.assertEqual(m2.GetConformer(0).GetNumAtoms(), 12) m2 = Chem.Mol(m1, False, 1) self.assertEqual(m2.GetNumAtoms(), 12) self.assertEqual(m2.GetNumConformers(), 1) 
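    # the single conformer kept by the copy retains its original id, so GetConformer(1) is still valid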
self.assertEqual(m2.GetConformer(1).GetNumAtoms(), 12) m2 = Chem.Mol(m1, True) self.assertTrue(m2.GetNumAtoms() == 12) self.assertTrue(m2.GetNumConformers() == 0) m2 = Chem.RWMol(m1) self.assertEqual(m2.GetNumAtoms(), 12) self.assertEqual(m2.GetNumConformers(), 2) self.assertEqual(m2.GetConformer(0).GetNumAtoms(), 12) self.assertEqual(m2.GetConformer(1).GetNumAtoms(), 12) m2 = Chem.RWMol(m1, False, 0) self.assertEqual(m2.GetNumAtoms(), 12) self.assertEqual(m2.GetNumConformers(), 1) self.assertEqual(m2.GetConformer(0).GetNumAtoms(), 12) m2 = Chem.RWMol(m1, False, 1) self.assertEqual(m2.GetNumAtoms(), 12) self.assertEqual(m2.GetNumConformers(), 1) self.assertEqual(m2.GetConformer(1).GetNumAtoms(), 12) m2 = Chem.RWMol(m1, True) self.assertTrue(m2.GetNumAtoms() == 12) self.assertTrue(m2.GetNumConformers() == 0) def testAtomPropQueries(self): """ test the property queries """ from rdkit.Chem import rdqueries m = Chem.MolFromSmiles("C" * 14) atoms = m.GetAtoms() atoms[0].SetProp("hah", "hah") atoms[1].SetIntProp("bar", 1) atoms[2].SetIntProp("bar", 2) atoms[3].SetBoolProp("baz", True) atoms[4].SetBoolProp("baz", False) atoms[5].SetProp("boo", "hoo") atoms[6].SetProp("boo", "-urns") atoms[7].SetDoubleProp("boot", 1.0) atoms[8].SetDoubleProp("boot", 4.0) atoms[9].SetDoubleProp("number", 4.0) atoms[10].SetIntProp("number", 4) tests = ((rdqueries.HasIntPropWithValueQueryAtom, "bar", { 1: [1], 2: [2] }), (rdqueries.HasBoolPropWithValueQueryAtom, "baz", { True: [3], False: [4] }), (rdqueries.HasStringPropWithValueQueryAtom, "boo", { "hoo": [5], "-urns": [6] }), (rdqueries.HasDoublePropWithValueQueryAtom, "boot", { 1.0: [7], 4.0: [8] })) for query, name, lookups in tests: for t, v in lookups.items(): q = query(name, t) self.assertEqual(v, [x.GetIdx() for x in m.GetAtomsMatchingQuery(q)]) q = query(name, t, negate=True) self.assertEqual( sorted(set(range(14)) - set(v)), [x.GetIdx() for x in m.GetAtomsMatchingQuery(q)]) # check tolerances self.assertEqual([ x.GetIdx() for x in m.GetAtomsMatchingQuery( rdqueries.HasDoublePropWithValueQueryAtom("boot", 1.0, tolerance=3.)) ], [7, 8]) # numbers are numbers?, i.e. int!=double self.assertEqual([ x.GetIdx() for x in m.GetAtomsMatchingQuery(rdqueries.HasIntPropWithValueQueryAtom("number", 4)) ], [10]) def testBondPropQueries(self): """ test the property queries """ from rdkit.Chem import rdqueries m = Chem.MolFromSmiles("C" * 14) bonds = m.GetBonds() bonds[0].SetProp("hah", "hah") bonds[1].SetIntProp("bar", 1) bonds[2].SetIntProp("bar", 2) bonds[3].SetBoolProp("baz", True) bonds[4].SetBoolProp("baz", False) bonds[5].SetProp("boo", "hoo") bonds[6].SetProp("boo", "-urns") bonds[7].SetDoubleProp("boot", 1.0) bonds[8].SetDoubleProp("boot", 4.0) bonds[9].SetDoubleProp("number", 4.0) bonds[10].SetIntProp("number", 4) tests = ((rdqueries.HasIntPropWithValueQueryBond, "bar", { 1: [1], 2: [2] }), (rdqueries.HasBoolPropWithValueQueryBond, "baz", { True: [3], False: [4] }), (rdqueries.HasStringPropWithValueQueryBond, "boo", { "hoo": [5], "-urns": [6] }), (rdqueries.HasDoublePropWithValueQueryBond, "boot", { 1.0: [7], 4.0: [8] })) for query, name, lookups in tests: for t, v in lookups.items(): q = query(name, t) self.assertEqual(v, [x.GetIdx() for x in m.GetBonds() if q.Match(x)]) q = query(name, t, negate=True) self.assertEqual( sorted(set(range(13)) - set(v)), [x.GetIdx() for x in m.GetBonds() if q.Match(x)]) # check tolerances q = rdqueries.HasDoublePropWithValueQueryBond("boot", 1.0, tolerance=3.) 
self.assertEqual([x.GetIdx() for x in m.GetBonds() if q.Match(x)], [7, 8]) # numbers are numbers?, i.e. int!=double q = rdqueries.HasIntPropWithValueQueryBond("number", 4) self.assertEqual([x.GetIdx() for x in m.GetBonds() if q.Match(x)], [10]) def testGetShortestPath(self): """ test the GetShortestPath() wrapper """ smi = "CC(OC1C(CCCC3)C3C(CCCC2)C2C1OC(C)=O)=O" m = Chem.MolFromSmiles(smi) path = Chem.GetShortestPath(m, 1, 20) self.assertEqual(path, (1, 2, 3, 16, 17, 18, 20)) def testGithub497(self): with tempfile.TemporaryFile() as tmp, gzip.open(tmp) as outf: with self.assertRaises(ValueError): w = Chem.SDWriter(outf) def testGithub498(self): if (sys.version_info < (3, 0)): mode = 'w+' else: mode = 'wt+' m = Chem.MolFromSmiles('C') with tempfile.NamedTemporaryFile() as tmp, gzip.open(tmp, mode) as outf: w = Chem.SDWriter(outf) w.write(m) w.close() def testReplaceBond(self): origmol = Chem.RWMol(Chem.MolFromSmiles("CC")) bonds = list(origmol.GetBonds()) self.assertEqual(len(bonds), 1) singlebond = bonds[0] self.assertEqual(singlebond.GetBondType(), Chem.BondType.SINGLE) # this is the only way we create a bond, is take it from another molecule doublebonded = Chem.MolFromSmiles("C=C") doublebond = list(doublebonded.GetBonds())[0] # make sure replacing the bond changes the smiles self.assertEqual(Chem.MolToSmiles(origmol), "CC") origmol.ReplaceBond(singlebond.GetIdx(), doublebond) Chem.SanitizeMol(origmol) self.assertEqual(Chem.MolToSmiles(origmol), "C=C") def testAdjustQueryProperties(self): m = Chem.MolFromSmarts('C1CCC1*') am = Chem.AdjustQueryProperties(m) self.assertTrue(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(m)) self.assertTrue(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(am)) self.assertTrue(Chem.MolFromSmiles('C1CC(C)C1C').HasSubstructMatch(m)) self.assertFalse(Chem.MolFromSmiles('C1CC(C)C1C').HasSubstructMatch(am)) m = Chem.MolFromSmiles('C1CCC1*') am = Chem.AdjustQueryProperties(m) self.assertFalse(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(m)) self.assertTrue(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(am)) qps = Chem.AdjustQueryParameters() qps.makeDummiesQueries = False am = Chem.AdjustQueryProperties(m, qps) self.assertFalse(Chem.MolFromSmiles('C1CCC1C').HasSubstructMatch(am)) m = Chem.MolFromSmiles('C1=CC=CC=C1', sanitize=False) am = Chem.AdjustQueryProperties(m) self.assertTrue(Chem.MolFromSmiles('c1ccccc1').HasSubstructMatch(am)) qp = Chem.AdjustQueryParameters() qp.aromatizeIfPossible = False am = Chem.AdjustQueryProperties(m, qp) self.assertFalse(Chem.MolFromSmiles('c1ccccc1').HasSubstructMatch(am)) m = Chem.MolFromSmiles('C1CCC1OC') qps = Chem.AdjustQueryParameters() qps.makeAtomsGeneric = True am = Chem.AdjustQueryProperties(m, qps) self.assertEqual(Chem.MolToSmarts(am), '*1-*-*-*-1-*-*') qps.makeAtomsGenericFlags = Chem.ADJUST_IGNORERINGS am = Chem.AdjustQueryProperties(m, qps) self.assertEqual(Chem.MolToSmarts(am), '[#6&D2]1-[#6&D2]-[#6&D2]-[#6&D3]-1-*-*') qps = Chem.AdjustQueryParameters() qps.makeBondsGeneric = True am = Chem.AdjustQueryProperties(m, qps) self.assertEqual(Chem.MolToSmarts(am), '[#6&D2]1~[#6&D2]~[#6&D2]~[#6&D3]~1~[#8]~[#6]') qps.makeBondsGenericFlags = Chem.ADJUST_IGNORERINGS am = Chem.AdjustQueryProperties(m, qps) self.assertEqual(Chem.MolToSmarts(am), '[#6&D2]1-[#6&D2]-[#6&D2]-[#6&D3]-1~[#8]~[#6]') def testMolFragmentSmarts(self): m = Chem.MolFromSmiles('C1CCC1OC') self.assertEqual(Chem.MolFragmentToSmarts(m, [0, 1, 2]), '[#6]-[#6]-[#6]') # if bondsToUse is honored, the ring won't show up 
self.assertEqual(Chem.MolFragmentToSmarts(m, [0, 1, 2, 3], bondsToUse=[0, 1, 2, 3]), '[#6]-[#6]-[#6]-[#6]') # Does MolFragmentToSmarts accept output of AdjustQueryProperties? qps = Chem.AdjustQueryParameters() qps.makeAtomsGeneric = True am = Chem.AdjustQueryProperties(m, qps) self.assertEqual(Chem.MolFragmentToSmarts(am, [0, 1, 2]), '*-*-*') def testAdjustQueryPropertiesgithubIssue1474(self): core = Chem.MolFromSmiles('[*:1]C1N([*:2])C([*:3])O1') core.GetAtomWithIdx(0).SetProp('foo', 'bar') core.GetAtomWithIdx(1).SetProp('foo', 'bar') ap = Chem.AdjustQueryProperties(core) self.assertEqual(ap.GetAtomWithIdx(0).GetPropsAsDict()["foo"], "bar") self.assertEqual(ap.GetAtomWithIdx(1).GetPropsAsDict()["foo"], "bar") def testGithubIssue579(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf.gz') inf = gzip.open(fileN) suppl = Chem.ForwardSDMolSupplier(inf) m0 = next(suppl) self.assertIsNot(m0, None) inf.close() del suppl def testSequenceBasics(self): " very basic round-tripping of the sequence reader/writer support " helm = 'PEPTIDE1{C.Y.I.Q.N.C.P.L.G}$$$$' seq = 'CYIQNCPLG' fasta = '>\nCYIQNCPLG\n' smi = 'CC[C@H](C)[C@H](NC(=O)[C@H](Cc1ccc(O)cc1)NC(=O)[C@@H](N)CS)C(=O)N[C@@H](CCC(N)=O)C(=O)N[C@@H](CC(N)=O)C(=O)N[C@@H](CS)C(=O)N1CCC[C@H]1C(=O)N[C@@H](CC(C)C)C(=O)NCC(=O)O' m = Chem.MolFromSequence(seq) self.assertTrue(m is not None) self.assertEqual(Chem.MolToSequence(m), seq) self.assertEqual(Chem.MolToHELM(m), helm) self.assertEqual(Chem.MolToFASTA(m), fasta) self.assertEqual(Chem.MolToSmiles(m, isomericSmiles=True), smi) m = Chem.MolFromHELM(helm) self.assertTrue(m is not None) self.assertEqual(Chem.MolToSequence(m), seq) self.assertEqual(Chem.MolToHELM(m), helm) self.assertEqual(Chem.MolToFASTA(m), fasta) self.assertEqual(Chem.MolToSmiles(m, isomericSmiles=True), smi) m = Chem.MolFromFASTA(fasta) self.assertTrue(m is not None) self.assertEqual(Chem.MolToSequence(m), seq) self.assertEqual(Chem.MolToHELM(m), helm) self.assertEqual(Chem.MolToFASTA(m), fasta) self.assertEqual(Chem.MolToSmiles(m, isomericSmiles=True), smi) seq = "CGCGAATTACCGCG" m = Chem.MolFromSequence(seq, flavor=6) # DNA self.assertEqual(Chem.MolToSequence(m), 'CGCGAATTACCGCG') self.assertEqual( Chem.MolToHELM(m), 'RNA1{[dR](C)P.[dR](G)P.[dR](C)P.[dR](G)P.[dR](A)P.[dR](A)P.[dR](T)P.[dR](T)P.[dR](A)P.[dR](C)P.[dR](C)P.[dR](G)P.[dR](C)P.[dR](G)}$$$$' ) seq = "CGCGAAUUACCGCG" m = Chem.MolFromSequence(seq, flavor=2) # RNA self.assertEqual(Chem.MolToSequence(m), 'CGCGAAUUACCGCG') self.assertEqual( Chem.MolToHELM(m), 'RNA1{R(C)P.R(G)P.R(C)P.R(G)P.R(A)P.R(A)P.R(U)P.R(U)P.R(A)P.R(C)P.R(C)P.R(G)P.R(C)P.R(G)}$$$$' ) m = Chem.MolFromSequence(seq, flavor=3) # RNA - 5' cap self.assertEqual(Chem.MolToSequence(m), 'CGCGAAUUACCGCG') self.assertEqual( Chem.MolToHELM(m), 'RNA1{P.R(C)P.R(G)P.R(C)P.R(G)P.R(A)P.R(A)P.R(U)P.R(U)P.R(A)P.R(C)P.R(C)P.R(G)P.R(C)P.R(G)}$$$$' ) def testResMolSupplier(self): mol = Chem.MolFromSmiles('CC') resMolSuppl = Chem.ResonanceMolSupplier(mol) del resMolSuppl resMolSuppl = Chem.ResonanceMolSupplier(mol) self.assertEqual(resMolSuppl.GetNumConjGrps(), 0) self.assertEqual(len(resMolSuppl), 1) self.assertEqual(resMolSuppl.GetNumConjGrps(), 0) mol = Chem.MolFromSmiles('NC(=[NH2+])c1ccc(cc1)C(=O)[O-]') totalFormalCharge = getTotalFormalCharge(mol) resMolSuppl = Chem.ResonanceMolSupplier(mol) self.assertFalse(resMolSuppl.GetIsEnumerated()) self.assertEqual(len(resMolSuppl), 4) self.assertTrue(resMolSuppl.GetIsEnumerated()) resMolSuppl = 
Chem.ResonanceMolSupplier(mol) self.assertFalse(resMolSuppl.GetIsEnumerated()) resMolSuppl.Enumerate() self.assertTrue(resMolSuppl.GetIsEnumerated()) self.assertTrue((resMolSuppl[0].GetBondBetweenAtoms(0, 1).GetBondType() \ != resMolSuppl[1].GetBondBetweenAtoms(0, 1).GetBondType()) or (resMolSuppl[0].GetBondBetweenAtoms(9, 10).GetBondType() \ != resMolSuppl[1].GetBondBetweenAtoms(9, 10).GetBondType())) resMolSuppl = Chem.ResonanceMolSupplier(mol, Chem.KEKULE_ALL) self.assertEqual(len(resMolSuppl), 8) bondTypeSet = set() # check that we actually have two alternate Kekule structures bondTypeSet.add(resMolSuppl[0].GetBondBetweenAtoms(3, 4).GetBondType()) bondTypeSet.add(resMolSuppl[1].GetBondBetweenAtoms(3, 4).GetBondType()) self.assertEqual(len(bondTypeSet), 2) bondTypeDict = {} resMolSuppl = Chem.ResonanceMolSupplier(mol, Chem.ALLOW_INCOMPLETE_OCTETS \ | Chem.UNCONSTRAINED_CATIONS \ | Chem.UNCONSTRAINED_ANIONS) self.assertEqual(len(resMolSuppl), 32) for i in range(len(resMolSuppl)): resMol = resMolSuppl[i] self.assertEqual(getTotalFormalCharge(resMol), totalFormalCharge) while (not resMolSuppl.atEnd()): resMol = next(resMolSuppl) self.assertEqual(getTotalFormalCharge(resMol), totalFormalCharge) resMolSuppl.reset() cmpFormalChargeBondOrder(self, resMolSuppl[0], next(resMolSuppl)) resMolSuppl = Chem.ResonanceMolSupplier(mol, Chem.ALLOW_INCOMPLETE_OCTETS \ | Chem.UNCONSTRAINED_CATIONS \ | Chem.UNCONSTRAINED_ANIONS, 10) self.assertEqual(len(resMolSuppl), 10) crambinPdb = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', '1CRN.pdb') mol = Chem.MolFromPDBFile(crambinPdb) resMolSuppl = Chem.ResonanceMolSupplier(mol) self.assertEqual(len(resMolSuppl), 1) resMolSuppl = Chem.ResonanceMolSupplier(mol, Chem.KEKULE_ALL) self.assertEqual(len(resMolSuppl), 8) def testSubstructMatchAcetate(self): mol = Chem.MolFromSmiles('CC(=O)[O-]') query = Chem.MolFromSmarts('C(=O)[O-]') resMolSuppl = Chem.ResonanceMolSupplier(mol) matches = mol.GetSubstructMatches(query) self.assertEqual(len(matches), 1) self.assertEqual(matches, ((1, 2, 3), )) matches = mol.GetSubstructMatches(query, uniquify=True) self.assertEqual(len(matches), 1) self.assertEqual(matches, ((1, 2, 3), )) matches = mol.GetSubstructMatches(query, uniquify=False) self.assertEqual(len(matches), 1) self.assertEqual(matches, ((1, 2, 3), )) matches = resMolSuppl.GetSubstructMatches(query) self.assertEqual(len(matches), 2) self.assertEqual(matches, ((1, 2, 3), (1, 3, 2))) matches = resMolSuppl.GetSubstructMatches(query, uniquify=True) self.assertEqual(len(matches), 1) self.assertEqual(matches, ((1, 2, 3), )) matches = resMolSuppl.GetSubstructMatches(query, uniquify=False) self.assertEqual(len(matches), 2) self.assertEqual(matches, ((1, 2, 3), (1, 3, 2))) query = Chem.MolFromSmarts('C(~O)~O') matches = mol.GetSubstructMatches(query, uniquify=False) self.assertEqual(len(matches), 2) self.assertEqual(matches, ((1, 2, 3), (1, 3, 2))) matches = mol.GetSubstructMatches(query, uniquify=True) self.assertEqual(len(matches), 1) self.assertEqual(matches, ((1, 2, 3), )) matches = resMolSuppl.GetSubstructMatches(query, uniquify=False) self.assertEqual(len(matches), 2) self.assertEqual(matches, ((1, 2, 3), (1, 3, 2))) matches = resMolSuppl.GetSubstructMatches(query, uniquify=True) self.assertEqual(len(matches), 1) self.assertEqual(matches, ((1, 2, 3), )) def testSubstructMatchDMAP(self): mol = Chem.MolFromSmiles('C(C)Nc1cc[nH+]cc1') query = Chem.MolFromSmarts('[#7+]') resMolSuppl = Chem.ResonanceMolSupplier(mol) matches = 
mol.GetSubstructMatches(query, False, False, False) self.assertEqual(len(matches), 1) p = matches[0] self.assertEqual(p[0], 6) matches = resMolSuppl.GetSubstructMatches(query, False, False, False) self.assertEqual(len(matches), 2) v = [] p = matches[0] v.append(p[0]) p = matches[1] v.append(p[0]) v.sort() self.assertEqual(v[0], 2) self.assertEqual(v[1], 6) def testCrambin(self): crambinPdb = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', '1CRN.pdb') crambin = Chem.MolFromPDBFile(crambinPdb) res = [] # protonate NH2 res.append(Chem.MolFromSmarts('[Nh2][Ch;Ch2]')) # protonate Arg res.append(Chem.MolFromSmarts('[Nh][C]([Nh2])=[Nh]')) setResidueFormalCharge(crambin, res, 1) res = [] # deprotonate COOH res.append(Chem.MolFromSmarts('C(=O)[Oh]')) setResidueFormalCharge(crambin, res, -1) res = [] resMolSupplST = Chem.ResonanceMolSupplier(crambin) # crambin has 2 Arg (3 resonance structures each); 1 Asp, 1 Glu # and 1 terminal COO- (2 resonance structures each) # so possible resonance structures are 3^2 * 2^3 = 72 self.assertEqual(len(resMolSupplST), 72) self.assertEqual(resMolSupplST.GetNumConjGrps(), 56) carboxylateQuery = Chem.MolFromSmarts('C(=O)[O-]') guanidiniumQuery = Chem.MolFromSmarts('NC(=[NH2+])N') matches = crambin.GetSubstructMatches(carboxylateQuery) self.assertEqual(len(matches), 3) matches = crambin.GetSubstructMatches(carboxylateQuery, uniquify=False) self.assertEqual(len(matches), 3) matches = crambin.GetSubstructMatches(guanidiniumQuery) self.assertEqual(len(matches), 0) matches = crambin.GetSubstructMatches(guanidiniumQuery, uniquify=False) self.assertEqual(len(matches), 0) matches = resMolSupplST.GetSubstructMatches(carboxylateQuery) self.assertEqual(len(matches), 6) self.assertEqual(matches, ((166, 167, 168), (166, 168, 167), (298, 299, 300), (298, 300, 299), (320, 321, 326), (320, 326, 321))) matches = resMolSupplST.GetSubstructMatches(carboxylateQuery, uniquify=True) self.assertEqual(len(matches), 3) self.assertEqual(matches, ((166, 167, 168), (298, 299, 300), (320, 321, 326))) matches = resMolSupplST.GetSubstructMatches(guanidiniumQuery) self.assertEqual(len(matches), 8) self.assertEqual(matches, ((66, 67, 68, 69), (66, 67, 69, 68), (68, 67, 69, 66), (69, 67, 68, 66), (123, 124, 125, 126), (123, 124, 126, 125), (125, 124, 126, 123), (126, 124, 125, 123))) matches = resMolSupplST.GetSubstructMatches(guanidiniumQuery, uniquify=True) self.assertEqual(len(matches), 2) self.assertEqual(matches, ((66, 67, 69, 68), (123, 124, 126, 125))) btList2ST = getBtList2(resMolSupplST) self.assertTrue(btList2ST) resMolSupplMT = Chem.ResonanceMolSupplier(crambin) resMolSupplMT.SetNumThreads(0) self.assertEqual(len(resMolSupplST), len(resMolSupplMT)) btList2MT = getBtList2(resMolSupplMT) self.assertTrue(btList2MT) self.assertEqual(len(btList2ST), len(btList2MT)) for i in range(len(btList2ST)): for j in range(len(btList2ST)): self.assertEqual(btList2ST[i][j], btList2MT[i][j]) for suppl in [resMolSupplST, resMolSupplMT]: matches = suppl.GetSubstructMatches(carboxylateQuery, numThreads=0) self.assertEqual(len(matches), 6) self.assertEqual(matches, ((166, 167, 168), (166, 168, 167), (298, 299, 300), (298, 300, 299), (320, 321, 326), (320, 326, 321))) matches = suppl.GetSubstructMatches(carboxylateQuery, uniquify=True, numThreads=0) self.assertEqual(len(matches), 3) self.assertEqual(matches, ((166, 167, 168), (298, 299, 300), (320, 321, 326))) matches = suppl.GetSubstructMatches(guanidiniumQuery, numThreads=0) self.assertEqual(len(matches), 8) 
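    # two Arg guanidinium groups, each giving four symmetry/resonance-equivalent orderings, account for the 8 matches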
self.assertEqual(matches, ((66, 67, 68, 69), (66, 67, 69, 68), (68, 67, 69, 66), (69, 67, 68, 66), (123, 124, 125, 126), (123, 124, 126, 125), (125, 124, 126, 123), (126, 124, 125, 123))) matches = suppl.GetSubstructMatches(guanidiniumQuery, uniquify=True, numThreads=0) self.assertEqual(len(matches), 2) self.assertEqual(matches, ((66, 67, 69, 68), (123, 124, 126, 125))) def testGitHUb1166(self): mol = Chem.MolFromSmiles('NC(=[NH2+])c1ccc(cc1)C(=O)[O-]') resMolSuppl = Chem.ResonanceMolSupplier(mol, Chem.KEKULE_ALL) self.assertEqual(len(resMolSuppl), 8) # check that formal charges on odd indices are in the same position # as on even indices for i in range(0, len(resMolSuppl), 2): self.assertEqual(resMolSuppl[i].GetNumAtoms(), resMolSuppl[i + 1].GetNumAtoms()) for atomIdx in range(resMolSuppl[i].GetNumAtoms()): self.assertEqual(resMolSuppl[i].GetAtomWithIdx(atomIdx).GetFormalCharge(), resMolSuppl[i + 1].GetAtomWithIdx(atomIdx).GetFormalCharge()) # check that bond orders are alternate on aromatic bonds between # structures on odd indices and structures on even indices self.assertEqual(resMolSuppl[i].GetNumBonds(), resMolSuppl[i + 1].GetNumBonds()) for bondIdx in range(resMolSuppl[i].GetNumBonds()): self.assertTrue( ((not resMolSuppl[i].GetBondWithIdx(bondIdx).GetIsAromatic()) and (not resMolSuppl[i + 1].GetBondWithIdx(bondIdx).GetIsAromatic()) and (resMolSuppl[i].GetBondWithIdx(bondIdx).GetBondType() == resMolSuppl[i + 1] .GetBondWithIdx(bondIdx).GetBondType())) or (resMolSuppl[i].GetBondWithIdx(bondIdx).GetIsAromatic() and resMolSuppl[i + 1].GetBondWithIdx(bondIdx).GetIsAromatic() and (int( round(resMolSuppl[i].GetBondWithIdx(bondIdx).GetBondTypeAsDouble() + resMolSuppl[i + 1].GetBondWithIdx(bondIdx).GetBondTypeAsDouble())) == 3))) def testConjGrpPerception(self): mol1 = Chem.MolFromMolBlock("""\ RDKit 2D 14 15 0 0 0 0 0 0 0 0999 V2000 3.7539 -1.2744 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.4317 -0.5660 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1.1571 -1.3568 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.1651 -0.6484 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -1.4397 -1.4393 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -1.3921 -2.9385 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 -2.7619 -0.7309 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.8095 0.7684 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -4.1316 1.4768 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 -1.5349 1.5592 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.2127 0.8508 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1.0619 1.6417 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0 2.3841 0.9333 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.6587 1.7241 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 2 3 4 0 3 4 4 0 4 5 4 0 5 6 1 0 5 7 4 0 7 8 4 0 8 9 1 0 8 10 4 0 10 11 4 0 11 12 4 0 12 13 4 0 13 14 1 0 13 2 4 0 11 4 4 0 M END $$$$ """) mol2 = Chem.MolFromMolBlock("""\ RDKit 2D 14 15 0 0 0 0 0 0 0 0999 V2000 1.0619 -1.6417 0.0000 N 0 0 0 0 0 0 0 0 0 0 0 0 -0.2127 -0.8508 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -1.5349 -1.5592 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.8095 -0.7684 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -2.7619 0.7309 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -1.4397 1.4393 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.1651 0.6484 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1.1571 1.3568 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.4317 0.5660 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.7539 1.2744 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.3841 -0.9333 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.6587 -1.7241 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -4.1316 -1.4768 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 -1.3921 2.9385 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 1 2 4 0 3 4 4 0 4 5 4 0 5 6 4 0 2 3 4 0 2 7 4 0 7 8 4 0 8 9 4 0 9 10 1 0 9 11 4 0 11 12 1 0 11 1 4 0 6 7 4 0 4 13 1 0 6 14 
1 0 M END $$$$ """) resMolSuppl1 = Chem.ResonanceMolSupplier(mol1, Chem.KEKULE_ALL) self.assertEqual(len(resMolSuppl1), 3) resMolSuppl2 = Chem.ResonanceMolSupplier(mol2, Chem.KEKULE_ALL) self.assertEqual(len(resMolSuppl2), 3) def testAtomBondProps(self): m = Chem.MolFromSmiles('c1ccccc1') for atom in m.GetAtoms(): d = atom.GetPropsAsDict() self.assertEqual(set(d.keys()), set(['_CIPRank', '__computedProps'])) self.assertEqual(d['_CIPRank'], 0) self.assertEqual(list(d['__computedProps']), ['_CIPRank']) for bond in m.GetBonds(): self.assertEqual(bond.GetPropsAsDict(), {}) def testSDProps(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf') #fileN = "../FileParsers/test_data/NCI_aids_few.sdf" sddata = [ { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 48', 'NSC': 48, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t=\t2.46E-05\t3', '_Name': 48, 'CAS_RN': '15716-70-8', '_MolFileComments': '15716-70-8', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t3', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 78', 'NSC': 78, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t=\t9.80E-05\t3', '_Name': 78, 'CAS_RN': '6290-84-2', '_MolFileComments': '6290-84-2', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t3', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 128', 'NSC': 128, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t=\t4.60E-05\t4', '_Name': 128, 'CAS_RN': '5395-10-8', '_MolFileComments': '5395-10-8', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t4', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 163', 'NSC': 163, 'NCI_AIDS_Antiviral_Screen_IC50': '6.75E-04\tM\t>\t6.75E-04\t2', '_Name': 163, 'CAS_RN': '81-11-8', '_MolFileComments': '81-11-8', 'NCI_AIDS_Antiviral_Screen_EC50': '6.75E-04\tM\t>\t6.75E-04\t2', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 164', 'NSC': 164, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t2', '_Name': 164, 'CAS_RN': '5325-43-9', '_MolFileComments': '5325-43-9', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t2', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 170', 'NSC': 170, '_Name': 170, 'CAS_RN': '999-99-9', '_MolFileComments': '999-99-9', 'NCI_AIDS_Antiviral_Screen_EC50': '9.47E-04\tM\t>\t9.47E-04\t1', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 180', 'NSC': 180, 'NCI_AIDS_Antiviral_Screen_IC50': '6.46E-04\tM\t=\t5.80E-04\t2\n1.81E-03\tM\t=\t6.90E-04\t2', '_Name': 180, 'CAS_RN': '69-72-7', '_MolFileComments': '69-72-7', 'NCI_AIDS_Antiviral_Screen_EC50': '6.46E-04\tM\t>\t6.46E-04\t2\n1.81E-03\tM\t>\t1.81E-03\t2', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 186', 'NSC': 186, 'NCI_AIDS_Antiviral_Screen_IC50': '1.44E-04\tM\t=\t2.49E-05\t2', '_Name': 186, 'CAS_RN': '518-75-2', '_MolFileComments': '518-75-2', 'NCI_AIDS_Antiviral_Screen_EC50': '1.44E-04\tM\t>\t1.44E-04\t2', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 192', 'NSC': 192, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t=\t3.38E-06\t2', '_Name': 192, 'CAS_RN': '2217-55-2', 
'_MolFileComments': '2217-55-2', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t2', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 203', 'NSC': 203, '_Name': 203, 'CAS_RN': '1155-00-6', '_MolFileComments': '1155-00-6', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 210', 'NSC': 210, 'NCI_AIDS_Antiviral_Screen_IC50': '1.33E-03\tM\t>\t1.33E-03\t2', '_Name': 210, 'CAS_RN': '5325-75-7', '_MolFileComments': '5325-75-7', 'NCI_AIDS_Antiviral_Screen_EC50': '1.33E-03\tM\t>\t1.33E-03\t2', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 211', 'NSC': 211, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t8\n2.00E-03\tM\t=\t1.12E-03\t2', '_Name': 211, 'CAS_RN': '5325-76-8', '_MolFileComments': '5325-76-8', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t7.42E-05\t8\n2.00E-03\tM\t=\t6.35E-05\t2', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CM' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 213', 'NSC': 213, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t4', '_Name': 213, 'CAS_RN': '119-80-2', '_MolFileComments': '119-80-2', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t4', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 220', 'NSC': 220, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t4', '_Name': 220, 'CAS_RN': '5325-83-7', '_MolFileComments': '5325-83-7', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t4', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 229', 'NSC': 229, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t2', '_Name': 229, 'CAS_RN': '5325-88-2', '_MolFileComments': '5325-88-2', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t2', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, { '_MolFileInfo': 'BBtclserve11129916382D 0 0.00000 0.00000 256', 'NSC': 256, 'NCI_AIDS_Antiviral_Screen_IC50': '2.00E-04\tM\t>\t2.00E-04\t4', '_Name': 256, 'CAS_RN': '5326-06-7', '_MolFileComments': '5326-06-7', 'NCI_AIDS_Antiviral_Screen_EC50': '2.00E-04\tM\t>\t2.00E-04\t4', 'NCI_AIDS_Antiviral_Screen_Conclusion': 'CI' }, ] sdSup = Chem.SDMolSupplier(fileN) for i, mol in enumerate(sdSup): self.assertEqual(mol.GetPropsAsDict(includePrivate=True), sddata[i]) def testGetSetProps(self): m = Chem.MolFromSmiles("CC") errors = { "int": "key `foo` exists but does not result in an integer value", "double": "key `foo` exists but does not result in a double value", "bool": "key `foo` exists but does not result in a True or False value" } for ob in [m, list(m.GetAtoms())[0], list(m.GetBonds())[0]]: ob.SetDoubleProp("foo", 2.0) with self.assertRaises(ValueError) as e: ob.GetBoolProp("foo") self.assertEqual(str(e.exception), errors["bool"]) with self.assertRaises(ValueError) as e: ob.GetIntProp("foo") self.assertEqual(str(e.exception), errors["int"]) ob.SetBoolProp("foo", True) with self.assertRaises(ValueError) as e: ob.GetDoubleProp("foo") self.assertEqual(str(e.exception), errors["double"]) with self.assertRaises(ValueError) as e: ob.GetIntProp("foo") self.assertEqual(str(e.exception), errors["int"]) def testInvariantException(self): m = Chem.MolFromSmiles("C") try: m.GetAtomWithIdx(3) except RuntimeError as e: import platform details = str(e) if platform.system() == 'Windows': details = details.replace('\\', 
'/') self.assertTrue("Code/GraphMol/ROMol.cpp".lower() in details.lower()) self.assertTrue("Failed Expression: 3 < 1" in details) self.assertTrue("RDKIT:" in details) self.assertTrue(__version__ in details) # this test should probably always be last since it wraps # the logging stream def testLogging(self): from io import StringIO err = sys.stderr try: loggers = [("RDKit ERROR", "1", Chem.LogErrorMsg), ("RDKit WARNING", "2", Chem.LogWarningMsg)] for msg, v, log in loggers: sys.stderr = StringIO() log(v) self.assertEqual(sys.stderr.getvalue(), "") Chem.WrapLogs() for msg, v, log in loggers: sys.stderr = StringIO() log(v) s = sys.stderr.getvalue() self.assertTrue(msg in s) finally: sys.stderr = err def testGetSDText(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'NCI_aids_few.sdf') #fileN = "../FileParsers/test_data/NCI_aids_few.sdf" sdSup = Chem.SDMolSupplier(fileN) for m in sdSup: sdt = Chem.SDWriter.GetText(m) ts = Chem.SDMolSupplier() ts.SetData(sdt) nm = next(ts) self.assertEqual(Chem.MolToSmiles(m, True), Chem.MolToSmiles(nm, True)) for pn in m.GetPropNames(): self.assertTrue(nm.HasProp(pn)) self.assertEqual(m.GetProp(pn), nm.GetProp(pn)) def testUnfoldedRDKFingerprint(self): from rdkit.Chem import AllChem m = Chem.MolFromSmiles('c1ccccc1N') fp = AllChem.UnfoldedRDKFingerprintCountBased(m) fpDict = fp.GetNonzeroElements() self.assertEqual(len(fpDict.items()), 19) self.assertTrue(374073638 in fpDict) self.assertEqual(fpDict[374073638], 6) self.assertTrue(464351883 in fpDict) self.assertEqual(fpDict[464351883], 2) self.assertTrue(1949583554 in fpDict) self.assertEqual(fpDict[1949583554], 6) self.assertTrue(4105342207 in fpDict) self.assertEqual(fpDict[4105342207], 1) self.assertTrue(794080973 in fpDict) self.assertEqual(fpDict[794080973], 1) self.assertTrue(3826517238 in fpDict) self.assertEqual(fpDict[3826517238], 2) m = Chem.MolFromSmiles('Cl') fp = AllChem.UnfoldedRDKFingerprintCountBased(m) fpDict = fp.GetNonzeroElements() self.assertEqual(len(fpDict.items()), 0) m = Chem.MolFromSmiles('CCCO') aBits = {} fp = AllChem.UnfoldedRDKFingerprintCountBased(m, bitInfo=aBits) fpDict = fp.GetNonzeroElements() self.assertEqual(len(fpDict.items()), 5) self.assertTrue(1524090560 in fpDict) self.assertEqual(fpDict[1524090560], 1) self.assertTrue(1940446997 in fpDict) self.assertEqual(fpDict[1940446997], 1) self.assertTrue(3977409745 in fpDict) self.assertEqual(fpDict[3977409745], 1) self.assertTrue(4274652475 in fpDict) self.assertEqual(fpDict[4274652475], 1) self.assertTrue(4275705116 in fpDict) self.assertEqual(fpDict[4275705116], 2) self.assertTrue(1524090560 in aBits) self.assertEqual(aBits[1524090560], [[1, 2]]) self.assertTrue(1940446997 in aBits) self.assertEqual(aBits[1940446997], [[0, 1]]) self.assertTrue(3977409745 in aBits) self.assertEqual(aBits[3977409745], [[0, 1, 2]]) self.assertTrue(4274652475 in aBits) self.assertEqual(aBits[4274652475], [[2]]) self.assertTrue(4275705116 in aBits) self.assertEqual(aBits[4275705116], [[0], [1]]) def testRDKFingerprintBitInfo(self): m = Chem.MolFromSmiles('CCCO') aBits = {} fp1 = Chem.RDKFingerprint(m, bitInfo=aBits) self.assertTrue(1183 in aBits) self.assertEqual(aBits[1183], [[1, 2]]) self.assertTrue(709 in aBits) self.assertEqual(aBits[709], [[0, 1]]) self.assertTrue(1118 in aBits) self.assertEqual(aBits[1118], [[0, 1, 2]]) self.assertTrue(562 in aBits) self.assertEqual(aBits[562], [[2]]) self.assertTrue(1772 in aBits) self.assertEqual(aBits[1772], [[0], [1]]) def testSimpleAromaticity(self): m = 
Chem.MolFromSmiles('c1ccccc1') self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.Kekulize(m, True) self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic()) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.SetAromaticity(m, Chem.AROMATICITY_SIMPLE) self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) m = Chem.MolFromSmiles('c1c[nH]cc1') self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.Kekulize(m, True) self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic()) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.SetAromaticity(m, Chem.AROMATICITY_SIMPLE) self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) m = Chem.MolFromSmiles('c1cccoocc1') self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.Kekulize(m, True) self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic()) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.SetAromaticity(m, Chem.AROMATICITY_SIMPLE) self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic()) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) m = Chem.MolFromSmiles('c1ooc1') self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.Kekulize(m, True) self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic()) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.SetAromaticity(m, Chem.AROMATICITY_SIMPLE) self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic()) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) m = Chem.MolFromSmiles('C1=CC2=CC=CC=CC2=C1') self.assertTrue(m.GetBondWithIdx(0).GetIsAromatic()) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.Kekulize(m, True) self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic()) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) Chem.SetAromaticity(m, Chem.AROMATICITY_SIMPLE) self.assertFalse(m.GetBondWithIdx(0).GetIsAromatic()) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) def testGithub955(self): m = Chem.MolFromSmiles("CCC") m.GetAtomWithIdx(0).SetProp("foo", "1") self.assertEqual(list(m.GetAtomWithIdx(0).GetPropNames()), ["foo"]) m.GetBondWithIdx(0).SetProp("foo", "1") self.assertEqual(list(m.GetBondWithIdx(0).GetPropNames()), ["foo"]) def testMDLProps(self): m = Chem.MolFromSmiles("CCC") m.GetAtomWithIdx(0).SetAtomMapNum(1) Chem.SetAtomAlias(m.GetAtomWithIdx(1), "foo") Chem.SetAtomValue(m.GetAtomWithIdx(1), "bar") m = Chem.MolFromMolBlock(Chem.MolToMolBlock(m)) self.assertEqual(m.GetAtomWithIdx(0).GetAtomMapNum(), 1) self.assertEqual(Chem.GetAtomAlias(m.GetAtomWithIdx(1)), "foo") self.assertEqual(Chem.GetAtomValue(m.GetAtomWithIdx(1)), "bar") def testSmilesProps(self): m = Chem.MolFromSmiles("C") Chem.SetSupplementalSmilesLabel(m.GetAtomWithIdx(0), 'xxx') self.assertEqual(Chem.MolToSmiles(m), "Cxxx") def testGithub1051(self): # just need to test that this exists: self.assertTrue(Chem.BondDir.EITHERDOUBLE) def testGithub1041(self): a = Chem.Atom(6) self.assertRaises(RuntimeError, lambda: a.GetOwningMol()) self.assertRaises(RuntimeError, lambda: a.GetNeighbors()) self.assertRaises(RuntimeError, lambda: a.GetBonds()) self.assertRaises(RuntimeError, lambda: a.IsInRing()) self.assertRaises(RuntimeError, lambda: a.IsInRingSize(4)) def testSmilesParseParams(self): smi = "CCC |$foo;;bar$| ourname" m = Chem.MolFromSmiles(smi) self.assertTrue(m is not None) ps 
= Chem.SmilesParserParams() ps.allowCXSMILES = False m = Chem.MolFromSmiles(smi, ps) self.assertTrue(m is None) ps.allowCXSMILES = True ps.parseName = True m = Chem.MolFromSmiles(smi, ps) self.assertTrue(m is not None) self.assertTrue(m.GetAtomWithIdx(0).HasProp('atomLabel')) self.assertEqual(m.GetAtomWithIdx(0).GetProp('atomLabel'), "foo") self.assertTrue(m.HasProp('_Name')) self.assertEqual(m.GetProp('_Name'), "ourname") self.assertEqual(m.GetProp("_CXSMILES_Data"), "|$foo;;bar$|") def testWriteCXSmiles(self): smi = "CCC |$foo;;bar$|" ps = Chem.SmilesParserParams() ps.allowCXSMILES = True m = Chem.MolFromSmiles(smi, ps) self.assertTrue(m is not None) self.assertTrue(m.GetAtomWithIdx(0).HasProp('atomLabel')) self.assertEqual(m.GetAtomWithIdx(0).GetProp('atomLabel'), "foo") self.assertEqual(Chem.MolToCXSmiles(m),'CCC |$foo;;bar$|') smi = "Cl.CCC |$;foo;;bar$|" m = Chem.MolFromSmiles(smi, ps) self.assertTrue(m is not None) self.assertTrue(m.GetAtomWithIdx(1).HasProp('atomLabel')) self.assertEqual(m.GetAtomWithIdx(1).GetProp('atomLabel'), "foo") self.assertEqual(Chem.MolFragmentToCXSmiles(m,atomsToUse=(1,2,3)), 'CCC |$foo;;bar$|') def testPickleProps(self): import pickle m = Chem.MolFromSmiles('C1=CN=CC=C1') m.SetProp("_Name", "Name") for atom in m.GetAtoms(): atom.SetProp("_foo", "bar" + str(atom.GetIdx())) atom.SetProp("foo", "baz" + str(atom.GetIdx())) Chem.SetDefaultPickleProperties(Chem.PropertyPickleOptions.AllProps) pkl = pickle.dumps(m) m2 = pickle.loads(pkl) smi1 = Chem.MolToSmiles(m) smi2 = Chem.MolToSmiles(m2) self.assertTrue(smi1 == smi2) self.assertEqual(m2.GetProp("_Name"), "Name") for atom in m2.GetAtoms(): self.assertEqual(atom.GetProp("_foo"), "bar" + str(atom.GetIdx())) self.assertEqual(atom.GetProp("foo"), "baz" + str(atom.GetIdx())) Chem.SetDefaultPickleProperties(Chem.PropertyPickleOptions.AtomProps) pkl = pickle.dumps(m) m2 = pickle.loads(pkl) smi1 = Chem.MolToSmiles(m) smi2 = Chem.MolToSmiles(m2) self.assertTrue(smi1 == smi2) self.assertFalse(m2.HasProp("_Name")) for atom in m2.GetAtoms(): self.assertFalse(atom.HasProp("_foo")) self.assertEqual(atom.GetProp("foo"), "baz" + str(atom.GetIdx())) Chem.SetDefaultPickleProperties(Chem.PropertyPickleOptions.NoProps) pkl = pickle.dumps(m) m2 = pickle.loads(pkl) smi1 = Chem.MolToSmiles(m) smi2 = Chem.MolToSmiles(m2) self.assertTrue(smi1 == smi2) self.assertFalse(m2.HasProp("_Name")) for atom in m2.GetAtoms(): self.assertFalse(atom.HasProp("_foo")) self.assertFalse(atom.HasProp("foo")) Chem.SetDefaultPickleProperties(Chem.PropertyPickleOptions.MolProps | Chem.PropertyPickleOptions.PrivateProps) pkl = pickle.dumps(m) m2 = pickle.loads(pkl) smi1 = Chem.MolToSmiles(m) smi2 = Chem.MolToSmiles(m2) self.assertTrue(smi1 == smi2) self.assertEqual(m2.GetProp("_Name"), "Name") for atom in m2.GetAtoms(): self.assertFalse(atom.HasProp("_foo")) self.assertFalse(atom.HasProp("foo")) def testGithub1352(self): self.assertTrue('SP' in Chem.HybridizationType.names) self.assertTrue('S' in Chem.HybridizationType.names) m = Chem.MolFromSmiles('CC(=O)O.[Na]') self.assertEqual(m.GetAtomWithIdx(0).GetHybridization().name, 'SP3') self.assertEqual(m.GetAtomWithIdx(4).GetHybridization().name, 'S') def testGithub1366(self): mol = Chem.MolFromSmiles('*C*') mol = Chem.RWMol(mol) ats = iter(mol.GetAtoms()) atom = next(ats) mol.RemoveAtom(atom.GetIdx()) self.assertRaises(RuntimeError, next, ats) mol = Chem.MolFromSmiles('*C*') mol = Chem.RWMol(mol) bonds = iter(mol.GetBonds()) bond = next(bonds) mol.RemoveBond(bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()) 
self.assertRaises(RuntimeError, next, bonds) def testGithub1478(self): data = """ MJ150720 8 8 0 0 0 0 0 0 0 0999 V2000 -0.4242 -1.4883 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 0.2901 -1.0758 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1.0046 0.9865 0.0000 A 0 0 0 0 0 0 0 0 0 0 0 0 1.0046 0.1614 0.0000 A 0 0 0 0 0 0 0 0 0 0 0 0 0.2901 -0.2508 0.0000 A 0 0 0 0 0 0 0 0 0 0 0 0 -0.4243 0.1614 0.0000 A 0 0 0 0 0 0 0 0 0 0 0 0 -0.4243 0.9865 0.0000 A 0 0 0 0 0 0 0 0 0 0 0 0 0.2901 1.3990 0.0000 A 0 0 0 0 0 0 0 0 0 0 0 0 7 6 4 0 0 0 0 8 7 4 0 0 0 0 6 5 4 0 0 0 0 5 4 4 0 0 0 0 5 2 1 0 0 0 0 4 3 4 0 0 0 0 8 3 4 0 0 0 0 2 1 2 0 0 0 0 M END """ pattern = Chem.MolFromMolBlock(data) m = Chem.MolFromSmiles("c1ccccc1C=O") self.assertTrue(m.HasSubstructMatch(pattern)) def testGithub1320(self): import pickle mol = Chem.MolFromSmiles('N[C@@H](C)O') mol2 = pickle.loads(pickle.dumps(mol)) self.assertEqual( Chem.MolToSmiles(mol, isomericSmiles=True), Chem.MolToSmiles(mol2, isomericSmiles=True)) Chem.SetDefaultPickleProperties(Chem.PropertyPickleOptions.AtomProps | Chem.PropertyPickleOptions.BondProps | Chem.PropertyPickleOptions.MolProps | Chem.PropertyPickleOptions.PrivateProps | Chem.PropertyPickleOptions.ComputedProps) mol3 = pickle.loads(pickle.dumps(mol)) for a1, a2 in zip(mol.GetAtoms(), mol3.GetAtoms()): d1 = a1.GetPropsAsDict() d2 = a2.GetPropsAsDict() if "__computedProps" in d1: c1 = list(d1["__computedProps"]) c2 = list(d2["__computedProps"]) del d1["__computedProps"] del d2["__computedProps"] self.assertEqual(c1, c2) assert d1 == d2 for a1, a2 in zip(mol.GetBonds(), mol3.GetBonds()): d1 = a1.GetPropsAsDict() d2 = a2.GetPropsAsDict() if "__computedProps" in d1: c1 = list(d1["__computedProps"]) c2 = list(d2["__computedProps"]) del d1["__computedProps"] del d2["__computedProps"] self.assertEqual(c1, c2) assert d1 == d2 self.assertEqual( Chem.MolToSmiles(mol, isomericSmiles=True), Chem.MolToSmiles(mol3, isomericSmiles=True)) def testOldPropPickles(self): data = 'crdkit.Chem.rdchem\nMol\np0\n(S\'\\xef\\xbe\\xad\\xde\\x00\\x00\\x00\\x00\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00)\\x00\\x00\\x00-\\x00\\x00\\x00\\x80\\x01\\x06\\x00`\\x00\\x00\\x00\\x01\\x03\\x07\\x00`\\x00\\x00\\x00\\x02\\x01\\x06 4\\x00\\x00\\x00\\x01\\x01\\x04\\x06\\x00`\\x00\\x00\\x00\\x01\\x03\\x06\\x00(\\x00\\x00\\x00\\x03\\x04\\x08\\x00(\\x00\\x00\\x00\\x03\\x02\\x07\\x00h\\x00\\x00\\x00\\x03\\x02\\x01\\x06 4\\x00\\x00\\x00\\x02\\x01\\x04\\x06\\x00(\\x00\\x00\\x00\\x03\\x04\\x08\\x00(\\x00\\x00\\x00\\x03\\x02\\x07\\x00(\\x00\\x00\\x00\\x03\\x03\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06 4\\x00\\x00\\x00\\x01\\x01\\x04\\x08\\x00(\\x00\\x00\\x00\\x03\\x02\\x06@(\\x00\\x00\\x00\\x03\\x04\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06 4\\x00\\x00\\x00\\x01\\x01\\x04\\x06\\x00(\\x00\\x00\\x00\\x03\\x04\\x08\\x00(\\x00\\x00\\x00\\x03\\x02\\x07\\x00h\\x00\\x00\\x00\\x03\\x02\\x01\\x06 
4\\x00\\x00\\x00\\x02\\x01\\x04\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06@(\\x00\\x00\\x00\\x03\\x04\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06@h\\x00\\x00\\x00\\x03\\x03\\x01\\x06@(\\x00\\x00\\x00\\x03\\x04\\x06\\x00`\\x00\\x00\\x00\\x03\\x01\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x06\\x00`\\x00\\x00\\x00\\x02\\x02\\x0b\\x00\\x01\\x00\\x01\\x02\\x00\\x02\\x03\\x00\\x02\\x04\\x00\\x04\\x05(\\x02\\x04\\x06 \\x06\\x07\\x00\\x07\\x08\\x00\\x08\\t(\\x02\\x08\\n \\n\\x0b\\x00\\x0b\\x0c\\x00\\x0c\\r\\x00\\r\\x0e \\x0e\\x0fh\\x0c\\x0f\\x10h\\x0c\\x10\\x11h\\x0c\\x11\\x12h\\x0c\\x12\\x13h\\x0c\\x0c\\x14\\x00\\x14\\x15\\x00\\x15\\x16\\x00\\x16\\x17(\\x02\\x16\\x18 \\x18\\x19\\x00\\x19\\x1a\\x00\\x1a\\x1b\\x00\\x1b\\x1c\\x00\\x1c\\x1d\\x00\\x1d\\x1eh\\x0c\\x1e\\x1fh\\x0c\\x1f h\\x0c !h\\x0c!"h\\x0c\\x07#\\x00#$\\x00$%\\x00%&\\x00&\\\'\\x00\\\'(\\x00\\x15\\n\\x00"\\x19\\x00(#\\x00\\x13\\x0eh\\x0c"\\x1dh\\x0c\\x14\\x05\\x05\\x0b\\n\\x15\\x14\\x0c\\x06\\x0f\\x10\\x11\\x12\\x13\\x0e\\x06\\x1a\\x1b\\x1c\\x1d"\\x19\\x06\\x1e\\x1f !"\\x1d\\x06$%&\\\'(#\\x17\\x00\\x00\\x00\\x00\\x12\\x03\\x00\\x00\\x00\\x07\\x00\\x00\\x00numArom\\x01\\x02\\x00\\x00\\x00\\x0f\\x00\\x00\\x00_StereochemDone\\x01\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00foo\\x00\\x03\\x00\\x00\\x00bar\\x13:\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x12\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x000\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x1d\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x001\\x04\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x15\\x00\\x00\\x00\\x12\\x00\\x00\\x00_ChiralityPossible\\x01\\x01\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPCode\\x00\\x01\\x00\\x00\\x00S\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x002\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x00\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x003\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x1a\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x004\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02"\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x005\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x1f\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x006\\x04\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x16\\x00\\x00\\x00\\x12\\x00\\x00\\x00_ChiralityPossible\\x01\\x01\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPCode\\x00\\x01\\x00\\x00\\x00S\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x007\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x1c\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x008\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02$\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x01\\x00\\x00\\x009\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02 
\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0010\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x13\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0011\\x04\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x18\\x00\\x00\\x00\\x12\\x00\\x00\\x00_ChiralityPossible\\x01\\x01\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPCode\\x00\\x01\\x00\\x00\\x00S\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0012\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02!\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0013\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x19\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0014\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x0f\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0015\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x0b\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0016\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x08\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0017\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x0b\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0018\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x0f\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0019\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x07\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0020\\x04\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x17\\x00\\x00\\x00\\x12\\x00\\x00\\x00_ChiralityPossible\\x01\\x01\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPCode\\x00\\x01\\x00\\x00\\x00S\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0021\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x1b\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0022\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02#\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0023\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x1e\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0024\\x04\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x14\\x00\\x00\\x00\\x12\\x00\\x00\\x00_ChiralityPossible\\x01\\x01\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPCode\\x00\\x01\\x00\\x00\\x00R\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0025\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x06\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0026\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x03\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0027\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x05\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0028\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x10\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0029\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x0c\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0030\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\t\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0031\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\n\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0032\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\r\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0033\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x11\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0034\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x0e\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0035\\x02\\x00\\x00\\x00\\x08\
\x00\\x00\\x00_CIPRank\\x02\\x04\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0036\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x02\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0037\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x01\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0038\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x02\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0039\\x02\\x00\\x00\\x00\\x08\\x00\\x00\\x00_CIPRank\\x02\\x04\\x00\\x00\\x00\\x05\\x00\\x00\\x00myidx\\x00\\x02\\x00\\x00\\x0040\\x13\\x16\'\np1\ntp2\nRp3\n.' import pickle # bonds were broken in v1 m2 = pickle.loads(data.encode("utf-8"), encoding='bytes') self.assertEqual(m2.GetProp("foo"), "bar") for atom in m2.GetAtoms(): self.assertEqual(atom.GetProp("myidx"), str(atom.GetIdx())) self.assertEqual( Chem.MolToSmiles(m2, True), Chem.MolToSmiles( Chem.MolFromSmiles( "CN[C@@H](C)C(=O)N[C@H](C(=O)N1C[C@@H](Oc2ccccc2)C[C@H]1C(=O)N[C@@H]1CCCc2ccccc21)C1CCCCC1" ), True)) def testGithub1461(self): # this is simple, it should throw a precondition and not seg fault m = Chem.RWMol() try: m.AddBond(0, 1, Chem.BondType.SINGLE) self.assertFalse(True) # shouldn't get here except RuntimeError: pass def testMolBundles1(self): b = Chem.MolBundle() smis = ('CC(Cl)(F)CC(F)(Br)', 'C[C@](Cl)(F)C[C@H](F)(Br)', 'C[C@](Cl)(F)C[C@@H](F)(Br)') for smi in smis: b.AddMol(Chem.MolFromSmiles(smi)) self.assertEqual(len(b), 3) self.assertEqual(b.Size(), 3) self.assertRaises(IndexError, lambda: b[4]) self.assertEqual( Chem.MolToSmiles(b[1], isomericSmiles=True), Chem.MolToSmiles(Chem.MolFromSmiles(smis[1]), isomericSmiles=True)) self.assertTrue( b.HasSubstructMatch(Chem.MolFromSmiles('CC(Cl)(F)CC(F)(Br)'), useChirality=True)) self.assertTrue( b.HasSubstructMatch(Chem.MolFromSmiles('C[C@](Cl)(F)C[C@@H](F)(Br)'), useChirality=True)) self.assertTrue( b.HasSubstructMatch(Chem.MolFromSmiles('C[C@@](Cl)(F)C[C@@H](F)(Br)'), useChirality=False)) self.assertFalse( b.HasSubstructMatch(Chem.MolFromSmiles('C[C@@](Cl)(F)C[C@@H](F)(Br)'), useChirality=True)) self.assertEqual( len(b.GetSubstructMatch(Chem.MolFromSmiles('CC(Cl)(F)CC(F)(Br)'), useChirality=True)), 8) self.assertEqual( len(b.GetSubstructMatch(Chem.MolFromSmiles('C[C@](Cl)(F)C[C@@H](F)(Br)'), useChirality=True)), 8) self.assertEqual( len( b.GetSubstructMatch(Chem.MolFromSmiles('C[C@@](Cl)(F)C[C@@H](F)(Br)'), useChirality=False)), 8) self.assertEqual( len( b.GetSubstructMatch(Chem.MolFromSmiles('C[C@@](Cl)(F)C[C@@H](F)(Br)'), useChirality=True)), 0) self.assertEqual( len(b.GetSubstructMatches(Chem.MolFromSmiles('CC(Cl)(F)CC(F)(Br)'), useChirality=True)), 1) self.assertEqual( len( b.GetSubstructMatches(Chem.MolFromSmiles('C[C@](Cl)(F)C[C@@H](F)(Br)'), useChirality=True)), 1) self.assertEqual( len( b.GetSubstructMatches( Chem.MolFromSmiles('C[C@@](Cl)(F)C[C@@H](F)(Br)'), useChirality=False)), 1) self.assertEqual( len( b.GetSubstructMatches(Chem.MolFromSmiles('C[C@@](Cl)(F)C[C@@H](F)(Br)'), useChirality=True)), 0) self.assertEqual( len(b.GetSubstructMatches(Chem.MolFromSmiles('CC(Cl)(F)CC(F)(Br)'), useChirality=True)[0]), 8) self.assertEqual( len( b.GetSubstructMatches(Chem.MolFromSmiles('C[C@](Cl)(F)C[C@@H](F)(Br)'), useChirality=True)[0]), 8) self.assertEqual( len( b.GetSubstructMatches( Chem.MolFromSmiles('C[C@@](Cl)(F)C[C@@H](F)(Br)'), useChirality=False)[0]), 8) def testMolBundles2(self): b = Chem.MolBundle() smis = ('Fc1c(Cl)cccc1', 'Fc1cc(Cl)ccc1', 'Fc1ccc(Cl)cc1') for smi in smis: 
b.AddMol(Chem.MolFromSmiles(smi)) self.assertEqual(len(b), 3) self.assertEqual(b.Size(), 3) self.assertTrue(Chem.MolFromSmiles('Fc1c(Cl)cccc1').HasSubstructMatch(b)) self.assertTrue(Chem.MolFromSmiles('Fc1cc(Cl)ccc1').HasSubstructMatch(b)) self.assertTrue(Chem.MolFromSmiles('Fc1c(Cl)cccc1C').HasSubstructMatch(b)) self.assertTrue(Chem.MolFromSmiles('Fc1cc(Cl)ccc1C').HasSubstructMatch(b)) self.assertFalse(Chem.MolFromSmiles('Fc1c(Br)cccc1').HasSubstructMatch(b)) self.assertEqual(len(Chem.MolFromSmiles('Fc1c(Cl)cccc1').GetSubstructMatch(b)), 8) self.assertEqual(len(Chem.MolFromSmiles('Fc1c(Cl)cccc1').GetSubstructMatches(b)), 1) self.assertEqual(len(Chem.MolFromSmiles('Fc1c(Cl)cccc1').GetSubstructMatches(b)[0]), 8) self.assertEqual(len(Chem.MolFromSmiles('Fc1ccc(Cl)cc1').GetSubstructMatches(b)), 1) self.assertEqual( len(Chem.MolFromSmiles('Fc1ccc(Cl)cc1').GetSubstructMatches(b, uniquify=False)), 2) self.assertEqual(len(Chem.MolFromSmiles('Fc1c(C)cccc1').GetSubstructMatch(b)), 0) self.assertEqual(len(Chem.MolFromSmiles('Fc1c(C)cccc1').GetSubstructMatches(b)), 0) def testGithub1622(self): nonaromatics = ( "C1=C[N]C=C1", # radicals are not two electron donors "O=C1C=CNC=C1", # exocyclic double bonds don't steal electrons "C1=CS(=O)C=C1", # not sure how to classify this example from the # OEChem docs "C1#CC=CC=C1" # benzyne # 5-membered heterocycles "C1=COC=C1", # furan "C1=CSC=C1", # thiophene "C1=CNC=C1", #pyrrole "C1=COC=N1", # oxazole "C1=CSC=N1", # thiazole "C1=CNC=N1", # imidazole "C1=CNN=C1", # pyrazole "C1=CON=C1", # isoxazole "C1=CSN=C1", # isothiazole "C1=CON=N1", # 1,2,3-oxadiazole "C1=CNN=N1", # 1,2,3-triazole "N1=CSC=N1", # 1,3,4-thiadiazole # not outside the second rows "C1=CC=C[Si]=C1", "C1=CC=CC=P1", # 5-membered heterocycles outside the second row "C1=C[Se]C=C1", 'C1=C[Te]C=C1') for smi in nonaromatics: m = Chem.MolFromSmiles(smi, sanitize=False) Chem.SanitizeMol(m, Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY) Chem.SetAromaticity(m, Chem.AROMATICITY_MDL) self.assertFalse(m.GetAtomWithIdx(0).GetIsAromatic()) aromatics = ( "C1=CC=CC=C1", # benzene, of course # hetrocyclics "N1=CC=CC=C1", # pyridine "N1=CC=CC=N1", # pyridazine "N1=CC=CN=C1", # pyrimidine "N1=CC=NC=C1", # pyrazine "N1=CN=CN=C1", # 1,3,5-triazine # polycyclic aromatics "C1=CC2=CC=CC=CC2=C1", # azulene "C1=CC=CC2=CC=CC=C12", "C1=CC2=CC=CC=CC=C12", "C1=CC=C2C(=C1)N=CC=N2", "C1=CN=CC2C=CC=CC1=2", "C1=CC=C2C(=C1)N=C3C=CC=CC3=N2", "C1=CN=NC2C=CC=CC1=2", # macrocycle aromatics "C1=CC=CC=CC=CC=C1", "C1=CC=CC=CC=CC=CC=CC=CC=CC=C1", "N1=CN=NC=CC=CC=CC=CC=CC=CC=CC=CC=CC=CC=CC=CC=C1") for smi in aromatics: m = Chem.MolFromSmiles(smi, sanitize=False) Chem.SanitizeMol(m, Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY) Chem.SetAromaticity(m, Chem.AROMATICITY_MDL) self.assertTrue(m.GetAtomWithIdx(0).GetIsAromatic()) def testMolBlockChirality(self): m = Chem.MolFromSmiles('C[C@H](Cl)Br') mb = Chem.MolToMolBlock(m) m2 = Chem.MolFromMolBlock(mb) csmi1 = Chem.MolToSmiles(m, isomericSmiles=True) csmi2 = Chem.MolToSmiles(m2, isomericSmiles=True) self.assertEqual(csmi1, csmi2) def testIssue1735(self): # this shouldn't seg fault... 
m = Chem.RWMol() ranks = Chem.CanonicalRankAtoms(m, breakTies=False) ranks = Chem.CanonicalRankAtoms(m, breakTies=True) def testGithub1615(self): mb = """Issue399a.mol ChemDraw04050615582D 4 4 0 0 0 0 0 0 0 0999 V2000 -0.7697 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.0553 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.7697 0.4125 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.7697 -0.4125 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 2 1 1 0 2 3 1 0 3 4 1 0 2 4 1 0 M END""" m = Chem.MolFromMolBlock(mb) self.assertFalse(m.GetAtomWithIdx(1).HasProp("_CIPCode")) self.assertEqual(m.GetBondWithIdx(0).GetBondDir(), Chem.BondDir.NONE) self.assertEqual(m.GetAtomWithIdx(1).GetChiralTag(), Chem.ChiralType.CHI_UNSPECIFIED) m.GetAtomWithIdx(1).SetChiralTag(Chem.ChiralType.CHI_TETRAHEDRAL_CW) Chem.AssignStereochemistry(m, force=True) self.assertTrue(m.GetAtomWithIdx(1).HasProp("_CIPCode")) self.assertEqual(m.GetAtomWithIdx(1).GetProp("_CIPCode"), "S") self.assertEqual(m.GetBondWithIdx(0).GetBondDir(), Chem.BondDir.NONE) Chem.WedgeBond(m.GetBondWithIdx(0), 1, m.GetConformer()) self.assertEqual(m.GetBondWithIdx(0).GetBondDir(), Chem.BondDir.BEGINWEDGE) def testSmilesToAtom(self): a = Chem.AtomFromSmiles("C") self.assertEqual(a.GetAtomicNum(), 6) b = Chem.BondFromSmiles("=") self.assertEqual(b.GetBondType(), Chem.BondType.DOUBLE) a = Chem.AtomFromSmiles("error") self.assertIs(a, None) b = Chem.BondFromSmiles("d") self.assertIs(b, None) a = Chem.AtomFromSmarts("C") self.assertEqual(a.GetAtomicNum(), 6) b = Chem.BondFromSmarts("=") self.assertEqual(b.GetBondType(), Chem.BondType.DOUBLE) a = Chem.AtomFromSmarts("error") self.assertIs(a, None) b = Chem.BondFromSmarts("d") self.assertIs(b, None) def testSVGParsing(self): svg = """<?xml version='1.0' encoding='iso-8859-1'?> <svg version='1.1' baseProfile='full' xmlns='http://www.w3.org/2000/svg' xmlns:rdkit='http://www.rdkit.org/xml' xmlns:xlink='http://www.w3.org/1999/xlink' xml:space='preserve' width='200px' height='200px' > <rect style='opacity:1.0;fill:#FFFFFF;stroke:none' width='200' height='200' x='0' y='0'> </rect> <path d='M 9.09091,89.4974 24.2916,84.7462' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 24.2916,84.7462 39.4923,79.9949' style='fill:none;fill-rule:evenodd;stroke:#0000FF;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 86.2908,106.814 75.1709,93.4683 72.0765,96.8285 86.2908,106.814' style='fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 75.1709,93.4683 57.8622,86.8431 64.051,80.1229 75.1709,93.4683' style='fill:#0000FF;fill-rule:evenodd;stroke:#0000FF;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 75.1709,93.4683 72.0765,96.8285 57.8622,86.8431 75.1709,93.4683' style='fill:#0000FF;fill-rule:evenodd;stroke:#0000FF;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 86.2908,106.814 82.1459,125.293' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 82.1459,125.293 78.0009,143.772' style='fill:none;fill-rule:evenodd;stroke:#00CC00;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 86.2908,106.814 129.89,93.1862' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> 
<path d='M 134.347,94.186 138.492,75.7069' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 138.492,75.7069 142.637,57.2277' style='fill:none;fill-rule:evenodd;stroke:#FF0000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 125.432,92.1865 129.577,73.7074' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 129.577,73.7074 133.722,55.2282' style='fill:none;fill-rule:evenodd;stroke:#FF0000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 129.89,93.1862 142.557,104.852' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 142.557,104.852 155.224,116.517' style='fill:none;fill-rule:evenodd;stroke:#FF0000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <text x='39.4923' y='83.483' style='font-size:15px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#0000FF' ><tspan>NH</tspan></text> <text x='67.6656' y='158.998' style='font-size:15px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#00CC00' ><tspan>Cl</tspan></text> <text x='132.777' y='56.228' style='font-size:15px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#FF0000' ><tspan>O</tspan></text> <text x='149.782' y='131.743' style='font-size:15px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#FF0000' ><tspan>OH</tspan></text> <text x='89.9952' y='194' style='font-size:12px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#000000' ><tspan>m1</tspan></text> <metadata> <rdkit:mol xmlns:rdkit = "http://www.rdkit.org/xml" version="0.9"> <rdkit:atom idx="1" atom-smiles="[CH3]" drawing-x="9.09091" drawing-y="89.4974" x="-2.78651" y="0.295614" z="0" /> <rdkit:atom idx="2" atom-smiles="[NH]" drawing-x="52.6897" drawing-y="75.8699" x="-1.35482" y="0.743114" z="0" /> <rdkit:atom idx="3" atom-smiles="[C@H]" drawing-x="86.2908" drawing-y="106.814" x="-0.251428" y="-0.273019" z="0" /> <rdkit:atom idx="4" atom-smiles="[Cl]" drawing-x="76.2932" drawing-y="151.385" x="-0.579728" y="-1.73665" z="0" /> <rdkit:atom idx="5" atom-smiles="[C]" drawing-x="129.89" drawing-y="93.1862" x="1.18027" y="0.174481" z="0" /> <rdkit:atom idx="6" atom-smiles="[O]" drawing-x="139.887" drawing-y="48.6148" x="1.50857" y="1.63811" z="0" /> <rdkit:atom idx="7" atom-smiles="[OH]" drawing-x="163.491" drawing-y="124.13" x="2.28366" y="-0.841652" z="0" /> <rdkit:bond idx="1" begin-atom-idx="1" end-atom-idx="2" bond-smiles="-" /> <rdkit:bond idx="2" begin-atom-idx="2" end-atom-idx="3" bond-smiles="-" /> <rdkit:bond idx="3" begin-atom-idx="3" end-atom-idx="4" bond-smiles="-" /> <rdkit:bond idx="4" begin-atom-idx="3" end-atom-idx="5" bond-smiles="-" /> <rdkit:bond idx="5" begin-atom-idx="5" end-atom-idx="6" bond-smiles="=" /> <rdkit:bond idx="6" begin-atom-idx="5" end-atom-idx="7" bond-smiles="-" /> </rdkit:mol></metadata> </svg>""" mol = Chem.MolFromRDKitSVG(svg) self.assertEqual(mol.GetNumAtoms(), 7) self.assertEqual(Chem.MolToSmiles(mol), 'CN[C@H](Cl)C(=O)O') svg2 = """<?xml version='1.0' 
encoding='iso-8859-1'?> <svg version='1.1' baseProfile='full' xmlns='http://www.w3.org/2000/svg' xmlns:rdkit='http://www.rdkit.org/xml' xmlns:xlink='http://www.w3.org/1999/xlink' xml:space='preserve' width='200px' height='200px' > <rect style='opacity:1.0;fill:#FFFFFF;stroke:none' width='200' height='200' x='0' y='0'> </rect> <path d='M 9.09091,89.4974 24.2916,84.7462' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 24.2916,84.7462 39.4923,79.9949' style='fill:none;fill-rule:evenodd;stroke:#0000FF;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 86.2908,106.814 75.1709,93.4683 72.0765,96.8285 86.2908,106.814' style='fill:#000000;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 75.1709,93.4683 57.8622,86.8431 64.051,80.1229 75.1709,93.4683' style='fill:#0000FF;fill-rule:evenodd;stroke:#0000FF;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 75.1709,93.4683 72.0765,96.8285 57.8622,86.8431 75.1709,93.4683' style='fill:#0000FF;fill-rule:evenodd;stroke:#0000FF;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 86.2908,106.814 82.1459,125.293' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 82.1459,125.293 78.0009,143.772' style='fill:none;fill-rule:evenodd;stroke:#00CC00;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 86.2908,106.814 129.89,93.1862' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 134.347,94.186 138.492,75.7069' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 138.492,75.7069 142.637,57.2277' style='fill:none;fill-rule:evenodd;stroke:#FF0000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 125.432,92.1865 129.577,73.7074' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 129.577,73.7074 133.722,55.2282' style='fill:none;fill-rule:evenodd;stroke:#FF0000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 129.89,93.1862 142.557,104.852' style='fill:none;fill-rule:evenodd;stroke:#000000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <path d='M 142.557,104.852 155.224,116.517' style='fill:none;fill-rule:evenodd;stroke:#FF0000;stroke-width:2px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1' /> <text x='39.4923' y='83.483' style='font-size:15px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#0000FF' ><tspan>NH</tspan></text> <text x='67.6656' y='158.998' style='font-size:15px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#00CC00' ><tspan>Cl</tspan></text> <text x='132.777' y='56.228' style='font-size:15px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#FF0000' ><tspan>O</tspan></text> <text x='149.782' y='131.743' 
style='font-size:15px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#FF0000' ><tspan>OH</tspan></text> <text x='89.9952' y='194' style='font-size:12px;font-style:normal;font-weight:normal;fill-opacity:1;stroke:none;font-family:sans-serif;text-anchor:start;fill:#000000' ><tspan>m1</tspan></text> </svg>""" mol = Chem.MolFromRDKitSVG(svg2) self.assertTrue(mol is None) with self.assertRaises(RuntimeError): mol = Chem.MolFromRDKitSVG("bad svg") def testAssignChiralTypesFromBondDirs(self): """ Just check to see that AssignChiralTypesFromBondDirs is wrapped. Critical tests of the underlying C++ function already exist in SD file reader tests. """ mol = Chem.MolFromSmiles('C(F)(Cl)Br') rdkit.Chem.rdDepictor.Compute2DCoords(mol) atom0 = mol.GetAtomWithIdx(0) self.assertEqual(atom0.GetChiralTag(), Chem.rdchem.ChiralType.CHI_UNSPECIFIED) bond = mol.GetBondBetweenAtoms(0, 1) bond.SetBondDir(Chem.rdchem.BondDir.BEGINWEDGE) Chem.AssignChiralTypesFromBondDirs(mol) self.assertEqual(atom0.GetChiralTag(), Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW) def testAssignStereochemistryFrom3D(self): def _stereoTester(mol,expectedCIP,expectedStereo): mol.UpdatePropertyCache() self.assertEqual(mol.GetNumAtoms(),9) self.assertFalse(mol.GetAtomWithIdx(1).HasProp("_CIPCode")) self.assertEqual(mol.GetBondWithIdx(3).GetStereo(),Chem.BondStereo.STEREONONE) for bond in mol.GetBonds(): bond.SetBondDir(Chem.BondDir.NONE) Chem.AssignStereochemistryFrom3D(mol) self.assertTrue(mol.GetAtomWithIdx(1).HasProp("_CIPCode")) self.assertEqual(mol.GetAtomWithIdx(1).GetProp("_CIPCode"),expectedCIP) self.assertEqual(mol.GetBondWithIdx(3).GetStereo(),expectedStereo) fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'test_data', 'stereochem.sdf') suppl = Chem.SDMolSupplier(fileN, sanitize=False) expected = ( ("R",Chem.BondStereo.STEREOZ), ("R",Chem.BondStereo.STEREOE), ("S",Chem.BondStereo.STEREOZ), ("S",Chem.BondStereo.STEREOE), ) for i,mol in enumerate(suppl): cip,stereo = expected[i] _stereoTester(mol,cip,stereo) def testGitHub2082(self): ctab=""" MJ150720 9 9 0 0 0 0 0 0 0 0999 V2000 2.5687 -0.7144 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.1562 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.5687 0.7144 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 1.3312 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.9187 -0.7144 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.0937 -0.7144 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -0.3187 0.0000 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.0937 0.7144 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 0.9187 0.7144 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2 1 1 6 2 3 1 0 2 4 1 0 4 5 2 0 5 6 1 0 6 7 2 0 7 8 1 0 8 9 2 0 9 4 1 0 M END """ mol = Chem.MolFromMolBlock(ctab) self.assertFalse(mol.GetConformer().Is3D()) self.assertTrue("@" in Chem.MolToSmiles(mol,True)) def testGitHub2082_2(self): # test a mol block that lies is 3D but labelled 2D ofile = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'Wrap', 'test_data', 'issue2082.mol') with open(ofile) as inf: ctab = inf.read() m = Chem.MolFromMolBlock(ctab) self.assertTrue(m.GetConformer().Is3D()) def testSetQuery(self): from rdkit.Chem import rdqueries pat = Chem.MolFromSmarts("[C]") self.assertFalse(Chem.MolFromSmiles("c1ccccc1").HasSubstructMatch(pat)) q = rdqueries.AtomNumEqualsQueryAtom(6) for atom in pat.GetAtoms(): atom.SetQuery(q) self.assertTrue(Chem.MolFromSmiles("c1ccccc1").HasSubstructMatch(pat)) def testGitHub1985(self): # simple check, this used to throw an exception try: Chem.MolToSmarts(Chem.MolFromSmarts("[C@]")) except: self.fail("[C@] caused an 
exception when roundtripping smarts") def testGetEnhancedStereo(self): rdbase = os.environ['RDBASE'] filename = os.path.join(rdbase, 'Code/GraphMol/FileParsers/test_data/two_centers_or.mol') m = Chem.MolFromMolFile(filename) sg = m.GetStereoGroups() self.assertEqual(len(sg), 2) group1 = sg[1] self.assertEqual(group1.GetGroupType(), Chem.StereoGroupType.STEREO_OR) stereo_atoms = group1.GetAtoms() self.assertEqual(len(stereo_atoms), 2) # file is 1 indexed and says 5 self.assertEqual(stereo_atoms[1].GetIdx(), 4) # make sure the atoms are connected to the parent molecule stereo_atoms[1].SetProp("foo","bar") self.assertTrue(m.GetAtomWithIdx(4).HasProp("foo")) # make sure that we can iterate over the atoms: for at in stereo_atoms: at.SetProp("foo2","bar2") self.assertTrue(m.GetAtomWithIdx(at.GetIdx()).HasProp("foo2")) def testEnhancedStereoPreservesMol(self): """ Check that the stereo group (and the atoms therein) preserve the lifetime of the associated mol. """ rdbase = os.environ['RDBASE'] filename = os.path.join(rdbase, 'Code/GraphMol/FileParsers/test_data/two_centers_or.mol') m = Chem.MolFromMolFile(filename) sg = m.GetStereoGroups() m = None gc.collect() self.assertEqual(len(sg), 2) group1 = sg[1] stereo_atoms = group1.GetAtoms() sg = None gc.collect() self.assertEqual(stereo_atoms[1].GetIdx(), 4) self.assertEqual(stereo_atoms[1].GetOwningMol().GetNumAtoms(),8) def testSetEnhancedStereoGroup(self): m = Chem.MolFromSmiles('F[C@@H](Br)[C@H](F)Cl |o1:1|') m2 = Chem.RWMol(m) groups = m2.GetStereoGroups() self.assertEqual(len(groups), 1) # Can clear the StereoGroups by setting to an empty list m2.SetStereoGroups([]) self.assertEqual(len(m2.GetStereoGroups()), 0) # Can add new StereoGroups group1 = Chem.rdchem.CreateStereoGroup(Chem.rdchem.StereoGroupType.STEREO_OR, m2, [1]) m2.SetStereoGroups([group1]) self.assertEqual(len(m2.GetStereoGroups()), 1) def testSetEnhancedStereoGroupOwnershipCheck(self): # make sure that the object returned by CreateStereoGroup() # preserves the owning molecule: m = Chem.RWMol(Chem.MolFromSmiles('F[C@@H](Br)[C@H](F)Cl')) group1 = Chem.rdchem.CreateStereoGroup(Chem.rdchem.StereoGroupType.STEREO_OR, m, [1]) m.SetStereoGroups([group1]) self.assertEqual(len(m.GetStereoGroups()), 1) m = None gc.collect() stereo_atoms = group1.GetAtoms() self.assertEqual(stereo_atoms[0].GetIdx(), 1) return self.assertEqual(stereo_atoms[0].GetOwningMol().GetNumAtoms(), 6) # make sure we can't add StereoGroups constructed from one molecule # to a different one: m2 = Chem.RWMol(Chem.MolFromSmiles('F[C@@H](Br)[C@H](F)Cl')) with self.assertRaises(ValueError): m2.SetStereoGroups([group1]) def testSetEnhancedStereoTypeChecking(self): m = Chem.RWMol(Chem.MolFromSmiles('F[C@@H](Br)[C@H](F)Cl')) # List or tuple should be allowed: group = Chem.rdchem.CreateStereoGroup(Chem.rdchem.StereoGroupType.STEREO_OR, m, [1, 3]) group = Chem.rdchem.CreateStereoGroup(Chem.rdchem.StereoGroupType.STEREO_OR, m, (1, 3)) # Python ValueError (range error) with index past the end with self.assertRaises(ValueError): group = Chem.rdchem.CreateStereoGroup(Chem.rdchem.StereoGroupType.STEREO_OR, m, [100]) # Mol is None with self.assertRaises(TypeError): group = Chem.rdchem.CreateStereoGroup(Chem.rdchem.StereoGroupType.STEREO_OR, None, [1]) # Atom indices must be numbers with self.assertRaises(TypeError): group = Chem.rdchem.CreateStereoGroup(Chem.rdchem.StereoGroupType.STEREO_OR, m, [1, 'text']) def testSubstructParameters(self): m = Chem.MolFromSmiles('C[C@](F)(Cl)OCC') p1 = Chem.MolFromSmiles('C[C@](F)(Cl)O') p2 = 
Chem.MolFromSmiles('C[C@@](F)(Cl)O') p3 = Chem.MolFromSmiles('CC(F)(Cl)O') ps = Chem.SubstructMatchParameters() self.assertTrue(m.HasSubstructMatch(p1,ps)) self.assertTrue(m.HasSubstructMatch(p2,ps)) self.assertTrue(m.HasSubstructMatch(p3,ps)) self.assertEqual(m.GetSubstructMatch(p1,ps),(0,1,2,3,4)) self.assertEqual(m.GetSubstructMatch(p2,ps),(0,1,2,3,4)) self.assertEqual(m.GetSubstructMatch(p3,ps),(0,1,2,3,4)) self.assertEqual(m.GetSubstructMatches(p1,ps),((0,1,2,3,4),)) self.assertEqual(m.GetSubstructMatches(p2,ps),((0,1,2,3,4),)) self.assertEqual(m.GetSubstructMatches(p3,ps),((0,1,2,3,4),)) ps.useChirality = True self.assertTrue(m.HasSubstructMatch(p1,ps)) self.assertFalse(m.HasSubstructMatch(p2,ps)) self.assertTrue(m.HasSubstructMatch(p3,ps)) self.assertEqual(m.GetSubstructMatch(p1,ps),(0,1,2,3,4)) self.assertEqual(m.GetSubstructMatch(p2,ps),()) self.assertEqual(m.GetSubstructMatch(p3,ps),(0,1,2,3,4)) self.assertEqual(m.GetSubstructMatches(p1,ps),((0,1,2,3,4),)) self.assertEqual(m.GetSubstructMatches(p2,ps),()) self.assertEqual(m.GetSubstructMatches(p3,ps),((0,1,2,3,4),)) def testSubstructParametersBundles(self): b = Chem.MolBundle() smis = ('C[C@](F)(Cl)O', 'C[C@](Br)(Cl)O', 'C[C@](I)(Cl)O') for smi in smis: b.AddMol(Chem.MolFromSmiles(smi)) self.assertEqual(len(b), 3) self.assertEqual(b.Size(), 3) ps = Chem.SubstructMatchParameters() ps.useChirality = True self.assertTrue(Chem.MolFromSmiles('C[C@](F)(Cl)OCC').HasSubstructMatch(b,ps)) self.assertFalse(Chem.MolFromSmiles('C[C@@](F)(Cl)OCC').HasSubstructMatch(b,ps)) self.assertTrue(Chem.MolFromSmiles('C[C@](I)(Cl)OCC').HasSubstructMatch(b,ps)) self.assertFalse(Chem.MolFromSmiles('C[C@@](I)(Cl)OCC').HasSubstructMatch(b,ps)) self.assertEqual(Chem.MolFromSmiles('C[C@](F)(Cl)OCC').GetSubstructMatch(b,ps),(0,1,2,3,4)) self.assertEqual(Chem.MolFromSmiles('C[C@@](F)(Cl)OCC').GetSubstructMatch(b,ps),()) self.assertEqual(Chem.MolFromSmiles('C[C@](I)(Cl)OCC').GetSubstructMatch(b,ps),(0,1,2,3,4)) self.assertEqual(Chem.MolFromSmiles('C[C@@](I)(Cl)OCC').GetSubstructMatch(b,ps),()) self.assertEqual(Chem.MolFromSmiles('C[C@](F)(Cl)OCC').GetSubstructMatches(b,ps),((0,1,2,3,4),)) self.assertEqual(Chem.MolFromSmiles('C[C@@](F)(Cl)OCC').GetSubstructMatches(b,ps),()) self.assertEqual(Chem.MolFromSmiles('C[C@](I)(Cl)OCC').GetSubstructMatches(b,ps),((0,1,2,3,4),)) self.assertEqual(Chem.MolFromSmiles('C[C@@](I)(Cl)OCC').GetSubstructMatches(b,ps),()) def testSubstructParametersBundles2(self): b = Chem.MolBundle() smis = ('C[C@](F)(Cl)O', 'C[C@](Br)(Cl)O', 'C[C@](I)(Cl)O') for smi in smis: b.AddMol(Chem.MolFromSmiles(smi)) self.assertEqual(len(b), 3) b2 = Chem.MolBundle() smis = ('C[C@@](F)(Cl)O', 'C[C@@](Br)(Cl)O', 'C[C@@](I)(Cl)O') for smi in smis: b2.AddMol(Chem.MolFromSmiles(smi)) self.assertEqual(len(b2), 3) ps = Chem.SubstructMatchParameters() ps.useChirality = True self.assertTrue(b.HasSubstructMatch(b,ps)) self.assertFalse(b.HasSubstructMatch(b2,ps)) self.assertFalse(b2.HasSubstructMatch(b,ps)) self.assertEqual(b.GetSubstructMatch(b,ps),(0,1,2,3,4)) self.assertEqual(b.GetSubstructMatch(b2,ps),()) self.assertEqual(b2.GetSubstructMatch(b,ps),()) self.assertEqual(b.GetSubstructMatches(b,ps),((0,1,2,3,4),)) self.assertEqual(b.GetSubstructMatches(b2,ps),()) self.assertEqual(b2.GetSubstructMatches(b,ps),()) def testGithub2285(self): fileN = os.path.join(RDConfig.RDBaseDir, 'Code', 'GraphMol', 'FileParsers', 'test_data', 'github2285.sdf') supp = Chem.ForwardSDMolSupplier(fileN, removeHs=False) if hasattr(supp, "__next__"): self.assertTrue(supp.__next__() is 
not None) else: self.assertTrue(supp.next() is not None) def testBitVectProp(self): bv = DataStructs.ExplicitBitVect(100) m = Chem.MolFromSmiles("CC") for atom in m.GetAtoms(): bv.SetBit(atom.GetIdx()) atom.SetExplicitBitVectProp("prop", bv) for atom in m.GetAtoms(): bv = atom.GetExplicitBitVectProp("prop") self.assertTrue(bv.GetBit(atom.GetIdx())) def testBitVectQuery(self): bv = DataStructs.ExplicitBitVect(4) bv.SetBit(0) bv.SetBit(2) # wow, what a mouthfull.. qa = rdqueries.HasBitVectPropWithValueQueryAtom("prop", bv, tolerance=0.0) m = Chem.MolFromSmiles("CC") for atom in m.GetAtoms(): if atom.GetIdx() == 0: atom.SetExplicitBitVectProp("prop", bv) l = tuple([x.GetIdx() for x in m.GetAtomsMatchingQuery(qa)]) self.assertEqual(l, (0,)) m = Chem.MolFromSmiles("CC") for atom in m.GetAtoms(): bv = DataStructs.ExplicitBitVect(4) bv.SetBit(atom.GetIdx()) atom.SetExplicitBitVectProp("prop", bv) sma = Chem.MolFromSmarts("C") for atom in sma.GetAtoms(): bv = DataStructs.ExplicitBitVect(4) bv.SetBit(1) qa = rdqueries.HasBitVectPropWithValueQueryAtom("prop", bv, tolerance=0.0) atom.ExpandQuery(qa) res = m.GetSubstructMatches(sma) self.assertEqual(res, ((1,),)) sma = Chem.MolFromSmarts("C") for atom in sma.GetAtoms(): bv = DataStructs.ExplicitBitVect(4) bv.SetBit(0) qa = rdqueries.HasBitVectPropWithValueQueryAtom("prop", bv, tolerance=0.0) atom.ExpandQuery(qa) res = m.GetSubstructMatches(sma) self.assertEqual(res, ((0,),)) sma = Chem.MolFromSmarts("C") for atom in sma.GetAtoms(): bv = DataStructs.ExplicitBitVect(4) bv.SetBit(0) qa = rdqueries.HasBitVectPropWithValueQueryAtom("prop", bv, tolerance=1.0) atom.ExpandQuery(qa) res = m.GetSubstructMatches(sma) self.assertEqual(res, ((0,),(1,))) def testGithub2441(self): m = Chem.MolFromSmiles("CC") conf = Chem.Conformer(2) m.AddConformer(conf,assignId=False) m.GetConformer().SetIntProp("foo",1) m.GetConformer().SetProp("bar","foo") self.assertTrue(m.GetConformer().HasProp("foo")) self.assertFalse(m.GetConformer().HasProp("food")) d = m.GetConformer().GetPropsAsDict() self.assertTrue('foo' in d) self.assertTrue('bar' in d) self.assertEqual(d['bar'],'foo') self.assertEqual(m.GetConformer().GetProp("bar"),"foo") self.assertEqual(m.GetConformer().GetIntProp("foo"),1) def testGithub2479(self): # Chemistry failure in last entry smi2='''c1ccccc duff c1ccccc1 ok c1ccncc1 pyridine C(C garbage C1CC1 ok2 C1C(Cl)C1 ok3 CC(C)(C)(C)C duff2 ''' suppl2 = Chem.SmilesMolSupplier() suppl2.SetData(smi2, titleLine=False, nameColumn=1) l = [x for x in suppl2] self.assertEqual(len(l),7) self.assertTrue(l[6] is None) # SMILES failure in last entry smi2='''c1ccccc duff c1ccccc1 ok c1ccncc1 pyridine C(C garbage C1CC1 ok2 C1C(Cl)C1 ok3 C1C(Cl)CCCC duff2 ''' suppl2 = Chem.SmilesMolSupplier() suppl2.SetData(smi2, titleLine=False, nameColumn=1) l = [x for x in suppl2] self.assertEqual(len(l),7) self.assertTrue(l[6] is None) sdf=b""" Mrv1810 06051911332D 3 2 0 0 0 0 999 V2000 -13.3985 4.9850 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -12.7066 5.4343 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 -12.0654 4.9151 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 M END $$$$ Mrv1810 06051911332D 3 2 0 0 0 0 999 V2000 -10.3083 4.8496 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -9.6408 5.3345 0.0000 F 0 0 0 0 0 0 0 0 0 0 0 0 -9.0277 4.7825 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 M END $$$$ Mrv1810 06051911332D 3 2 0 0 0 0 999 V2000 -10.3083 4.8496 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -9.6""" suppl3 = Chem.SDMolSupplier() suppl3.SetData(sdf) l = [x for x in suppl3] self.assertEqual(len(l),3) 
self.assertTrue(l[1] is None) self.assertTrue(l[2] is None) from io import BytesIO sio = BytesIO(sdf) suppl3 = Chem.ForwardSDMolSupplier(sio) l = [x for x in suppl3] self.assertEqual(len(l),3) self.assertTrue(l[1] is None) self.assertTrue(l[2] is None) sdf=b""" Mrv1810 06051911332D 3 2 0 0 0 0 999 V2000 -13.3985 4.9850 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -12.7066 5.4343 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 -12.0654 4.9151 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 M END > <pval> (1) [1,2,] $$$$ Mrv1810 06051911332D 3 2 0 0 0 0 999 V2000 -10.3083 4.8496 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 -9.6408 5.3345 0.0000 O 0 0 0 0 0 0 0 0 0 0 0 0 -9.0277 4.7825 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 1 0 0 0 0 2 3 1 0 0 0 0 M END > <pval> (1) [1,2,] """ suppl3 = Chem.SDMolSupplier() suppl3.SetData(sdf) l = [x for x in suppl3] self.assertEqual(len(l),2) self.assertTrue(l[0] is not None) self.assertTrue(l[1] is not None) from io import BytesIO sio = BytesIO(sdf) suppl3 = Chem.ForwardSDMolSupplier(sio) l = [x for x in suppl3] self.assertEqual(len(l),2) self.assertTrue(l[0] is not None) self.assertTrue(l[1] is not None) def testXYZ(self): conf = Chem.Conformer(5) conf.SetAtomPosition(0, [0.000, 0.000, 0.000]) conf.SetAtomPosition(1, [-0.635, -0.635, 0.635]) conf.SetAtomPosition(2, [-0.635, 0.635, -0.635]) conf.SetAtomPosition(3, [0.635, -0.635, -0.635]) conf.SetAtomPosition(4, [0.635, 0.635, 0.635]) emol = Chem.EditableMol(Chem.Mol()) for z in [6, 1, 1, 1, 1]: emol.AddAtom(Chem.Atom(z)) mol = emol.GetMol() mol.SetProp('_Name', 'methane\nthis part should not be output') mol.AddConformer(conf) xyzblock_expected = """5 methane C 0.000000 0.000000 0.000000 H -0.635000 -0.635000 0.635000 H -0.635000 0.635000 -0.635000 H 0.635000 -0.635000 -0.635000 H 0.635000 0.635000 0.635000 """ self.assertEqual(Chem.MolToXYZBlock(mol), xyzblock_expected) def testSanitizationExceptionBasics(self): try: Chem.SanitizeMol(Chem.MolFromSmiles('CFC',sanitize=False)) except Chem.AtomValenceException as exc: self.assertEqual(exc.cause.GetAtomIdx(),1) else: self.assertFalse(True) try: Chem.SanitizeMol(Chem.MolFromSmiles('c1cc1',sanitize=False)) except Chem.KekulizeException as exc: self.assertEqual(exc.cause.GetAtomIndices(),(0,1,2)) else: self.assertFalse(True) def testSanitizationExceptionHierarchy(self): with self.assertRaises(Chem.AtomValenceException): Chem.SanitizeMol(Chem.MolFromSmiles('CFC',sanitize=False)) with self.assertRaises(Chem.AtomSanitizeException): Chem.SanitizeMol(Chem.MolFromSmiles('CFC',sanitize=False)) with self.assertRaises(Chem.MolSanitizeException): Chem.SanitizeMol(Chem.MolFromSmiles('CFC',sanitize=False)) with self.assertRaises(ValueError): Chem.SanitizeMol(Chem.MolFromSmiles('CFC',sanitize=False)) with self.assertRaises(Chem.KekulizeException): Chem.SanitizeMol(Chem.MolFromSmiles('c1cc1',sanitize=False)) with self.assertRaises(Chem.MolSanitizeException): Chem.SanitizeMol(Chem.MolFromSmiles('c1cc1',sanitize=False)) with self.assertRaises(ValueError): Chem.SanitizeMol(Chem.MolFromSmiles('c1cc1', sanitize=False)) def testDetectChemistryProblems(self): m = Chem.MolFromSmiles('CFCc1cc1ClC',sanitize=False) ps = Chem.DetectChemistryProblems(m) self.assertEqual(len(ps),3) self.assertEqual([x.GetType() for x in ps],['AtomValenceException','AtomValenceException','KekulizeException']) self.assertEqual(ps[0].GetAtomIdx(),1) self.assertEqual(ps[1].GetAtomIdx(),6) self.assertEqual(ps[2].GetAtomIndices(),(3,4,5)) def testGithub2611(self): mol = Chem.MolFromSmiles('ONCS.ONCS') for atom in mol.GetAtoms(): 
atom.SetIsotope(atom.GetIdx()) order1 = list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(0, 4), breakTies=False, includeIsotopes=True)) order2 = list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(0, 8), breakTies=False, includeIsotopes=False)) self.assertNotEqual(order1[:4], order2[4:]) # ensure that the orders are ignored in the second batch self.assertEqual(order2[:4], order2[4:]) for smi in ['ONCS.ONCS', 'F[C@@H](Br)[C@H](F)Cl']: mol = Chem.MolFromSmiles(smi) for atom in mol.GetAtoms(): atom.SetIsotope(atom.GetIdx()) for iso,chiral in [(True,True),(True,False),(False,True), (False,False)]: order1 = list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(0, mol.GetNumAtoms()), bondsToUse=range(0,mol.GetNumBonds()), breakTies=False, includeIsotopes=iso, includeChirality=chiral)) order2 = list(Chem.CanonicalRankAtomsInFragment(mol, atomsToUse=range(0, mol.GetNumAtoms()), bondsToUse=range(0,mol.GetNumBonds()), breakTies=True, includeIsotopes=iso, includeChirality=chiral)) order3 = list(Chem.CanonicalRankAtoms(mol, breakTies=False, includeIsotopes=iso, includeChirality=chiral)) order4 = list(Chem.CanonicalRankAtoms(mol, breakTies=True, includeIsotopes=iso, includeChirality=chiral)) self.assertEqual(order1,order3) self.assertEqual(order2,order4) def testSetBondStereoFromDirections(self): m1 = Chem.MolFromMolBlock(''' Mrv1810 10141909482D 4 3 0 0 0 0 999 V2000 3.3412 -2.9968 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.5162 -2.9968 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.1037 -3.7112 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.7537 -2.2823 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 2 0 0 0 0 2 3 1 0 0 0 0 1 4 1 0 0 0 0 M END ''', sanitize=False) self.assertEqual(m1.GetBondBetweenAtoms(0,1).GetBondType(),Chem.BondType.DOUBLE) self.assertEqual(m1.GetBondBetweenAtoms(0,1).GetStereo(),Chem.BondStereo.STEREONONE) Chem.SetBondStereoFromDirections(m1) self.assertEqual(m1.GetBondBetweenAtoms(0,1).GetStereo(),Chem.BondStereo.STEREOTRANS) m2 = Chem.MolFromMolBlock(''' Mrv1810 10141909542D 4 3 0 0 0 0 999 V2000 3.4745 -5.2424 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.6495 -5.2424 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 2.2370 -5.9569 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 3.8870 -5.9569 0.0000 C 0 0 0 0 0 0 0 0 0 0 0 0 1 2 2 0 0 0 0 2 3 1 0 0 0 0 1 4 1 0 0 0 0 M END ''',sanitize=False) self.assertEqual(m2.GetBondBetweenAtoms(0,1).GetBondType(),Chem.BondType.DOUBLE) self.assertEqual(m2.GetBondBetweenAtoms(0,1).GetStereo(),Chem.BondStereo.STEREONONE) Chem.SetBondStereoFromDirections(m2) self.assertEqual(m2.GetBondBetweenAtoms(0,1).GetStereo(),Chem.BondStereo.STEREOCIS) def testSetBondDirFromStereo(self): m1 = Chem.MolFromSmiles('CC=CC') m1.GetBondWithIdx(1).SetStereoAtoms(0,3) m1.GetBondWithIdx(1).SetStereo(Chem.BondStereo.STEREOCIS) Chem.SetDoubleBondNeighborDirections(m1) self.assertEqual(Chem.MolToSmiles(m1),r"C/C=C\C") self.assertEqual(m1.GetBondWithIdx(0).GetBondDir(),Chem.BondDir.ENDUPRIGHT) self.assertEqual(m1.GetBondWithIdx(2).GetBondDir(),Chem.BondDir.ENDDOWNRIGHT) if __name__ == '__main__': if "RDTESTCASE" in os.environ: suite = unittest.TestSuite() testcases = os.environ["RDTESTCASE"] for name in testcases.split(':'): suite.addTest(TestCase(name)) runner = unittest.TextTestRunner() runner.run(suite) else: unittest.main()
1
20,173
it's not wrong, but you don't technically need `rdmolops` here since it's imported as part of `Chem` (see the short illustration after this record)
rdkit-rdkit
cpp
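To illustrate the review comment above: the functions defined in rdkit.Chem.rdmolops are re-exported through the top-level Chem module, so an explicit rdmolops import is usually redundant. A minimal sketch, assuming a standard RDKit install (this snippet is not part of the file above):

from rdkit import Chem
# The explicit import below is what the reviewer calls unnecessary;
# the same functions are reachable as Chem.AddHs, Chem.Kekulize, etc.
# from rdkit.Chem import rdmolops

m = Chem.MolFromSmiles('c1ccccc1')
mh = Chem.AddHs(m)                           # same callable as rdmolops.AddHs
assert mh.GetNumAtoms() == 12                # 6 carbons + 6 added hydrogens
Chem.Kekulize(mh, clearAromaticFlags=True)   # also defined in rdmolops
print(Chem.MolToSmiles(mh))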
@@ -534,6 +534,7 @@ class InfluxListenStore(ListenStore):
         pxz.stdin.close()
+        pxz.wait()

        self.log.info('ListenBrainz listen dump done!')
        self.log.info('Dump present at %s!', archive_path)
        return archive_path
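The hunk above closes the compressor's stdin and then waits for the child process to exit, so the archive is fully written before the dump is reported as done. A minimal sketch of that close-then-wait pattern, with a hypothetical command and output path (not the project's actual dump code):

import subprocess

archive_path = '/tmp/listens-dump.tar.xz'   # hypothetical output path
with open(archive_path, 'wb') as out:
    # Stream data through an external compressor, as the dump code does with pxz.
    proc = subprocess.Popen(['xz', '-c'], stdin=subprocess.PIPE, stdout=out)
    proc.stdin.write(b'listen data ...')    # placeholder payload
    proc.stdin.close()                      # signal end of input so the compressor can flush
    proc.wait()                             # block until compression finishes
print('Dump present at %s!' % archive_path)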
1
# coding=utf-8 import listenbrainz.db.user as db_user import os.path import subprocess import tarfile import tempfile import time import shutil import ujson import uuid from brainzutils import cache from collections import defaultdict from datetime import datetime from influxdb import InfluxDBClient from influxdb.exceptions import InfluxDBClientError, InfluxDBServerError from listenbrainz import DUMP_LICENSE_FILE_PATH from listenbrainz.db import DUMP_DEFAULT_THREAD_COUNT from listenbrainz.db.dump import SchemaMismatchException from listenbrainz.listen import Listen from listenbrainz.listenstore import ListenStore from listenbrainz.listenstore import ORDER_ASC, ORDER_TEXT, \ USER_CACHE_TIME, REDIS_USER_TIMESTAMPS, LISTENS_DUMP_SCHEMA_VERSION from listenbrainz.utils import quote, get_escaped_measurement_name, get_measurement_name, get_influx_query_timestamp, \ convert_influx_nano_to_python_time, convert_python_time_to_nano_int, convert_to_unix_timestamp, \ create_path, log_ioerrors, init_cache REDIS_INFLUX_USER_LISTEN_COUNT = "ls.listencount." # append username COUNT_RETENTION_POLICY = "one_week" COUNT_MEASUREMENT_NAME = "listen_count" TEMP_COUNT_MEASUREMENT = COUNT_RETENTION_POLICY + "." + COUNT_MEASUREMENT_NAME TIMELINE_COUNT_MEASUREMENT = COUNT_MEASUREMENT_NAME DUMP_CHUNK_SIZE = 100000 NUMBER_OF_USERS_PER_DIRECTORY = 1000 DUMP_FILE_SIZE_LIMIT = 1024 * 1024 * 1024 # 1 GB class InfluxListenStore(ListenStore): REDIS_INFLUX_TOTAL_LISTEN_COUNT = "ls.listencount.total" TOTAL_LISTEN_COUNT_CACHE_TIME = 5 * 60 USER_LISTEN_COUNT_CACHE_TIME = 10 * 60 # in seconds. 15 minutes def __init__(self, conf): ListenStore.__init__(self, conf) self.influx = InfluxDBClient(host=conf['INFLUX_HOST'], port=conf['INFLUX_PORT'], database=conf['INFLUX_DB_NAME']) # Initialize brainzutils cache init_cache(host=conf['REDIS_HOST'], port=conf['REDIS_PORT'], namespace=conf['REDIS_NAMESPACE']) def get_listen_count_for_user(self, user_name, need_exact=False): """Get the total number of listens for a user. The number of listens comes from brainzutils cache unless an exact number is asked for. Args: user_name: the user to get listens for need_exact: if True, get an exact number of listens directly from the ListenStore """ if not need_exact: # check if the user's listen count is already in cache # if already present return it directly instead of calculating it again # decode is set to False as we have not encoded the value when we set it # in brainzutils cache as we need to call increment operation which requires # an integer value user_key = '{}{}'.format(REDIS_INFLUX_USER_LISTEN_COUNT, user_name) count = cache.get(user_key, decode=False) if count: return int(count) try: results = self.influx.query('SELECT count(*) FROM ' + get_escaped_measurement_name(user_name)) except (InfluxDBServerError, InfluxDBClientError) as e: self.log.error("Cannot query influx: %s" % str(e)) raise # get the number of listens from the json try: count = results.get_points(measurement = get_measurement_name(user_name)).__next__()['count_recording_msid'] except (KeyError, StopIteration): count = 0 # put this value into brainzutils cache with an expiry time user_key = "{}{}".format(REDIS_INFLUX_USER_LISTEN_COUNT, user_name) cache.set(user_key, int(count), InfluxListenStore.USER_LISTEN_COUNT_CACHE_TIME, encode=False) return int(count) def reset_listen_count(self, user_name): """ Reset the listen count of a user from cache and put in a new calculated value. 
Args: user_name: the musicbrainz id of user whose listen count needs to be reset """ self.get_listen_count_for_user(user_name, need_exact=True) def _select_single_value(self, query, measurement): try: results = self.influx.query(query) except Exception as err: self.log.error("Cannot query influx: %s" % str(err)) raise for result in results.get_points(measurement=measurement): return result['time'] return None def _select_single_timestamp(self, query, measurement): try: results = self.influx.query(query) except Exception as err: self.log.error("Cannot query influx: %s" % str(err)) raise for result in results.get_points(measurement=measurement): dt = datetime.strptime(result['time'], "%Y-%m-%dT%H:%M:%SZ") return int(dt.strftime('%s')) return None def get_total_listen_count(self, cache_value=True): """ Returns the total number of listens stored in the ListenStore. First checks the brainzutils cache for the value, if not present there makes a query to the db and caches it in brainzutils cache. """ if cache_value: count = cache.get(InfluxListenStore.REDIS_INFLUX_TOTAL_LISTEN_COUNT, decode=False) if count: return int(count) try: result = self.influx.query("""SELECT %s FROM "%s" ORDER BY time DESC LIMIT 1""" % (COUNT_MEASUREMENT_NAME, TIMELINE_COUNT_MEASUREMENT)) except (InfluxDBServerError, InfluxDBClientError) as err: self.log.error("Cannot query influx: %s" % str(err)) raise try: item = result.get_points(measurement=TIMELINE_COUNT_MEASUREMENT).__next__() count = int(item[COUNT_MEASUREMENT_NAME]) timestamp = convert_to_unix_timestamp(item['time']) except (KeyError, ValueError, StopIteration): timestamp = 0 count = 0 # Now sum counts that have been added in the interval we're interested in try: result = self.influx.query("""SELECT sum(%s) as total FROM "%s" WHERE time > %s""" % (COUNT_MEASUREMENT_NAME, TEMP_COUNT_MEASUREMENT, get_influx_query_timestamp(timestamp))) except (InfluxDBServerError, InfluxDBClientError) as err: self.log.error("Cannot query influx: %s" % str(err)) raise try: data = result.get_points(measurement=TEMP_COUNT_MEASUREMENT).__next__() count += int(data['total']) except StopIteration: pass if cache_value: cache.set( InfluxListenStore.REDIS_INFLUX_TOTAL_LISTEN_COUNT, int(count), InfluxListenStore.TOTAL_LISTEN_COUNT_CACHE_TIME, encode=False, ) return count def get_timestamps_for_user(self, user_name): """ Return the max_ts and min_ts for a given user and cache the result in brainzutils cache """ tss = cache.get(REDIS_USER_TIMESTAMPS % user_name) if tss: (min_ts, max_ts) = tss.split(",") min_ts = int(min_ts) max_ts = int(max_ts) else: query = 'SELECT first(artist_msid) FROM ' + get_escaped_measurement_name(user_name) min_ts = self._select_single_timestamp(query, get_measurement_name(user_name)) query = 'SELECT last(artist_msid) FROM ' + get_escaped_measurement_name(user_name) max_ts = self._select_single_timestamp(query, get_measurement_name(user_name)) cache.set(REDIS_USER_TIMESTAMPS % user_name, "%d,%d" % (min_ts, max_ts), USER_CACHE_TIME) return min_ts, max_ts def insert(self, listens): """ Insert a batch of listens. """ submit = [] user_names = {} for listen in listens: user_names[listen.user_name] = 1 submit.append(listen.to_influx(quote(listen.user_name))) if not self.influx.write_points(submit, time_precision='s'): self.log.error("Cannot write data to influx. (write_points returned False)") # If we reach this point, we were able to write the listens to the InfluxListenStore. # So update the listen counts of the users cached in brainzutils cache. 
for data in submit: user_key = "{}{}".format(REDIS_INFLUX_USER_LISTEN_COUNT, data['fields']['user_name']) cached_count = cache.get(user_key, decode=False) if cached_count: cache.increment(user_key) # Invalidate cached data for user for user_name in user_names.keys(): cache.delete(REDIS_USER_TIMESTAMPS % user_name) if len(listens): # Enter a measurement to count items inserted submit = [{ 'measurement': TEMP_COUNT_MEASUREMENT, 'tags': { COUNT_MEASUREMENT_NAME: len(listens) }, 'fields': { COUNT_MEASUREMENT_NAME: len(listens) } }] try: if not self.influx.write_points(submit): self.log.error("Cannot write listen cound to influx. (write_points returned False)") except (InfluxDBServerError, InfluxDBClientError, ValueError) as err: self.log.error("Cannot write data to influx: %s" % str(err)) raise def update_listen_counts(self): """ This should be called every few seconds in order to sum up all of the listen counts in influx and write them to a single figure """ # To update the current listen total, find when we last updated the timeline. try: result = self.influx.query("""SELECT %s FROM "%s" ORDER BY time DESC LIMIT 1""" % (COUNT_MEASUREMENT_NAME, TIMELINE_COUNT_MEASUREMENT)) except (InfluxDBServerError, InfluxDBClientError) as err: self.log.error("Cannot query influx: %s" % str(err)) raise try: item = result.get_points(measurement=TIMELINE_COUNT_MEASUREMENT).__next__() total = int(item[COUNT_MEASUREMENT_NAME]) start_timestamp = convert_influx_nano_to_python_time(item['time']) except (KeyError, ValueError, StopIteration): total = 0 start_timestamp = 0 # Next, find the timestamp of the latest and greatest temp counts try: result = self.influx.query("""SELECT %s FROM "%s" ORDER BY time DESC LIMIT 1""" % (COUNT_MEASUREMENT_NAME, TEMP_COUNT_MEASUREMENT)) except (InfluxDBServerError, InfluxDBClientError) as err: self.log.error("Cannot query influx: %s" % str(err)) raise try: item = result.get_points(measurement=TEMP_COUNT_MEASUREMENT).__next__() end_timestamp = convert_influx_nano_to_python_time(item['time']) except (KeyError, StopIteration): end_timestamp = start_timestamp # Now sum counts that have been added in the interval we're interested in try: result = self.influx.query("""SELECT sum(%s) as total FROM "%s" WHERE time > %d and time <= %d""" % (COUNT_MEASUREMENT_NAME, TEMP_COUNT_MEASUREMENT, convert_python_time_to_nano_int(start_timestamp), convert_python_time_to_nano_int(end_timestamp))) except (InfluxDBServerError, InfluxDBClientError) as err: self.log.error("Cannot query influx: %s" % str(err)) raise try: data = result.get_points(measurement=TEMP_COUNT_MEASUREMENT).__next__() total += int(data['total']) except StopIteration: # This means we have no item_counts to update, so bail. return # Finally write a new total with the timestamp of the last point submit = [{ 'measurement': TIMELINE_COUNT_MEASUREMENT, 'time': end_timestamp, 'tags': { COUNT_MEASUREMENT_NAME: total }, 'fields': { COUNT_MEASUREMENT_NAME: total } }] try: if not self.influx.write_points(submit): self.log.error("Cannot write data to influx. (write_points returned False)") except (InfluxDBServerError, InfluxDBClientError, ValueError) as err: self.log.error("Cannot update listen counts in influx: %s" % str(err)) raise def fetch_listens_from_storage(self, user_name, from_ts, to_ts, limit, order): """ The timestamps are stored as UTC in the postgres datebase while on retrieving the value they are converted to the local server's timezone. So to compare datetime object we need to create a object in the same timezone as the server. 
from_ts: seconds since epoch, in float to_ts: seconds since epoch, in float """ # Quote single quote characters which could be used to mount an injection attack. # Sadly, influxdb does not provide a means to do this in the client library query = 'SELECT * FROM ' + get_escaped_measurement_name(user_name) if from_ts is not None: query += "WHERE time > " + get_influx_query_timestamp(from_ts) else: query += "WHERE time < " + get_influx_query_timestamp(to_ts) query += " ORDER BY time " + ORDER_TEXT[order] + " LIMIT " + str(limit) try: results = self.influx.query(query) except Exception as err: self.log.error("Cannot query influx: %s" % str(err)) return [] listens = [] for result in results.get_points(measurement=get_measurement_name(user_name)): listens.append(Listen.from_influx(result)) if order == ORDER_ASC: listens.reverse() return listens def dump_user(self, username, fileobj, dump_time): """ Dump specified user's listens into specified file object. Args: username (str): the MusicBrainz ID of the user whose listens are to be dumped fileobj (file): the file into which listens should be written dump_time (datetime): the time at which the specific data dump was initiated Returns: int: the number of bytes this user's listens take in the dump file """ t0 = time.time() offset = 0 bytes_written = 0 listen_count = 0 # Get this user's listens in chunks while True: # loop until we get this chunk of listens while True: try: result = self.influx.query(""" SELECT * FROM {measurement} WHERE time <= {timestamp} ORDER BY time DESC LIMIT {limit} OFFSET {offset} """.format( measurement=get_escaped_measurement_name(username), timestamp=get_influx_query_timestamp(dump_time.strftime('%s')), limit=DUMP_CHUNK_SIZE, offset=offset, )) break except Exception as e: self.log.error('Error while getting listens for user %s', user['musicbrainz_id']) self.log.error(str(e)) time.sleep(3) rows_added = 0 for row in result.get_points(get_measurement_name(username)): listen = Listen.from_influx(row).to_api() try: bytes_written += fileobj.write(ujson.dumps(listen)) bytes_written += fileobj.write('\n') rows_added += 1 except IOError as e: log_ioerrors(self.log, e) raise except Exception as e: self.log.error('Exception while creating json for user: %s', user['musicbrainz_id']) self.log.error(str(e)) raise listen_count += rows_added if not rows_added: break offset += DUMP_CHUNK_SIZE time_taken = time.time() - t0 self.log.info('Listens for user %s dumped, total %d listens written at %.2f listens / sec!', username, listen_count, listen_count / time_taken) # the size for this user should not include the last newline we wrote # hence return bytes_written - 1 as the size in the dump for this user return bytes_written - 1 def dump_listens(self, location, dump_time=datetime.today(), threads=DUMP_DEFAULT_THREAD_COUNT): """ Dumps all listens in the ListenStore into a .tar.xz archive. Files are created with UUIDs as names. Each file can contain listens for a number of users. An index.json file is used to save which file contains the listens of which users. 
Args: location: the directory where the listens dump archive should be created dump_time (datetime): the time at which the data dump was started threads (int): the number of threads to user for compression Returns: the path to the dump archive """ self.log.info('Beginning dump of listens from InfluxDB...') self.log.info('Getting list of users whose listens are to be dumped...') users = db_user.get_all_users(columns=['id', 'musicbrainz_id']) self.log.info('Total number of users: %d', len(users)) archive_name = 'listenbrainz-listens-dump-{time}'.format(time=dump_time.strftime('%Y%m%d-%H%M%S')) archive_path = os.path.join(location, '{filename}.tar.xz'.format(filename=archive_name)) with open(archive_path, 'w') as archive: pxz_command = ['pxz', '--compress', '-T{threads}'.format(threads=threads)] pxz = subprocess.Popen(pxz_command, stdin=subprocess.PIPE, stdout=archive) with tarfile.open(fileobj=pxz.stdin, mode='w|') as tar: temp_dir = tempfile.mkdtemp() try: # add timestamp timestamp_path = os.path.join(temp_dir, 'TIMESTAMP') with open(timestamp_path, 'w') as f: f.write(dump_time.isoformat(' ')) tar.add(timestamp_path, arcname=os.path.join(archive_name, 'TIMESTAMP')) # add schema version schema_version_path = os.path.join(temp_dir, 'SCHEMA_SEQUENCE') with open(schema_version_path, 'w') as f: f.write(str(LISTENS_DUMP_SCHEMA_VERSION)) tar.add(schema_version_path, arcname=os.path.join(archive_name, 'SCHEMA_SEQUENCE')) # add copyright notice tar.add(DUMP_LICENSE_FILE_PATH, arcname=os.path.join(archive_name, 'COPYING')) except IOError as e: log_ioerrors(self.log, e) raise except Exception as e: self.log.error('Exception while adding dump metadata: %s', str(e)) raise listens_path = os.path.join(temp_dir, 'listens') dump_complete = False next_user_id = 0 index = {} while not dump_complete: file_name = str(uuid.uuid4()) # directory structure of the form "/%s/%02s/%s.listens" % (uuid[0], uuid[0:2], uuid) directory = os.path.join(listens_path, file_name[0], file_name[0:2]) create_path(directory) file_path = os.path.join(directory, '{uuid}.listens'.format(uuid=file_name)) with open(file_path, 'w') as f: file_done = False while next_user_id < len(users): if f.tell() > DUMP_FILE_SIZE_LIMIT: file_done = True break username = users[next_user_id]['musicbrainz_id'] offset = f.tell() size = self.dump_user(username=username, fileobj=f, dump_time=dump_time) index[username] = { 'file_name': file_name, 'offset': offset, 'size': size, } next_user_id += 1 if file_done: continue if next_user_id == len(users): dump_complete = True break # add the listens directory to the archive self.log.info('Got all listens, adding them to the archive...') tar.add(listens_path, arcname=os.path.join(archive_name, 'listens')) # add index.json file to the archive try: index_path = os.path.join(temp_dir, 'index.json') with open(index_path, 'w') as f: f.write(ujson.dumps(index)) tar.add(index_path, arcname=os.path.join(archive_name, 'index.json')) except IOError as e: log_ioerrors(self.log, e) raise except Exception as e: self.log.error('Exception while adding index file to archive: %s', str(e)) raise # remove the temporary directory shutil.rmtree(temp_dir) pxz.stdin.close() self.log.info('ListenBrainz listen dump done!') self.log.info('Dump present at %s!', archive_path) return archive_path def import_listens_dump(self, archive_path, threads=DUMP_DEFAULT_THREAD_COUNT): """ Imports listens into InfluxDB from a ListenBrainz listens dump .tar.xz archive. 
Args: archive (str): the path to the listens dump .tar.xz archive to be imported threads (int): the number of threads to be used for decompression (defaults to DUMP_DEFAULT_THREAD_COUNT) Returns: int: the number of users for whom listens have been imported """ self.log.info('Beginning import of listens from dump %s...', archive_path) # construct the pxz command to decompress the archive pxz_command = ['pxz', '--decompress', '--stdout', archive_path, '-T{threads}'.format(threads=threads)] # run the command once to ensure schema version is correct # and load the index pxz = subprocess.Popen(pxz_command, stdout=subprocess.PIPE) index = None with tarfile.open(fileobj=pxz.stdout, mode='r|') as tar: schema_check_done = False index_loaded = False for member in tar: file_name = member.name.split('/')[-1] if file_name == 'SCHEMA_SEQUENCE': self.log.info('Checking if schema version of dump matches...') schema_seq = int(tar.extractfile(member).read().strip()) if schema_seq != LISTENS_DUMP_SCHEMA_VERSION: raise SchemaMismatchException('Incorrect schema version! Expected: %d, got: %d.' 'Please ensure that the data dump version matches the code version' 'in order to import the data.' % (LISTENS_DUMP_SCHEMA_VERSION, schema_seq)) schema_check_done = True elif file_name == 'index.json': with tar.extractfile(member) as f: index = ujson.load(f) index_loaded = True if schema_check_done and index_loaded: self.log.info('Schema version matched and index.json loaded!') self.log.info('Starting import of listens...') break else: raise SchemaMismatchException('Metadata files missing in dump, please ensure that the dump file is valid.') # close pxz command and start over again, this time with the aim of importing all listens pxz.stdout.close() file_contents = defaultdict(list) for user, info in index.items(): file_contents[info['file_name']].append({ 'user_name': user, 'offset': info['offset'], 'size': info['size'], }) for file_name in file_contents: file_contents[file_name] = sorted(file_contents[file_name], key=lambda x: x['offset']) pxz = subprocess.Popen(pxz_command, stdout=subprocess.PIPE) users_done = 0 with tarfile.open(fileobj=pxz.stdout, mode='r|') as tar: for member in tar: file_name = member.name.split('/')[-1] if file_name.endswith('.listens'): file_name = file_name[:-8] with tar.extractfile(member) as f: for user in file_contents[file_name]: self.log.info('Importing user %s...', user['user_name']) assert(f.tell() == user['offset']) bytes_read = 0 listens = [] while bytes_read < user['size']: line = f.readline() bytes_read += len(line) listen = Listen.from_json(ujson.loads(line)).to_influx(quote(user['user_name'])) listens.append(listen) if len(listens) > DUMP_CHUNK_SIZE: self.write_points_to_db(listens) listens = [] if len(listens) > 0: self.write_points_to_db(listens) self.log.info('Import of user %s done!', user['user_name']) users_done += 1 self.log.info('Import of listens from dump %s done!', archive_path) pxz.stdout.close() return users_done def write_points_to_db(self, points): """ Write the given data to InfluxDB. This function sleeps for 3 seconds and tries again if the write fails. Args: points: a list containing dicts in the form taken by influx python bindings """ while not self.influx.write_points(points, time_precision='s'): self.log.error('Error while writing listens to influx, ' 'write_points returned False') time.sleep(3)
1
14,856
I think the absence of this might have been the cause of the dump file corruption. We didn't wait for the pxz command to exit, which created a race condition between the cp and this step and corrupted files in some places (a minimal sketch of the missing wait follows this record). I came across this because the hashes that were created and printed differed from the hashes of the actual files in dev as well.
metabrainz-listenbrainz-server
py
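The review comment above points at a missing wait on the pxz subprocess used by dump_listens. Below is a minimal sketch of that idea only, not the project's actual patch: the helper name dump_with_pxz and the add_members callback are made up for illustration, and the key point is calling wait() on the Popen object after closing its stdin so the archive is fully flushed before anything copies or hashes it.

import subprocess
import tarfile


def dump_with_pxz(archive_path, add_members, threads=4):
    """Stream a tar archive through pxz and only return once pxz has exited.

    `add_members` is a hypothetical callback that adds files to the open
    TarFile; it stands in for the metadata/listens logic in dump_listens.
    """
    with open(archive_path, 'wb') as archive:
        pxz_command = ['pxz', '--compress', '-T{threads}'.format(threads=threads)]
        pxz = subprocess.Popen(pxz_command, stdin=subprocess.PIPE, stdout=archive)

        with tarfile.open(fileobj=pxz.stdin, mode='w|') as tar:
            add_members(tar)

        # Closing stdin tells pxz there is no more input...
        pxz.stdin.close()
        # ...but the compressed file is only complete once pxz exits.
        # Waiting here removes the race between pxz flushing its output
        # and anything that copies or hashes archive_path afterwards.
        pxz.wait()

    return archive_path

Calling wait() also makes the pxz exit code available, so a failed compression can be detected instead of silently producing a truncated archive.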
@@ -27,7 +27,7 @@ type blockchainConfig struct { } type LoggingConfig struct { - Level logging.Level `required:"true" default:"debug"` + Level *logging.Level `required:"true" default:"info"` } func NewConfig(path string) (*Config, error) {
1
package dwh

import (
	"github.com/jinzhu/configor"
	"github.com/pkg/errors"
	"github.com/sonm-io/core/accounts"
	"github.com/sonm-io/core/insonmnia/logging"
)

type Config struct {
	Logging           LoggingConfig      `yaml:"logging"`
	GRPCListenAddr    string             `yaml:"grpc_address" default:"127.0.0.1:15021"`
	HTTPListenAddr    string             `yaml:"http_address" default:"127.0.0.1:15022"`
	Eth               accounts.EthConfig `yaml:"ethereum" required:"true"`
	Storage           *storageConfig     `yaml:"storage" required:"true"`
	Blockchain        *blockchainConfig  `yaml:"blockchain"`
	MetricsListenAddr string             `yaml:"metrics_listen_addr" default:"127.0.0.1:14004"`
}

type storageConfig struct {
	Backend  string `required:"true" yaml:"driver"`
	Endpoint string `required:"true" yaml:"endpoint"`
}

type blockchainConfig struct {
	EthEndpoint string `required:"true" yaml:"eth_endpoint"`
}

type LoggingConfig struct {
	Level logging.Level `required:"true" default:"debug"`
}

func NewConfig(path string) (*Config, error) {
	cfg := &Config{}
	err := configor.Load(cfg, path)
	if err != nil {
		return nil, err
	}

	if _, ok := setupDBCallbacks[cfg.Storage.Backend]; !ok {
		return nil, errors.Errorf("backend `%s` is not supported", cfg.Storage.Backend)
	}

	return cfg, nil
}
1
6,847
Out of scope.
sonm-io-core
go
@@ -1,11 +1,11 @@ describe('ColumnSorting', () => { - var id = 'testContainer'; + const id = 'testContainer'; beforeEach(function() { this.$container = $(`<div id="${id}" style="overflow: auto; width: 300px; height: 200px;"></div>`).appendTo('body'); - this.sortByColumn = function(columnIndex) { - var element = this.$container.find(`th span.columnSorting:eq(${columnIndex})`); + this.sort = function(columnIndex) { + const element = this.$container.find(`th span.columnSorting:eq(${columnIndex})`); element.simulate('mousedown'); element.simulate('mouseup');
1
describe('ColumnSorting', () => { var id = 'testContainer'; beforeEach(function() { this.$container = $(`<div id="${id}" style="overflow: auto; width: 300px; height: 200px;"></div>`).appendTo('body'); this.sortByColumn = function(columnIndex) { var element = this.$container.find(`th span.columnSorting:eq(${columnIndex})`); element.simulate('mousedown'); element.simulate('mouseup'); }; }); afterEach(function() { if (this.$container) { destroy(); this.$container.remove(); } }); var arrayOfObjects = function() { return [ {id: 1, name: 'Ted', lastName: 'Right'}, {id: 2, name: 'Frank', lastName: 'Honest'}, {id: 3, name: 'Joan', lastName: 'Well'}, {id: 4, name: 'Sid', lastName: 'Strong'}, {id: 5, name: 'Jane', lastName: 'Neat'}, {id: 6, name: 'Chuck', lastName: 'Jackson'}, {id: 7, name: 'Meg', lastName: 'Jansen'}, {id: 8, name: 'Rob', lastName: 'Norris'}, {id: 9, name: 'Sean', lastName: 'O\'Hara'}, {id: 10, name: 'Eve', lastName: 'Branson'} ]; }; it('should sort table by first visible column', function() { var hot = handsontable({ data: [ [1, 9, 3, 4, 5, 6, 7, 8, 9], [9, 8, 7, 6, 5, 4, 3, 2, 1], [8, 7, 6, 5, 4, 3, 3, 1, 9], [0, 3, 0, 5, 6, 7, 8, 9, 1] ], colHeaders: true, columnSorting: true }); var htCore = getHtCore(); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('9'); expect(htCore.find('tbody tr:eq(0) td:eq(2)').text()).toEqual('3'); expect(htCore.find('tbody tr:eq(0) td:eq(3)').text()).toEqual('4'); this.sortByColumn(0); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('3'); expect(htCore.find('tbody tr:eq(0) td:eq(2)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(0) td:eq(3)').text()).toEqual('5'); }); it('should apply stable sort function #3606', () => { var hot = handsontable({ data: [ ['mercedes1', 'Mercedes', 'A 160', '01/14/2007'], ['citroen1', 'Citroen', 'C4 Coupe', '12/01/2007'], ['opel1', 'Opel', 'Astra', '02/02/2006'], ['bmw1', 'BMW', '320i Coupe', '07/24/2009'], ['citroen2', 'Citroen', 'C4 Coupe', '12/01/2012'], ['opel2', 'Opel', 'Astra', '02/02/2004'], ['mercedes2', 'Mercedes', 'A 160', '01/14/2008'], ['citroen3', 'Citroen', 'C4 Coupe', '12/01/2007'], ['mercedes3', 'Mercedes', 'A 160', '01/14/2009'], ['opel3', 'Opel', 'Astra', '02/02/2006'], ['bmw2', 'BMW', '320i Coupe', '07/24/2013'], ['bmw3', 'BMW', '320i Coupe', '07/24/2012'], ], columns: [ {}, {}, { type: 'date', dateFormat: 'mm/dd/yy' }, { type: 'numeric' } ], columnSorting: true }); hot.sort(1, true); // ASC expect(hot.getDataAtCol(0)).toEqual([ 'bmw1', 'bmw2', 'bmw3', 'citroen1', 'citroen2', 'citroen3', 'mercedes1', 'mercedes2', 'mercedes3', 'opel1', 'opel2', 'opel3' ]); hot.sort(1, false); // DESC expect(hot.getDataAtCol(0)).toEqual([ 'opel1', 'opel2', 'opel3', 'mercedes1', 'mercedes2', 'mercedes3', 'citroen1', 'citroen2', 'citroen3', 'bmw1', 'bmw2', 'bmw3' ]); }); it('should not throw error when trying run handsontable with columnSorting and autoRowSize in the same time.', () => { var errors = 0; try { handsontable({ data: arrayOfObjects(), autoRowSize: true, columnSorting: true }); } catch (e) { errors++; } expect(errors).toBe(0); }); it('should sort numbers descending after 2 clicks on table header', function() { handsontable({ data: arrayOfObjects(), colHeaders: true, columnSorting: true }); this.sortByColumn(0); this.sortByColumn(0); expect(this.$container.find('tr td').first().html()).toEqual('10'); }); it('should remove specified row from 
sorted table and NOT sort the table again', function() { var hot = handsontable({ data: [ [1, 'B'], [3, 'D'], [2, 'A'], [0, 'C'] ], colHeaders: true, columnSorting: true }); this.sortByColumn(0); var htCore = getHtCore(); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr').length).toEqual(4); // Now if sort is launched, sorting ordered will be reversed hot.sortOrder = false; hot.alter('remove_row', 0); expect(htCore.find('tbody tr').length).toEqual(3); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('2'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('3'); }); it('should add an empty row to sorted table', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'A'], [3, 'D'], [2, 'C'] ], colHeaders: true, columnSorting: true }); this.sortByColumn(0); var htCore = getHtCore(); expect(htCore.find('tbody tr').length).toEqual(4); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); hot.alter('insert_row', 1, 2); expect(htCore.find('tbody tr').length).toEqual(6); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual(''); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual(''); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(4) td:eq(0)').text()).toEqual('2'); expect(htCore.find('tbody tr:eq(5) td:eq(0)').text()).toEqual('3'); }); it('should add an empty row to sorted table at a given index', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'A'], [3, 'D'], [2, 'C'] ], colHeaders: true, columnSorting: true }); var htCore = getHtCore(); this.sortByColumn(0); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr:eq(4) td:eq(0)').text()).toEqual(''); hot.alter('insert_row', 2); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual(''); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual(''); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual(''); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('2'); }); it('should NOT sort the table after value update in sorted column', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'A'], [3, 'D'], [2, 'C'] ], colHeaders: true, columnSorting: true }); var htCore = getHtCore(); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); this.sortByColumn(0); this.sortByColumn(0); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('2'); hot.setDataAtCell(1, 0, 20); render(); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('20'); }); it('defaultSort comparing function shouldn\'t change order when comparing empty string, null and undefined', () => { var hot = 
handsontable({}); var defaultSort = hot.getPlugin('columnSorting').defaultSort; expect(defaultSort(false, {})(['key1', null], ['key2', null])).toEqual(0); expect(defaultSort(false, {})(['key1', ''], ['key2', ''])).toEqual(0); expect(defaultSort(false, {})(['key1', undefined], ['key2', undefined])).toEqual(0); expect(defaultSort(false, {})(['key1', ''], ['key2', null])).toEqual(0); expect(defaultSort(false, {})(['key1', null], ['key2', ''])).toEqual(0); expect(defaultSort(false, {})(['key1', ''], ['key2', undefined])).toEqual(0); expect(defaultSort(false, {})(['key1', undefined], ['key2', ''])).toEqual(0); expect(defaultSort(false, {})(['key1', null], ['key2', undefined])).toEqual(0); expect(defaultSort(false, {})(['key1', undefined], ['key2', null])).toEqual(0); }); it('should place empty strings, null and undefined values at proper position (stability of default comparing function)', () => { var hot = handsontable({ data: [ [null, 'Ted Right'], [undefined, 'Jane Neat'], [null, 'Meg Jansen'], ['', 'Sean Hara'], ['', 'Eve Branson'], [6, 'Frank Honest'], [7, 'Joan Well'], [8, 'Sid Strong'], [9, 'Chuck Jackson'], [10, 'Rob Norris'], [11, 'Eve Well'] ], columnSorting: true }); hot.sort(0, true); // ASC expect(hot.getDataAtCol(1)).toEqual([ 'Frank Honest', 'Joan Well', 'Sid Strong', 'Chuck Jackson', 'Rob Norris', 'Eve Well', // empty cells below 'Ted Right', 'Jane Neat', 'Meg Jansen', 'Sean Hara', 'Eve Branson', ]); hot.sort(0, false); // DESC expect(hot.getDataAtCol(1)).toEqual([ 'Eve Well', 'Rob Norris', 'Chuck Jackson', 'Sid Strong', 'Joan Well', 'Frank Honest', // empty cells below 'Ted Right', 'Jane Neat', 'Meg Jansen', 'Sean Hara', 'Eve Branson', ]); }); it('should place empty strings, null and undefined values at proper position when `sortEmptyCells` option is enabled ' + '(API call, data type: default)', () => { var hot = handsontable({ data: [ [6, 'Frank Honest'], [null, 'Ted Right'], [7, 'Joan Well'], [8, 'Sid Strong'], [undefined, 'Jane Neat'], [9, 'Chuck Jackson'], [null, 'Meg Jansen'], [10, 'Rob Norris'], ['', 'Sean Hara'], ['', 'Eve Branson'] ], columnSorting: { sortEmptyCells: true } }); hot.sort(0, true); // ASC expect(hot.getDataAtCol(1)).toEqual([ 'Ted Right', 'Jane Neat', 'Meg Jansen', 'Sean Hara', 'Eve Branson', // empty cells above 'Frank Honest', 'Joan Well', 'Sid Strong', 'Chuck Jackson', 'Rob Norris' ]); hot.sort(0, false); // DESC expect(hot.getDataAtCol(1)).toEqual([ 'Rob Norris', 'Chuck Jackson', 'Sid Strong', 'Joan Well', 'Frank Honest', // empty cells below 'Ted Right', 'Jane Neat', 'Meg Jansen', 'Sean Hara', 'Eve Branson', ]); }); it('should place empty strings, null and undefined values at proper position when `sortEmptyCells` ' + 'option is enabled and `column` property of `columnSorting` option is set (data type: default)', function() { var hot = handsontable({ data: [ [6, 'Frank Honest'], [null, 'Ted Right'], [7, 'Joan Well'], [8, 'Sid Strong'], [undefined, 'Jane Neat'], [9, 'Chuck Jackson'], [null, 'Meg Jansen'], [10, 'Rob Norris'], ['', 'Sean Hara'], ['', 'Eve Branson'] ], columnSorting: { sortEmptyCells: true, sortOrder: true, column: 0 } }); // ASC expect(hot.getDataAtCol(1)).toEqual([ 'Ted Right', 'Jane Neat', 'Meg Jansen', 'Sean Hara', 'Eve Branson', // empty cells above 'Frank Honest', 'Joan Well', 'Sid Strong', 'Chuck Jackson', 'Rob Norris' ]); if (this.$container) { destroy(); this.$container.remove(); } hot = handsontable({ data: [ [6, 'Frank Honest'], [null, 'Ted Right'], [7, 'Joan Well'], [8, 'Sid Strong'], [undefined, 'Jane Neat'], [9, 'Chuck 
Jackson'], [null, 'Meg Jansen'], [10, 'Rob Norris'], ['', 'Sean Hara'], ['', 'Eve Branson'] ], columnSorting: { sortEmptyCells: true, sortOrder: false, column: 0 } }); // DESC expect(hot.getDataAtCol(1)).toEqual([ 'Rob Norris', 'Chuck Jackson', 'Sid Strong', 'Joan Well', 'Frank Honest', // empty cells below 'Ted Right', 'Jane Neat', 'Meg Jansen', 'Sean Hara', 'Eve Branson', ]); }); it('should place empty strings, null and undefined values at proper position when `sortEmptyCells` ' + 'option is enabled and `column` property of `columnSorting` option is set (data type: numeric)', function() { var hot = handsontable({ data: [ [6, 'Frank Honest'], [null, 'Ted Right'], [7, 'Joan Well'], [8, 'Sid Strong'], [undefined, 'Jane Neat'], [9, 'Chuck Jackson'], [null, 'Meg Jansen'], [10, 'Rob Norris'], ['', 'Sean Hara'], ['', 'Eve Branson'] ], columns: [ { type: 'numeric' }, {} ], columnSorting: { sortEmptyCells: true, sortOrder: true, column: 0 } }); // ASC expect(hot.getDataAtCol(1)).toEqual([ 'Ted Right', 'Jane Neat', 'Meg Jansen', 'Sean Hara', 'Eve Branson', // empty cells above 'Frank Honest', 'Joan Well', 'Sid Strong', 'Chuck Jackson', 'Rob Norris' ]); if (this.$container) { destroy(); this.$container.remove(); } hot = handsontable({ data: [ [6, 'Frank Honest'], [null, 'Ted Right'], [7, 'Joan Well'], [8, 'Sid Strong'], [undefined, 'Jane Neat'], [9, 'Chuck Jackson'], [null, 'Meg Jansen'], [10, 'Rob Norris'], ['', 'Sean Hara'], ['', 'Eve Branson'] ], columnSorting: { sortEmptyCells: true, sortOrder: false, column: 0 } }); // DESC expect(hot.getDataAtCol(1)).toEqual([ 'Rob Norris', 'Chuck Jackson', 'Sid Strong', 'Joan Well', 'Frank Honest', // empty cells below 'Ted Right', 'Jane Neat', 'Meg Jansen', 'Sean Hara', 'Eve Branson', ]); }); describe('data type: date', () => { it('dateSort comparing function shouldn\'t change order when comparing empty string, null and undefined', () => { var hot = handsontable({}); var dateSort = hot.getPlugin('columnSorting').dateSort; expect(dateSort(false, {})(['key1', null], ['key2', null])).toEqual(0); expect(dateSort(false, {})(['key1', ''], ['key2', ''])).toEqual(0); expect(dateSort(false, {})(['key1', undefined], ['key2', undefined])).toEqual(0); expect(dateSort(false, {})(['key1', ''], ['key2', null])).toEqual(0); expect(dateSort(false, {})(['key1', null], ['key2', ''])).toEqual(0); expect(dateSort(false, {})(['key1', ''], ['key2', undefined])).toEqual(0); expect(dateSort(false, {})(['key1', undefined], ['key2', ''])).toEqual(0); expect(dateSort(false, {})(['key1', null], ['key2', undefined])).toEqual(0); expect(dateSort(false, {})(['key1', undefined], ['key2', null])).toEqual(0); }); it('should place empty strings, null and undefined values at proper position when `sortEmptyCells` ' + 'option is enabled and `column` property of `columnSorting` option is set', function() { var hot = handsontable({ data: [ ['Citroen1', 'C4 Coupe', null], ['Mercedes1', 'A 160', '12/01/2008'], ['Mercedes2', 'A 160', '01/14/2006'], ['Citroen2', 'C4 Coupe', undefined], ['Audi1', 'A4 Avant', '11/19/2011'], ['Opel1', 'Astra', '02/02/2004'], ['Citroen3', 'C4 Coupe', null], ['BMW1', '320i Coupe', '07/24/2011'], ['Citroen4', 'C4 Coupe', ''], ['Citroen5', 'C4 Coupe', ''], ], columns: [ {}, {}, { type: 'date', dateFormat: 'MM/DD/YYYY' } ], columnSorting: { sortEmptyCells: true, sortOrder: true, column: 2 } }); // ASC expect(hot.getDataAtCol(0)).toEqual([ 'Citroen1', 'Citroen2', 'Citroen3', 'Citroen4', 'Citroen5', // empty cells above 'Opel1', 'Mercedes2', 'Mercedes1', 'BMW1', 'Audi1' ]); if 
(this.$container) { destroy(); this.$container.remove(); } hot = handsontable({ data: [ ['Citroen1', 'C4 Coupe', null], ['Mercedes1', 'A 160', '12/01/2008'], ['Mercedes2', 'A 160', '01/14/2006'], ['Citroen2', 'C4 Coupe', undefined], ['Audi1', 'A4 Avant', '11/19/2011'], ['Opel1', 'Astra', '02/02/2004'], ['Citroen3', 'C4 Coupe', null], ['BMW1', '320i Coupe', '07/24/2011'], ['Citroen4', 'C4 Coupe', ''], ['Citroen5', 'C4 Coupe', ''], ], columns: [ {}, {}, { type: 'date', dateFormat: 'MM/DD/YYYY' } ], columnSorting: { sortEmptyCells: true, sortOrder: false, column: 2 } }); // DESC expect(hot.getDataAtCol(0)).toEqual([ 'Audi1', 'BMW1', 'Mercedes1', 'Mercedes2', 'Opel1', // empty cells below 'Citroen1', 'Citroen2', 'Citroen3', 'Citroen4', 'Citroen5' ]); }); it('should sort date columns (MM/DD/YYYY)', () => { var hot = handsontable({ data: [ ['Mercedes', 'A 160', '01/14/2006', 6999.9999], ['Citroen', 'C4 Coupe', '12/01/2008', 8330], ['Audi', 'A4 Avant', '11/19/2011', 33900], ['Opel', 'Astra', '02/02/2004', 7000], ['BMW', '320i Coupe', '07/24/2011', 30500] ], columns: [ {}, {}, { type: 'date', dateFormat: 'MM/DD/YYYY' }, { type: 'numeric' } ], colHeaders: true, columnSorting: true }); hot.sort(2, true); // ASC expect(hot.getDataAtRow(0)).toEqual(['Opel', 'Astra', '02/02/2004', 7000]); expect(hot.getDataAtRow(1)).toEqual(['Mercedes', 'A 160', '01/14/2006', 6999.9999]); expect(hot.getDataAtRow(2)).toEqual(['Citroen', 'C4 Coupe', '12/01/2008', 8330]); expect(hot.getDataAtRow(3)).toEqual(['BMW', '320i Coupe', '07/24/2011', 30500]); expect(hot.getDataAtRow(4)).toEqual(['Audi', 'A4 Avant', '11/19/2011', 33900]); hot.sort(2, false); // DESC expect(hot.getDataAtRow(0)).toEqual(['Audi', 'A4 Avant', '11/19/2011', 33900]); expect(hot.getDataAtRow(1)).toEqual(['BMW', '320i Coupe', '07/24/2011', 30500]); expect(hot.getDataAtRow(2)).toEqual(['Citroen', 'C4 Coupe', '12/01/2008', 8330]); expect(hot.getDataAtRow(3)).toEqual(['Mercedes', 'A 160', '01/14/2006', 6999.9999]); expect(hot.getDataAtRow(4)).toEqual(['Opel', 'Astra', '02/02/2004', 7000]); }); it('should sort date columns (DD/MM/YYYY)', () => { var hot = handsontable({ data: [ ['Mercedes', 'A 160', '01/12/2012', 6999.9999], ['Citroen', 'C4 Coupe', '12/01/2013', 8330], ['Audi', 'A4 Avant', '11/10/2014', 33900], ['Opel', 'Astra', '02/02/2015', 7000], ['BMW', '320i Coupe', '07/02/2013', 30500] ], columns: [ {}, {}, { type: 'date', dateFormat: 'DD/MM/YYYY' }, { type: 'numeric' } ], colHeaders: true, columnSorting: true }); hot.sort(2, true); // ASC expect(hot.getDataAtRow(0)).toEqual(['Mercedes', 'A 160', '01/12/2012', 6999.9999]); expect(hot.getDataAtRow(1)).toEqual(['Citroen', 'C4 Coupe', '12/01/2013', 8330]); expect(hot.getDataAtRow(2)).toEqual(['BMW', '320i Coupe', '07/02/2013', 30500]); expect(hot.getDataAtRow(3)).toEqual(['Audi', 'A4 Avant', '11/10/2014', 33900]); expect(hot.getDataAtRow(4)).toEqual(['Opel', 'Astra', '02/02/2015', 7000]); hot.sort(2, false); // DESC expect(hot.getDataAtRow(0)).toEqual(['Opel', 'Astra', '02/02/2015', 7000]); expect(hot.getDataAtRow(1)).toEqual(['Audi', 'A4 Avant', '11/10/2014', 33900]); expect(hot.getDataAtRow(2)).toEqual(['BMW', '320i Coupe', '07/02/2013', 30500]); expect(hot.getDataAtRow(3)).toEqual(['Citroen', 'C4 Coupe', '12/01/2013', 8330]); expect(hot.getDataAtRow(4)).toEqual(['Mercedes', 'A 160', '01/12/2012', 6999.9999]); }); it('should sort date columns (MMMM Do YYYY)', () => { var hot = handsontable({ data: [ ['Mercedes', 'A 160', 'October 28th 2016', 6999.9999], ['Citroen', 'C4 Coupe', 'October 27th 2001', 8330], 
['Audi', 'A4 Avant', 'July 8th 1999', 33900], ['Opel', 'Astra', 'June 1st 2001', 7000], ['BMW', '320i Coupe', 'August 3rd 2001', 30500] ], columns: [ {}, {}, { type: 'date', dateFormat: 'MMMM Do YYYY' }, { type: 'numeric' } ], colHeaders: true, columnSorting: true }); hot.sort(2, true); // ASC expect(hot.getDataAtRow(0)).toEqual(['Audi', 'A4 Avant', 'July 8th 1999', 33900]); expect(hot.getDataAtRow(1)).toEqual(['Opel', 'Astra', 'June 1st 2001', 7000]); expect(hot.getDataAtRow(2)).toEqual(['BMW', '320i Coupe', 'August 3rd 2001', 30500]); expect(hot.getDataAtRow(3)).toEqual(['Citroen', 'C4 Coupe', 'October 27th 2001', 8330]); expect(hot.getDataAtRow(4)).toEqual(['Mercedes', 'A 160', 'October 28th 2016', 6999.9999]); hot.sort(2, false); // DESC expect(hot.getDataAtRow(0)).toEqual(['Mercedes', 'A 160', 'October 28th 2016', 6999.9999]); expect(hot.getDataAtRow(1)).toEqual(['Citroen', 'C4 Coupe', 'October 27th 2001', 8330]); expect(hot.getDataAtRow(2)).toEqual(['BMW', '320i Coupe', 'August 3rd 2001', 30500]); expect(hot.getDataAtRow(3)).toEqual(['Opel', 'Astra', 'June 1st 2001', 7000]); expect(hot.getDataAtRow(4)).toEqual(['Audi', 'A4 Avant', 'July 8th 1999', 33900]); }); it('should sort date columns along with empty and null values', () => { var hot = handsontable({ data: [ ['Mercedes', 'A 160', '01/14/2006', 6999.9999], ['Citroen', 'C4 Coupe', '12/01/2008', 8330], ['Citroen', 'C4 Coupe null', null, 8330], ['Citroen', 'C4 Coupe empty', '', 8330], ['Audi', 'A4 Avant', '11/19/2011', 33900], ['Opel', 'Astra', '02/02/2004', 7000], ['BMW', '320i Coupe', '07/24/2011', 30500] ], columns: [ {}, {}, { type: 'date', dateFormat: 'mm/dd/yy' }, { type: 'numeric' } ], colHeaders: true, columnSorting: true }); hot.sort(2, true); // ASC expect(hot.getDataAtRow(0)).toEqual(['Mercedes', 'A 160', '01/14/2006', 6999.9999]); expect(hot.getDataAtRow(1)).toEqual(['Opel', 'Astra', '02/02/2004', 7000]); expect(hot.getDataAtRow(2)).toEqual(['BMW', '320i Coupe', '07/24/2011', 30500]); expect(hot.getDataAtRow(3)).toEqual(['Audi', 'A4 Avant', '11/19/2011', 33900]); expect(hot.getDataAtRow(4)).toEqual(['Citroen', 'C4 Coupe', '12/01/2008', 8330]); hot.sort(2, false); // DESC expect(hot.getDataAtRow(0)).toEqual(['Citroen', 'C4 Coupe', '12/01/2008', 8330]); expect(hot.getDataAtRow(1)).toEqual(['Audi', 'A4 Avant', '11/19/2011', 33900]); expect(hot.getDataAtRow(2)).toEqual(['BMW', '320i Coupe', '07/24/2011', 30500]); expect(hot.getDataAtRow(3)).toEqual(['Opel', 'Astra', '02/02/2004', 7000]); expect(hot.getDataAtRow(4)).toEqual(['Mercedes', 'A 160', '01/14/2006', 6999.9999]); }); }); describe('data type: time', () => { it('should properly rewrite time into correct format after sort', (done) => { var hot = handsontable({ data: [ ['0:00:01 am'], ['5:30:14 pm'], ['8:00:00 pm'], ['11:15:05 am'], ['4:07:48 am'] ], columns: [ { type: 'time', dateFormat: 'h:mm:ss a', correctFormat: true } ], colHeaders: true, columnSorting: { column: 0, sortOrder: false } }); hot.setDataAtCell(0, 0, '19:55', 'edit'); setTimeout(() => { expect(hot.getDataAtCell(0, 0)).toEqual('7:55:00 pm'); done(); }, 250); }); }); it('should properly sort numeric data', function() { var hot = handsontable({ data: [ ['Mercedes', 'A 160', '01/14/2006', '6999.9999'], ['Citroen', 'C4 Coupe', '12/01/2008', 8330], ['Citroen', 'C4 Coupe null', null, '8330'], ['Citroen', 'C4 Coupe empty', '', 8333], ['Audi', 'A4 Avant', '11/19/2011', '33900'], ['Opel', 'Astra', '02/02/2004', '7000'], ['BMW', '320i Coupe', '07/24/2011', 30500] ], columns: [ {}, {}, {}, { type: 'numeric' } ], 
colHeaders: true, columnSorting: true }); var htCore = getHtCore(); this.sortByColumn(3); expect(hot.getDataAtCol(3)).toEqual(['6999.9999', '7000', 8330, '8330', 8333, 30500, '33900']); this.sortByColumn(3); expect(hot.getDataAtCol(3)).toEqual(['33900', 30500, 8333, 8330, '8330', '7000', '6999.9999']); this.sortByColumn(3); expect(hot.getDataAtCol(3)).toEqual(['6999.9999', 8330, '8330', 8333, '33900', '7000', 30500]); }); it('numericSort comparing function shouldn\'t change order when comparing empty string, null and undefined', () => { var hot = handsontable({}); var numericSort = hot.getPlugin('columnSorting').numericSort; expect(numericSort(false, {})(['key1', null], ['key2', null])).toEqual(0); expect(numericSort(false, {})(['key1', ''], ['key2', ''])).toEqual(0); expect(numericSort(false, {})(['key1', undefined], ['key2', undefined])).toEqual(0); expect(numericSort(false, {})(['key1', ''], ['key2', null])).toEqual(0); expect(numericSort(false, {})(['key1', null], ['key2', ''])).toEqual(0); expect(numericSort(false, {})(['key1', ''], ['key2', undefined])).toEqual(0); expect(numericSort(false, {})(['key1', undefined], ['key2', ''])).toEqual(0); expect(numericSort(false, {})(['key1', null], ['key2', undefined])).toEqual(0); expect(numericSort(false, {})(['key1', undefined], ['key2', null])).toEqual(0); }); it('should sort table with multiple row headers', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], columns: [ {}, {}, { type: 'date', dateFormat: 'mm/dd/yy' }, { type: 'numeric' } ], colHeaders: true, columnSorting: true, removeRowPlugin: true // this plugin ads an extra row header, so now we have 2 instead of 1 }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); this.sortByColumn(0); // sort by first column expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('D'); this.sortByColumn(1); // sort by second column expect(this.$container.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('A'); }); it('should allow to define sorting column and order during initialization', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], colHeaders: true, columnSorting: { column: 0, sortOrder: true } }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('D'); }); it('should allow to change sorting column with updateSettings', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], colHeaders: true, columnSorting: { column: 0, sortOrder: true } }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('D'); updateSettings({ columnSorting: { column: 1, sortOrder: true } }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('3'); expect(this.$container.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('A'); }); it('should allow to change sorting order with updateSettings', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], colHeaders: true, columnSorting: { column: 0, sortOrder: true } }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); updateSettings({ columnSorting: { column: 0, sortOrder: false } }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('3'); }); it('should allow to change 
if sorting empty cells with updateSettings', () => { var hot = handsontable({ data: [ [1, 'B'], [2, ''], [3, 'A'], [4, ''], [6, 'E'], [7, ''], [8, 'F'], ], colHeaders: true, columnSorting: { column: 1, sortOrder: false, sortEmptyCells: false } }); updateSettings({ columnSorting: { column: 1, sortOrder: true, sortEmptyCells: true } }); // ASC with empty cells sorting expect(hot.getDataAtCol(0)).toEqual([2, 4, 7, 3, 1, 6, 8]); updateSettings({ columnSorting: { column: 1, sortOrder: true, sortEmptyCells: false } }); // ASC without empty cells sorting expect(hot.getDataAtCol(0)).toEqual([3, 1, 6, 8, 2, 4, 7]); }); it('should NOT sort spare rows', () => { var myData = [ {a: 'aaa', b: 2, c: 3}, {a: 'z', b: 11, c: -4}, {a: 'dddd', b: 13, c: 13}, {a: 'bbbb', b: 10, c: 11} ]; function customIsEmptyRow(row) { var data = this.getSourceData(); return data[row].isNew; } handsontable({ data: myData, rowHeaders: true, colHeaders: ['A', 'B', 'C'], columns: [ {data: 'a', type: 'text'}, {data: 'b', type: 'text'}, {data: 'c', type: 'text'} ], dataSchema: {isNew: true, a: false}, // default for a to avoid #bad value# columnSorting: true, minSpareRows: 3, isEmptyRow: customIsEmptyRow }); // ASC updateSettings({ columnSorting: { column: 0, sortOrder: true } }); expect(getData()).toEqual([ ['aaa', 2, 3], ['bbbb', 10, 11], ['dddd', 13, 13], ['z', 11, -4], [false, null, null], [false, null, null], [false, null, null] ]); updateSettings({ columnSorting: { column: 0, sortOrder: false } }); expect(getData()).toEqual([ ['z', 11, -4], ['dddd', 13, 13], ['bbbb', 10, 11], ['aaa', 2, 3], [false, null, null], [false, null, null], [false, null, null] ]); }); it('should reset column sorting with updateSettings', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], colHeaders: true, columnSorting: { column: 0, sortOrder: true } }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); updateSettings({ columnSorting: void 0 }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); }); it('should expose sort method when columnSorting is enabled', () => { var hot = handsontable(); expect(hot.getSettings().columnSorting).toBeFalsy(); expect(hot.sort).toBeUndefined(); updateSettings({ columnSorting: true }); expect(hot.getSettings().columnSorting).toBe(true); expect(hot.sort).toBeDefined(); expect(typeof hot.sort).toBe('function'); updateSettings({ columnSorting: false }); expect(hot.getSettings().columnSorting).toBeFalsy(); expect(hot.sort).toBeUndefined(); }); it('should sort table using HOT.sort method', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], columnSorting: true }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); expect(this.$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('3'); expect(this.$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('2'); hot.sort(0, true); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(this.$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(this.$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); }); it('should reset column sorting with updateSettings', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], colHeaders: true, columnSorting: { column: 0, sortOrder: true } }); 
expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); updateSettings({ columnSorting: void 0 }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); }); it('should fire beforeColumnSort event before sorting data', function() { var hot = handsontable({ data: [ [2], [4], [1], [3] ], columnSorting: true }); this.beforeColumnSortHandler = function() { expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('2'); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('4'); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('3'); }; spyOn(this, 'beforeColumnSortHandler'); hot.addHook('beforeColumnSort', this.beforeColumnSortHandler); var sortColumn = 0; var sortOrder = true; hot.sort(sortColumn, sortOrder); expect(this.beforeColumnSortHandler.calls.count()).toEqual(1); expect(this.beforeColumnSortHandler).toHaveBeenCalledWith(sortColumn, sortOrder, void 0, void 0, void 0, void 0); }); it('should not sorting column when beforeColumnSort returns false', (done) => { var hot = handsontable({ data: [ [2], [4], [1], [3] ], columnSorting: true, beforeColumnSort() { return false; } }); hot.sort(0, true); setTimeout(() => { expect(spec().$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('2'); expect(spec().$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('4'); expect(spec().$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('1'); expect(spec().$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); done(); }, 200); }); it('should add beforeColumnSort event listener in constructor', () => { var beforeColumnSortCallback = jasmine.createSpy('beforeColumnSortHandler'); var hot = handsontable({ data: [[2], [4], [1], [3]], columnSorting: true, beforeColumnSort: beforeColumnSortCallback }); var sortColumn = 0; var sortOrder = true; hot.sort(sortColumn, sortOrder); expect(beforeColumnSortCallback.calls.count()).toEqual(1); expect(beforeColumnSortCallback).toHaveBeenCalledWith(sortColumn, sortOrder, void 0, void 0, void 0, void 0); }); it('should fire afterColumnSort event before data has been sorted but before table render', () => { var hot = handsontable({ data: [ [2], [4], [1], [3] ], columnSorting: true }); var rendered = false; var afterColumnSortHandler = jasmine.createSpy('afterColumnSortHandler'); var afterRenderSpy = jasmine.createSpy('afterRender'); hot.addHook('afterColumnSort', function() { expect(rendered).toBe(false); afterColumnSortHandler.apply(afterColumnSortHandler, arguments); }); hot.addHook('afterRender', function() { rendered = true; afterRenderSpy.apply(afterRenderSpy, arguments); }); var sortColumn = 0; var sortOrder = true; afterRenderSpy.calls.reset(); hot.sort(sortColumn, sortOrder); expect(afterColumnSortHandler.calls.count()).toBe(1); expect(afterColumnSortHandler).toHaveBeenCalledWith(sortColumn, sortOrder, void 0, void 0, void 0, void 0); expect(afterRenderSpy.calls.count()).toBe(1); }); it('should add afterColumnSort event listener in constructor', () => { var afterColumnSortCallback = jasmine.createSpy('afterColumnSortHandler'); var hot = handsontable({ data: [[2], [4], [1], [3]], columnSorting: true, afterColumnSort: afterColumnSortCallback }); var sortColumn = 0; var sortOrder = true; hot.sort(sortColumn, sortOrder); expect(afterColumnSortCallback.calls.count()).toEqual(1); expect(afterColumnSortCallback).toHaveBeenCalledWith(sortColumn, sortOrder, void 0, void 0, void 
0, void 0); }); it('should insert row when plugin is enabled, but table hasn\'t been sorted', () => { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], columnSorting: true }); expect(countRows()).toEqual(4); expect(hot.sortColumn).toBeUndefined(); alter('insert_row'); expect(countRows()).toEqual(5); }); it('should remove row when plugin is enabled, but table hasn\'t been sorted', () => { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], columnSorting: true }); expect(countRows()).toEqual(4); expect(hot.sortColumn).toBeUndefined(); alter('remove_row'); expect(countRows()).toEqual(3); }); it('should display new row added directly to dataSource, when observeChanges plugin is enabled', function(done) { var data = [ [1, 'B'], [0, 'A'], [3, 'D'], [2, 'C'] ]; var hot = handsontable({ data, colHeaders: true, columnSorting: true, observeChanges: true }); var htCore = getHtCore(); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('2'); this.sortByColumn(0); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr').length).toEqual(4); var afterChangesObservedCallback = jasmine.createSpy('afterChangesObservedCallback'); hot.addHook('afterChangesObserved', afterChangesObservedCallback); data.push([5, 'E']); setTimeout(() => { expect(countRows()).toEqual(5); expect(spec().$container.find('tbody tr:eq(4) td:eq(0)').text()).toEqual('5'); expect(spec().$container.find('tbody tr:eq(4) td:eq(1)').text()).toEqual('E'); done(); }, 200); }); it('should not display new row added directly to dataSource, when observeChanges plugin is explicitly disabled', function(done) { var data = [ [1, 'B'], [0, 'A'], [3, 'D'], [2, 'C'] ]; var hot = handsontable({ data, colHeaders: true, columnSorting: true, observeChanges: false }); var afterChangesObservedCallback = jasmine.createSpy('afterChangesObservedCallback'); hot.addHook('afterChangesObserved', afterChangesObservedCallback); var htCore = getHtCore(); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('2'); this.sortByColumn(0); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr').length).toEqual(4); data.push([5, 'E']); setTimeout(() => { expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr').length).toEqual(4); expect(afterChangesObservedCallback).not.toHaveBeenCalled(); done(); }, 100); }); it('should display new row added directly to dataSource, when observeChanges 
plugin status is undefined', (done) => { var data = [ [1, 'B'], [0, 'A'], [3, 'D'], [2, 'C'] ]; var onUpdateSettings = jasmine.createSpy('onUpdateSettings'); var hot = handsontable({ data, colHeaders: true, columnSorting: true, afterUpdateSettings: onUpdateSettings }); var afterChangesObservedCallback = jasmine.createSpy('afterChangesObservedCallback'); hot.addHook('afterChangesObserved', afterChangesObservedCallback); var htCore = getHtCore(); // columnSorting enables observeChanges plugin by asynchronously invoking updateSettings setTimeout(() => { expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('2'); spec().sortByColumn(0); expect(htCore.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(htCore.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(htCore.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(htCore.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); expect(htCore.find('tbody tr').length).toEqual(4); data.push([5, 'E']); }, 100); setTimeout(() => { expect(countRows()).toEqual(5); expect(htCore.find('tbody tr:eq(4) td:eq(0)').text()).toEqual('5'); expect(htCore.find('tbody tr:eq(4) td:eq(1)').text()).toEqual('E'); done(); }, 2000); // 2s delayed needs for safari env }); it('should apply sorting when there are two tables and only one has sorting enabled and has been already sorted (#1020)', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], columnSorting: { column: 1 } }); this.$container2 = $(`<div id="${id}-2"></div>`).appendTo('body'); this.$container2.handsontable(); var hot2 = this.$container2.handsontable('getInstance'); selectCell(0, 1); keyDown('enter'); expect($('.handsontableInput').val()).toEqual('A'); this.$container2.handsontable('destroy'); this.$container2.remove(); }); it('should reset sorting after loading new data', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], columnSorting: true }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('1'); expect(this.$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('3'); expect(this.$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('2'); hot.sort(0, true); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(this.$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(this.$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); loadData([ [50, 'E'], [10, 'G'], [30, 'F'], [60, 'I'], [40, 'J'], [20, 'H'] ]); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('50'); expect(this.$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('10'); expect(this.$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('30'); expect(this.$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('60'); expect(this.$container.find('tbody tr:eq(4) td:eq(0)').text()).toEqual('40'); expect(this.$container.find('tbody tr:eq(5) td:eq(0)').text()).toEqual('20'); }); it('should reset sorting after loading new data (default sorting column and order set)', function() { var hot = handsontable({ data: [ [1, 'B'], [0, 'D'], [3, 'A'], [2, 'C'] ], columnSorting: { 
column: 1, sortOrder: true } }); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('3'); expect(this.$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(this.$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(this.$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('A'); expect(this.$container.find('tbody tr:eq(1) td:eq(1)').text()).toEqual('B'); expect(this.$container.find('tbody tr:eq(2) td:eq(1)').text()).toEqual('C'); expect(this.$container.find('tbody tr:eq(3) td:eq(1)').text()).toEqual('D'); hot.sort(0, true); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('0'); expect(this.$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('1'); expect(this.$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('2'); expect(this.$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('3'); loadData([ [50, 'E'], [10, 'G'], [30, 'F'], [60, 'I'], [40, 'J'], [20, 'H'] ]); expect(this.$container.find('tbody tr:eq(0) td:eq(0)').text()).toEqual('50'); expect(this.$container.find('tbody tr:eq(1) td:eq(0)').text()).toEqual('30'); expect(this.$container.find('tbody tr:eq(2) td:eq(0)').text()).toEqual('10'); expect(this.$container.find('tbody tr:eq(3) td:eq(0)').text()).toEqual('20'); expect(this.$container.find('tbody tr:eq(4) td:eq(0)').text()).toEqual('60'); expect(this.$container.find('tbody tr:eq(5) td:eq(0)').text()).toEqual('40'); expect(this.$container.find('tbody tr:eq(0) td:eq(1)').text()).toEqual('E'); expect(this.$container.find('tbody tr:eq(1) td:eq(1)').text()).toEqual('F'); expect(this.$container.find('tbody tr:eq(2) td:eq(1)').text()).toEqual('G'); expect(this.$container.find('tbody tr:eq(3) td:eq(1)').text()).toEqual('H'); expect(this.$container.find('tbody tr:eq(4) td:eq(1)').text()).toEqual('I'); expect(this.$container.find('tbody tr:eq(5) td:eq(1)').text()).toEqual('J'); }); it('should return updated data at specyfied row after sorted', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, 'Frank', 'Honest'], [3, 'Joan', 'Well'], [4, 'Sid', 'Strong'], [5, 'Jane', 'Neat'] ], colHeaders: true, rowHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(getDataAtRow(0)).toEqual([1, 'Ted', 'Right']); expect(getDataAtRow(4)).toEqual([5, 'Jane', 'Neat']); this.sortByColumn(0); expect(getDataAtRow(0)).toEqual([5, 'Jane', 'Neat']); expect(getDataAtRow(4)).toEqual([1, 'Ted', 'Right']); this.sortByColumn(0); expect(getDataAtRow(0)).toEqual([1, 'Ted', 'Right']); expect(getDataAtRow(4)).toEqual([5, 'Jane', 'Neat']); }); it('should return updated data at specyfied col after sorted', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, 'Frank', 'Honest'], [3, 'Joan', 'Well'], [4, 'Sid', 'Strong'], [5, 'Jane', 'Neat'] ], colHeaders: true, rowHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual([1, 2, 3, 4, 5]); expect(getDataAtCol(1)).toEqual(['Ted', 'Frank', 'Joan', 'Sid', 'Jane']); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual([5, 4, 3, 2, 1]); expect(getDataAtCol(1)).toEqual(['Jane', 'Sid', 'Joan', 'Frank', 'Ted']); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual([1, 2, 3, 4, 5]); expect(getDataAtCol(1)).toEqual(['Ted', 'Frank', 'Joan', 'Sid', 'Jane']); }); it('should return original data source at specified row after sorted', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, 'Frank', 'Honest'], [3, 
'Joan', 'Well'], [4, 'Sid', 'Strong'], [5, 'Jane', 'Neat'] ], colHeaders: true, rowHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(getDataAtRow(0)).toEqual([1, 'Ted', 'Right']); expect(getDataAtRow(4)).toEqual([5, 'Jane', 'Neat']); expect(getSourceDataAtRow(0)).toEqual([1, 'Ted', 'Right']); expect(getSourceDataAtRow(4)).toEqual([5, 'Jane', 'Neat']); this.sortByColumn(0); expect(getDataAtRow(0)).toEqual([5, 'Jane', 'Neat']); expect(getDataAtRow(4)).toEqual([1, 'Ted', 'Right']); expect(getSourceDataAtRow(0)).toEqual([1, 'Ted', 'Right']); expect(getSourceDataAtRow(4)).toEqual([5, 'Jane', 'Neat']); }); it('should return original data source at specified col after sorted', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, 'Frank', 'Honest'], [3, 'Joan', 'Well'], [4, 'Sid', 'Strong'], [5, 'Jane', 'Neat'] ], colHeaders: true, rowHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual([1, 2, 3, 4, 5]); expect(getDataAtCol(1)).toEqual(['Ted', 'Frank', 'Joan', 'Sid', 'Jane']); expect(getSourceDataAtCol(0)).toEqual([1, 2, 3, 4, 5]); expect(getSourceDataAtCol(1)).toEqual(['Ted', 'Frank', 'Joan', 'Sid', 'Jane']); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual([5, 4, 3, 2, 1]); expect(getDataAtCol(1)).toEqual(['Jane', 'Sid', 'Joan', 'Frank', 'Ted']); expect(getSourceDataAtCol(0)).toEqual([1, 2, 3, 4, 5]); expect(getSourceDataAtCol(1)).toEqual(['Ted', 'Frank', 'Joan', 'Sid', 'Jane']); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual([1, 2, 3, 4, 5]); expect(getDataAtCol(1)).toEqual(['Ted', 'Frank', 'Joan', 'Sid', 'Jane']); expect(getSourceDataAtCol(0)).toEqual([1, 2, 3, 4, 5]); expect(getSourceDataAtCol(1)).toEqual(['Ted', 'Frank', 'Joan', 'Sid', 'Jane']); }); it('should ignore case when sorting', function() { var hot = handsontable({ data: [ [1, 'albuquerque'], [2, 'Alabama'], [3, 'Missouri'] ], colHeaders: true, columnSorting: true }); this.sortByColumn(1); expect(getDataAtCol(0)).toEqual([2, 1, 3]); expect(getDataAtCol(1)).toEqual(['Alabama', 'albuquerque', 'Missouri']); this.sortByColumn(1); expect(getDataAtCol(0)).toEqual([3, 1, 2]); expect(getDataAtCol(1)).toEqual(['Missouri', 'albuquerque', 'Alabama']); }); it('should push empty cells to the end of sorted column', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, '', 'Honest'], [3, '', 'Well'], [4, 'Sid', 'Strong'], [5, 'Jane', 'Neat'], ], colHeaders: true, rowHeaders: true, columnSorting: true, minSpareRows: 1 }); this.sortByColumn(1); expect(getDataAtCol(0)).toEqual([5, 4, 1, 2, 3, null]); expect(getDataAtCol(1)).toEqual(['Jane', 'Sid', 'Ted', '', '', null]); this.sortByColumn(1); expect(getDataAtCol(0)).toEqual([1, 4, 5, 2, 3, null]); expect(getDataAtCol(1)).toEqual(['Ted', 'Sid', 'Jane', '', '', null]); }); it('should push numeric values before non-numeric values, when sorting ascending using the default sorting function', function() { var hot = handsontable({ data: [ [1, 'Ted', 123], [2, '', 'Some'], [3, '', 321], [4, 'Sid', 'String'], [5, 'Jane', 46] ], colHeaders: true, columnSorting: true }); this.sortByColumn(2); expect(getDataAtCol(2)).toEqual([46, 123, 321, 'Some', 'String']); this.sortByColumn(2); expect(getDataAtCol(2)).toEqual(['String', 'Some', 321, 123, 46]); }); it('should add a sorting indicator to the column header after it\'s been sorted, only if sortIndicator property is set to true', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, '', 'Honest'], [3, '', 'Well'], [4, 'Sid', 'Strong'], [5, 
'Jane', 'Neat'], ], colHeaders: true, columnSorting: true }); this.sortByColumn(1); var sortedColumn = this.$container.find('th span.columnSorting')[1], afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue === '' || afterValue === 'none').toBe(true); // --------------------------------- // INDICATOR SET FOR THE WHOLE TABLE // --------------------------------- hot.updateSettings({ sortIndicator: true }); this.sortByColumn(1); // descending (updateSettings doesn't reset sorting stack) sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9660))).toBeGreaterThan(-1); this.sortByColumn(1); sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue === '' || afterValue === 'none').toBe(true); this.sortByColumn(1); // ascending sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); // --------------------------------- // INDICATOR SET FOR A SINGLE COLUMN // --------------------------------- hot.updateSettings({ sortIndicator: void 0, columns: [ {}, {}, {sortIndicator: true} ] }); this.sortByColumn(0); sortedColumn = this.$container.find('th span.columnSorting')[0]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue === '' || afterValue === 'none').toBe(true); this.sortByColumn(1); // descending sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue === '' || afterValue === 'none').toBe(true); this.sortByColumn(2); sortedColumn = this.$container.find('th span.columnSorting')[2]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); }); it('should change sorting indicator state on every `hot.sort()` method call (continuously for the same column)', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, '', 'Honest'], [3, '', 'Well'], [4, 'Sid', 'Strong'], [5, 'Jane', 'Neat'], ], colHeaders: true, columnSorting: true, sortIndicator: true, }); hot.sort(1); // ascending var sortedColumn = this.$container.find('th span.columnSorting')[1]; var afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); hot.sort(1); // descending sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9660))).toBeGreaterThan(-1); hot.sort(1); sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue === '' || afterValue === 'none').toBe(true); hot.sort(1); // ascending sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); 
expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); hot.sort(1); // descending sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9660))).toBeGreaterThan(-1); }); it('should change sorting indicator state on every `hot.sort()` method (calling for different columns)', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, '', 'Honest'], [3, '', 'Well'], [4, 'Sid', 'Strong'], [5, 'Jane', 'Neat'], ], colHeaders: true, columnSorting: true, sortIndicator: true, }); hot.sort(1); // ascending var sortedColumn = this.$container.find('th span.columnSorting')[1]; var afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); hot.sort(2); // ascending sortedColumn = this.$container.find('th span.columnSorting')[2]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); hot.sort(1); // ascending sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); hot.sort(2, false); // descending sortedColumn = this.$container.find('th span.columnSorting')[2]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9660))).toBeGreaterThan(-1); hot.sort(2, false); // descending sortedColumn = this.$container.find('th span.columnSorting')[2]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9660))).toBeGreaterThan(-1); hot.sort(2, true); // ascending sortedColumn = this.$container.find('th span.columnSorting')[2]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); }); it('should change sorting indicator state when initial column sorting was provided', function() { var hot = handsontable({ data: [ [1, 'Ted', 'Right'], [2, '', 'Honest'], [3, '', 'Well'], [4, 'Sid', 'Strong'], [5, 'Jane', 'Neat'], ], colHeaders: true, columnSorting: { column: 1, sortOrder: false }, sortIndicator: true, }); // descending var sortedColumn = this.$container.find('th span.columnSorting')[1]; var afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9660))).toBeGreaterThan(-1); hot.sort(1); // default sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue === '' || afterValue === 'none').toBe(true); hot.sort(1); // ascending sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue.indexOf(String.fromCharCode(9650))).toBeGreaterThan(-1); hot.sort(1); // descending sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); 
expect(afterValue.indexOf(String.fromCharCode(9660))).toBeGreaterThan(-1); hot.sort(1); // default sortedColumn = this.$container.find('th span.columnSorting')[1]; afterValue = window.getComputedStyle(sortedColumn, ':after').getPropertyValue('content'); expect(afterValue === '' || afterValue === 'none').toBe(true); }); it('should properly sort the table, when it\'s scrolled to the far right', () => { var data = [ ['Jasmine Ferguson', 'Britney Carey', 'Kelly Decker', 'Lacey Mcleod', 'Leona Shaffer', 'Kelli Ochoa', 'Adele Roberson', 'Viola Snow', 'Barron Cherry', 'Calhoun Lane', 'Elvia Andrews', 'Katheryn Dale', 'Dorthy Hale', 'Munoz Randall', 'Fields Morse', 'Hubbard Nichols', 'Chang Yang', 'Osborn Anthony', 'Owens Warner', 'Gloria Hampton'], ['Lane Hill', 'Belinda Mathews', 'York Gray', 'Celina Stone', 'Victoria Mays', 'Angelina Lott', 'Joyce Mason', 'Shawn Rodriguez', 'Susanna Mayo', 'Wolf Fuller', 'Long Hester', 'Dudley Doyle', 'Wilder Sutton', 'Oneal Avery', 'James Mclaughlin', 'Lenora Guzman', 'Mcmahon Sullivan', 'Abby Weeks', 'Beverly Joseph', 'Rosalind Church'], ['Myrtle Landry', 'Hays Huff', 'Hernandez Benjamin', 'Mclaughlin Garza', 'Franklin Barton', 'Lara Buchanan', 'Ratliff Beck', 'Rosario Munoz', 'Isabelle Dalton', 'Smith Woodard', 'Marjorie Marshall', 'Spears Stein', 'Brianna Bowman', 'Marci Clay', 'Palmer Harrell', 'Ball Levy', 'Shelley Mendoza', 'Morrow Glass', 'Baker Knox', 'Adrian Holman'], ['Trisha Howell', 'Brooke Harrison', 'Anthony Watkins', 'Ellis Cobb', 'Sheppard Dillon', 'Mathis Bray', 'Foreman Burns', 'Lina Glenn', 'Giles Pollard', 'Weiss Ballard', 'Lynnette Smith', 'Flores Kline', 'Graciela Singleton', 'Santiago Mcclure', 'Claudette Battle', 'Nita Holloway', 'Eula Wolfe', 'Pruitt Stokes', 'Felicia Briggs', 'Melba Bradshaw'] ]; var hot = handsontable({ data, colHeaders: true, columnSorting: true }); hot.view.wt.wtOverlays.leftOverlay.scrollTo(15); hot.render(); hot.sort(15); expect(getDataAtCell(0, 15)).toEqual('Ball Levy'); expect(getDataAtCell(1, 15)).toEqual('Hubbard Nichols'); expect(getDataAtCell(2, 15)).toEqual('Lenora Guzman'); expect(getDataAtCell(3, 15)).toEqual('Nita Holloway'); hot.sort(15); expect(getDataAtCell(3, 15)).toEqual('Ball Levy'); expect(getDataAtCell(2, 15)).toEqual('Hubbard Nichols'); expect(getDataAtCell(1, 15)).toEqual('Lenora Guzman'); expect(getDataAtCell(0, 15)).toEqual('Nita Holloway'); hot.sort(15); expect(getDataAtCell(0, 15)).toEqual('Hubbard Nichols'); expect(getDataAtCell(1, 15)).toEqual('Lenora Guzman'); expect(getDataAtCell(2, 15)).toEqual('Ball Levy'); expect(getDataAtCell(3, 15)).toEqual('Nita Holloway'); }); it('should allow specifiyng a custom sorting function', () => { var data = [['1 inch'], ['1 yard'], ['2 feet'], ['0.2 miles']]; var hot = handsontable({ data, colHeaders: true, columnSorting: true, columns: [ { sortFunction(sortOrder) { return function(a, b) { var unitsRatios = { inch: 1, yard: 36, feet: 12, miles: 63360 }; var newA = a[1], newB = b[1]; Handsontable.helper.objectEach(unitsRatios, (val, prop) => { if (a[1].indexOf(prop) > -1) { newA = parseFloat(a[1].replace(prop, '')) * val; return false; } }); Handsontable.helper.objectEach(unitsRatios, (val, prop) => { if (b[1].indexOf(prop) > -1) { newB = parseFloat(b[1].replace(prop, '')) * val; return false; } }); if (newA < newB) { return sortOrder ? -1 : 1; } if (newA > newB) { return sortOrder ? 
1 : -1; } return 0; }; } } ] }); expect(getDataAtCell(0, 0)).toEqual('1 inch'); expect(getDataAtCell(1, 0)).toEqual('1 yard'); expect(getDataAtCell(2, 0)).toEqual('2 feet'); expect(getDataAtCell(3, 0)).toEqual('0.2 miles'); hot.sort(0); expect(getDataAtCell(0, 0)).toEqual('1 inch'); expect(getDataAtCell(1, 0)).toEqual('2 feet'); expect(getDataAtCell(2, 0)).toEqual('1 yard'); expect(getDataAtCell(3, 0)).toEqual('0.2 miles'); hot.sort(0); expect(getDataAtCell(0, 0)).toEqual('0.2 miles'); expect(getDataAtCell(1, 0)).toEqual('1 yard'); expect(getDataAtCell(2, 0)).toEqual('2 feet'); expect(getDataAtCell(3, 0)).toEqual('1 inch'); hot.sort(0); expect(getDataAtCell(0, 0)).toEqual('1 inch'); expect(getDataAtCell(1, 0)).toEqual('1 yard'); expect(getDataAtCell(2, 0)).toEqual('2 feet'); expect(getDataAtCell(3, 0)).toEqual('0.2 miles'); }); it('should properly sort integers with nulls', function() { var hot = handsontable({ data: [ ['12'], [null], ['10'], ['-5'], [null], ['1000'] ], colHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual(['-5', '10', '12', '1000', null, null]); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual(['1000', '12', '10', '-5', null, null]); }); it('should properly sort floating points', function() { var hot = handsontable({ data: [ ['0.0561'], ['-10.67'], ['-4.1'], ['-0.01'], ['-127'], ['1000'] ], colHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual(['-127', '-10.67', '-4.1', '-0.01', '0.0561', '1000']); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual(['1000', '0.0561', '-0.01', '-4.1', '-10.67', '-127']); }); it('should properly sort floating points with nulls', function() { var hot = handsontable({ data: [ ['0.0561'], ['-10.67'], [null], ['-4.1'], ['-0.01'], [null], ['-127'], ['1000'], [null] ], colHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual(['-127', '-10.67', '-4.1', '-0.01', '0.0561', '1000', null, null, null]); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual(['1000', '0.0561', '-0.01', '-4.1', '-10.67', '-127', null, null, null]); }); it('should properly sort floating points with non-numerical values', function() { var hot = handsontable({ data: [ ['0.0561'], ['-10.67'], ['a'], ['-4.1'], ['-0.01'], ['b'], ['-127'], ['1000'], ['hello'] ], colHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual(['-127', '-10.67', '-4.1', '-0.01', '0.0561', '1000', 'a', 'b', 'hello']); this.sortByColumn(0); expect(getDataAtCol(0)).toEqual(['hello', 'b', 'a', '1000', '0.0561', '-0.01', '-4.1', '-10.67', '-127']); }); it('should modify row translating process when soring is applied (visual to physical and vice versa)', function() { var hot = handsontable({ data: [ [2], [4], [1], [3] ], colHeaders: true, columnSorting: true }); this.sortByColumn(0); expect(hot.toPhysicalRow(0)).toBe(2); expect(hot.toPhysicalRow(1)).toBe(0); expect(hot.toPhysicalRow(2)).toBe(3); expect(hot.toPhysicalRow(3)).toBe(1); expect(hot.toVisualRow(0)).toBe(1); expect(hot.toVisualRow(1)).toBe(3); expect(hot.toVisualRow(2)).toBe(0); expect(hot.toVisualRow(3)).toBe(2); }); describe('should return sorted properly data when maxRows or / and minSpareRow options are set', () => { var testSorting = function(desc, config, result) { it(desc, () => { handsontable({ data: Handsontable.helper.createSpreadsheetData(config.rows, config.columns), maxRows: config.maxRow, minSpareRows: config.minSpareRows, columnSorting: { column: 
config.sortByColumnIndex, sortOrder: config.sortOrder } }); expect(getData().length).toEqual(result.dataLength); for (var i = 0; i < result.expectations.length; i += 1) { expect(getDataAtCell(result.expectations[i].rowIndex, result.expectations[i].columnIndex)).toEqual(result.expectations[i].value); } }); }; testSorting( 'maxRows < data.length', {rows: 9, columns: 9, maxRow: 6, sortByColumnIndex: 1, sortOrder: false}, {dataLength: 6, expectations: [{rowIndex: 0, columnIndex: 2, value: 'C6'}]} ); testSorting( 'maxRows > data.length', {rows: 8, columns: 8, maxRow: 20, sortByColumnIndex: 1, sortOrder: false}, {dataLength: 8, expectations: [{rowIndex: 0, columnIndex: 2, value: 'C8'}]} ); testSorting( 'minSpareRows is set; maxRows < data.length', {rows: 9, columns: 9, maxRow: 5, minSpareRows: 3, sortByColumnIndex: 1, sortOrder: false}, {dataLength: 5, expectations: [{rowIndex: 0, columnIndex: 2, value: 'C5'}]} ); testSorting( 'minSpareRows is set; maxRows === data.length', {rows: 6, columns: 6, maxRow: 9, minSpareRows: 3, sortByColumnIndex: 1, sortOrder: false}, {dataLength: 6 + 3, expectations: [{rowIndex: 0, columnIndex: 2, value: 'C6'}]} ); testSorting( 'minSpareRows is set; maxRows > data.length', {rows: 9, columns: 9, maxRow: 15, minSpareRows: 2, sortByColumnIndex: 1, sortOrder: false}, {dataLength: 9 + 2, expectations: [{rowIndex: 0, columnIndex: 2, value: 'C9'}]} ); }); });
1
14,775
Maybe `sortByColumnHeader` would be more precise? The plugin has a method with the same name and it could be confusing.
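A minimal sketch of the rename this comment suggests, assuming the spec keeps its existing header-clicking helper and only the name changes; `sortByColumnHeader` and `registerSortHelper` are illustrative names, not existing Handsontable test helpers:

// Hypothetical rename of the spec helper so it no longer shadows the plugin's
// own column-sorting API. The helper body is whatever the spec already uses to
// click a header; only the name changes here.
function registerSortHelper(spec) {
  // Reuse the existing helper implementation under the clearer name.
  spec.sortByColumnHeader = spec.sortByColumn;
}

// In a test, the intent then reads unambiguously:
//   spec().sortByColumnHeader(0); // simulate a click on the first column header
//   hot.sort(0, true);            // call the plugin API directly, as the specs above do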
handsontable-handsontable
js
@@ -36,7 +36,8 @@ export function diffChildren( oldDom, isHydrating ) { - let i, j, oldVNode, childVNode, newDom, firstChildDom, refs; + let i, j, oldVNode, childVNode, newDom, firstChildDom; + let refs = []; // This is a compression of oldParentVNode!=null && oldParentVNode != EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR // as EMPTY_OBJ._children should be `undefined`.
1
import { diff, unmount, applyRef } from './index'; import { createVNode, Fragment } from '../create-element'; import { EMPTY_OBJ, EMPTY_ARR } from '../constants'; import { removeNode } from '../util'; import { getDomSibling } from '../component'; /** * Diff the children of a virtual node * @param {import('../internal').PreactElement} parentDom The DOM element whose * children are being diffed * @param {import('../internal').ComponentChildren[]} renderResult * @param {import('../internal').VNode} newParentVNode The new virtual * node whose children should be diff'ed against oldParentVNode * @param {import('../internal').VNode} oldParentVNode The old virtual * node whose children should be diff'ed against newParentVNode * @param {object} globalContext The current context object - modified by getChildContext * @param {boolean} isSvg Whether or not this DOM node is an SVG node * @param {Array<import('../internal').PreactElement>} excessDomChildren * @param {Array<import('../internal').Component>} commitQueue List of components * which have callbacks to invoke in commitRoot * @param {import('../internal').PreactElement} oldDom The current attached DOM * element any new dom elements should be placed around. Likely `null` on first * render (except when hydrating). Can be a sibling DOM element when diffing * Fragments that have siblings. In most cases, it starts out as `oldChildren[0]._dom`. * @param {boolean} isHydrating Whether or not we are in hydration */ export function diffChildren( parentDom, renderResult, newParentVNode, oldParentVNode, globalContext, isSvg, excessDomChildren, commitQueue, oldDom, isHydrating ) { let i, j, oldVNode, childVNode, newDom, firstChildDom, refs; // This is a compression of oldParentVNode!=null && oldParentVNode != EMPTY_OBJ && oldParentVNode._children || EMPTY_ARR // as EMPTY_OBJ._children should be `undefined`. let oldChildren = (oldParentVNode && oldParentVNode._children) || EMPTY_ARR; let oldChildrenLength = oldChildren.length; // Only in very specific places should this logic be invoked (top level `render` and `diffElementNodes`). // I'm using `EMPTY_OBJ` to signal when `diffChildren` is invoked in these situations. I can't use `null` // for this purpose, because `null` is a valid value for `oldDom` which can mean to skip to this logic // (e.g. if mounting a new tree in which the old DOM should be ignored (usually for Fragments). if (oldDom == EMPTY_OBJ) { if (excessDomChildren != null) { oldDom = excessDomChildren[0]; } else if (oldChildrenLength) { oldDom = getDomSibling(oldParentVNode, 0); } else { oldDom = null; } } newParentVNode._children = []; for (i = 0; i < renderResult.length; i++) { childVNode = renderResult[i]; if (childVNode == null || typeof childVNode == 'boolean') { childVNode = newParentVNode._children[i] = null; } // If this newVNode is being reused (e.g. <div>{reuse}{reuse}</div>) in the same diff, // or we are rendering a component (e.g. setState) copy the oldVNodes so it can have // it's own DOM & etc. pointers else if (typeof childVNode == 'string' || typeof childVNode == 'number') { childVNode = newParentVNode._children[i] = createVNode( null, childVNode, null, null, childVNode ); } else if (Array.isArray(childVNode)) { childVNode = newParentVNode._children[i] = createVNode( Fragment, { children: childVNode }, null, null, null ); } else if (childVNode._depth > 0) { // VNode is already in use, clone it. 
This can happen in the following // scenario: // const reuse = <div /> // <div>{reuse}<span />{reuse}</div> childVNode = newParentVNode._children[i] = createVNode( childVNode.type, childVNode.props, childVNode.key, null, childVNode._original ); } else { childVNode = newParentVNode._children[i] = childVNode; } // Terser removes the `continue` here and wraps the loop body // in a `if (childVNode) { ... } condition if (childVNode == null) { continue; } childVNode._parent = newParentVNode; childVNode._depth = newParentVNode._depth + 1; // Check if we find a corresponding element in oldChildren. // If found, delete the array item by setting to `undefined`. // We use `undefined`, as `null` is reserved for empty placeholders // (holes). oldVNode = oldChildren[i]; if ( oldVNode === null || (oldVNode && childVNode.key == oldVNode.key && childVNode.type === oldVNode.type) ) { oldChildren[i] = undefined; } else { // Either oldVNode === undefined or oldChildrenLength > 0, // so after this loop oldVNode == null or oldVNode is a valid value. for (j = 0; j < oldChildrenLength; j++) { oldVNode = oldChildren[j]; // If childVNode is unkeyed, we only match similarly unkeyed nodes, otherwise we match by key. // We always match by type (in either case). if ( oldVNode && childVNode.key == oldVNode.key && childVNode.type === oldVNode.type ) { oldChildren[j] = undefined; break; } oldVNode = null; } } oldVNode = oldVNode || EMPTY_OBJ; // Morph the old element into the new one, but don't append it to the dom yet diff( parentDom, childVNode, oldVNode, globalContext, isSvg, excessDomChildren, commitQueue, oldDom, isHydrating ); newDom = childVNode._dom; if ((j = childVNode.ref) && oldVNode.ref != j) { if (!refs) refs = []; if (oldVNode.ref) refs.push(oldVNode.ref, null, childVNode); refs.push(j, childVNode._component || newDom, childVNode); } if (newDom != null) { if (firstChildDom == null) { firstChildDom = newDom; } if ( typeof childVNode.type == 'function' && childVNode._children === oldVNode._children ) { childVNode._nextDom = oldDom = reorderChildren( childVNode, oldDom, parentDom ); } else { oldDom = placeChild( parentDom, childVNode, oldVNode, oldChildren, excessDomChildren, newDom, oldDom ); } // Browsers will infer an option's `value` from `textContent` when // no value is present. This essentially bypasses our code to set it // later in `diff()`. It works fine in all browsers except for IE11 // where it breaks setting `select.value`. There it will be always set // to an empty string. Re-applying an options value will fix that, so // there are probably some internal data structures that aren't // updated properly. // // To fix it we make sure to reset the inferred value, so that our own // value check in `diff()` won't be skipped. if (!isHydrating && newParentVNode.type === 'option') { // @ts-ignore We have validated that the type of parentDOM is 'option' // in the above check parentDom.value = ''; } else if (typeof newParentVNode.type == 'function') { // Because the newParentVNode is Fragment-like, we need to set it's // _nextDom property to the nextSibling of its last child DOM node. // // `oldDom` contains the correct value here because if the last child // is a Fragment-like, then oldDom has already been set to that child's _nextDom. // If the last child is a DOM VNode, then oldDom will be set to that DOM // node's nextSibling. newParentVNode._nextDom = oldDom; } } else if ( oldDom && oldVNode._dom == oldDom && oldDom.parentNode != parentDom ) { // The above condition is to handle null placeholders. 
See test in placeholder.test.js: // `efficiently replace null placeholders in parent rerenders` oldDom = getDomSibling(oldVNode); } } newParentVNode._dom = firstChildDom; // Remove children that are not part of any vnode. if (excessDomChildren != null && typeof newParentVNode.type != 'function') { for (i = excessDomChildren.length; i--; ) { if (excessDomChildren[i] != null) removeNode(excessDomChildren[i]); } } // Remove remaining oldChildren if there are any. for (i = oldChildrenLength; i--; ) { if (oldChildren[i] != null) { if ( typeof newParentVNode.type == 'function' && oldChildren[i]._dom != null && oldChildren[i]._dom == newParentVNode._nextDom ) { // If the newParentVNode.__nextDom points to a dom node that is about to // be unmounted, then get the next sibling of that vnode and set // _nextDom to it newParentVNode._nextDom = getDomSibling(oldParentVNode, i + 1); } unmount(oldChildren[i], oldChildren[i]); } } // Set refs only after unmount if (refs) { for (i = 0; i < refs.length; i++) { applyRef(refs[i], refs[++i], refs[++i]); } } } function reorderChildren(childVNode, oldDom, parentDom) { for (let tmp = 0; tmp < childVNode._children.length; tmp++) { let vnode = childVNode._children[tmp]; if (vnode) { // We typically enter this code path on sCU bailout, where we copy // oldVNode._children to newVNode._children. If that is the case, we need // to update the old children's _parent pointer to point to the newVNode // (childVNode here). vnode._parent = childVNode; if (typeof vnode.type == 'function') { oldDom = reorderChildren(vnode, oldDom, parentDom); } else { oldDom = placeChild( parentDom, vnode, vnode, childVNode._children, null, vnode._dom, oldDom ); } } } return oldDom; } /** * Flatten and loop through the children of a virtual node * @param {import('../index').ComponentChildren} children The unflattened * children of a virtual node * @returns {import('../internal').VNode[]} */ export function toChildArray(children, out) { out = out || []; if (children == null || typeof children == 'boolean') { } else if (Array.isArray(children)) { children.some(child => { toChildArray(child, out); }); } else { out.push(children); } return out; } function placeChild( parentDom, childVNode, oldVNode, oldChildren, excessDomChildren, newDom, oldDom ) { let nextDom; if (childVNode._nextDom !== undefined) { // Only Fragments or components that return Fragment like VNodes will // have a non-undefined _nextDom. Continue the diff from the sibling // of last DOM child of this child VNode nextDom = childVNode._nextDom; // Eagerly cleanup _nextDom. We don't need to persist the value because // it is only used by `diffChildren` to determine where to resume the diff after // diffing Components and Fragments. Once we store it the nextDOM local var, we // can clean up the property childVNode._nextDom = undefined; } else if ( excessDomChildren == oldVNode || newDom != oldDom || newDom.parentNode == null ) { // NOTE: excessDomChildren==oldVNode above: // This is a compression of excessDomChildren==null && oldVNode==null! // The values only have the same type when `null`. 
outer: if (oldDom == null || oldDom.parentNode !== parentDom) { parentDom.appendChild(newDom); nextDom = null; } else { // `j<oldChildrenLength; j+=2` is an alternative to `j++<oldChildrenLength/2` for ( let sibDom = oldDom, j = 0; (sibDom = sibDom.nextSibling) && j < oldChildren.length; j += 2 ) { if (sibDom == newDom) { break outer; } } parentDom.insertBefore(newDom, oldDom); nextDom = oldDom; } } // If we have pre-calculated the nextDOM node, use it. Else calculate it now // Strictly check for `undefined` here cuz `null` is a valid value of `nextDom`. // See more detail in create-element.js:createVNode if (nextDom !== undefined) { oldDom = nextDom; } else { oldDom = newDom.nextSibling; } return oldDom; }
1
16,465
@developit I vaguely remember you mentioning that some JS engines have an escape-analysis optimization that makes locally declared objects (and arrays?) which never leave the function essentially free from a GC perspective. Do you think we could benefit from that here, or should we leave this as it is?
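For context, a minimal sketch of the two allocation strategies the patch above trades between; whether engine escape analysis makes the eager allocation effectively free is exactly the open question in this comment. The function and parameter names below (`applyRefsLazy`, `applyRefsEager`, `applyRef`) are illustrative, not Preact internals:

// Before the patch: the refs array is created lazily, only when the first ref
// is seen, so diffs without any refs never allocate it.
function applyRefsLazy(children, applyRef) {
	let refs;
	for (const child of children) {
		if (child && child.ref) {
			if (!refs) refs = []; // extra branch on every ref encountered
			refs.push(child.ref, child);
		}
	}
	if (refs) {
		for (let i = 0; i < refs.length; i += 2) applyRef(refs[i], refs[i + 1]);
	}
}

// After the patch: the array is allocated unconditionally. It never leaves the
// function, which is the case where escape analysis (if the engine applies it
// here) could make the allocation effectively free; otherwise it costs one
// small allocation per call.
function applyRefsEager(children, applyRef) {
	const refs = [];
	for (const child of children) {
		if (child && child.ref) refs.push(child.ref, child);
	}
	for (let i = 0; i < refs.length; i += 2) applyRef(refs[i], refs[i + 1]);
}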
preactjs-preact
js
@@ -1252,6 +1252,8 @@ func invertOpForLocalNotifications(oldOp op) (newOp op, err error) { } case *GCOp: newOp = op + case *resolutionOp: + newOp = op } // Now reverse all the block updates. Don't bother with bare Refs
1
// Copyright 2016 Keybase Inc. All rights reserved. // Use of this source code is governed by a BSD // license that can be found in the LICENSE file. package libkbfs import ( "context" "errors" "fmt" "reflect" "strings" "time" "github.com/keybase/go-codec/codec" "github.com/keybase/kbfs/kbfscodec" ) // op represents a single file-system remote-sync operation type op interface { AddRefBlock(ptr BlockPointer) DelRefBlock(ptr BlockPointer) AddUnrefBlock(ptr BlockPointer) DelUnrefBlock(ptr BlockPointer) AddUpdate(oldPtr BlockPointer, newPtr BlockPointer) SizeExceptUpdates() uint64 allUpdates() []blockUpdate Refs() []BlockPointer Unrefs() []BlockPointer String() string StringWithRefs(numRefIndents int) string setWriterInfo(writerInfo) getWriterInfo() writerInfo setFinalPath(p path) getFinalPath() path setLocalTimestamp(t time.Time) getLocalTimestamp() time.Time checkValid() error // checkConflict compares the function's target op with the given // op, and returns a resolution if one is needed (or nil // otherwise). The resulting action (if any) assumes that this // method's target op is the unmerged op, and the given op is the // merged op. checkConflict(ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) ( crAction, error) // getDefaultAction should be called on an unmerged op only after // all conflicts with the corresponding change have been checked, // and it returns the action to take against the merged branch // given that there are no conflicts. getDefaultAction(mergedPath path) crAction } // op codes const ( createOpCode kbfscodec.ExtCode = iota + kbfscodec.ExtCodeOpsRangeStart rmOpCode renameOpCode syncOpCode setAttrOpCode resolutionOpCode rekeyOpCode gcOpCode // for deleting old blocks during an MD history truncation ) // blockUpdate represents a block that was updated to have a new // BlockPointer. // // NOTE: Don't add or modify anything in this struct without // considering how old clients will handle them. type blockUpdate struct { Unref BlockPointer `codec:"u,omitempty"` Ref BlockPointer `codec:"r,omitempty"` } func makeBlockUpdate(unref, ref BlockPointer) (blockUpdate, error) { bu := blockUpdate{} err := bu.setUnref(unref) if err != nil { return blockUpdate{}, err } err = bu.setRef(ref) if err != nil { return blockUpdate{}, err } return bu, nil } func (u blockUpdate) checkValid() error { if u.Unref == (BlockPointer{}) { return errors.New("nil unref") } if u.Ref == (BlockPointer{}) { return errors.New("nil ref") } return nil } func (u *blockUpdate) setUnref(ptr BlockPointer) error { if ptr == (BlockPointer{}) { return fmt.Errorf("setUnref called with nil ptr") } u.Unref = ptr return nil } func (u *blockUpdate) setRef(ptr BlockPointer) error { if ptr == (BlockPointer{}) { return fmt.Errorf("setRef called with nil ptr") } u.Ref = ptr return nil } // list codes const ( opsListCode kbfscodec.ExtCode = iota + kbfscodec.ExtCodeListRangeStart ) type opsList []op // OpCommon are data structures needed by all ops. It is only // exported for serialization purposes. type OpCommon struct { RefBlocks []BlockPointer `codec:"r,omitempty"` UnrefBlocks []BlockPointer `codec:"u,omitempty"` Updates []blockUpdate `codec:"o,omitempty"` codec.UnknownFieldSetHandler // writerInfo is the keybase username and device that generated this // operation. // Not exported; only used during conflict resolution. writerInfo writerInfo // finalPath is the final resolved path to the node that this // operation affects in a set of MD updates. Not exported; only // used locally. 
finalPath path // localTimestamp should be set to the localTimestamp of the // corresponding ImmutableRootMetadata when ops need individual // timestamps. Not exported; only used locally. localTimestamp time.Time } // AddRefBlock adds this block to the list of newly-referenced blocks // for this op. func (oc *OpCommon) AddRefBlock(ptr BlockPointer) { oc.RefBlocks = append(oc.RefBlocks, ptr) } // DelRefBlock removes the first reference of the given block from the // list of newly-referenced blocks for this op. func (oc *OpCommon) DelRefBlock(ptr BlockPointer) { for i, ref := range oc.RefBlocks { if ptr == ref { oc.RefBlocks = append(oc.RefBlocks[:i], oc.RefBlocks[i+1:]...) break } } } // AddUnrefBlock adds this block to the list of newly-unreferenced blocks // for this op. func (oc *OpCommon) AddUnrefBlock(ptr BlockPointer) { oc.UnrefBlocks = append(oc.UnrefBlocks, ptr) } // DelUnrefBlock removes the first unreference of the given block from // the list of unreferenced blocks for this op. func (oc *OpCommon) DelUnrefBlock(ptr BlockPointer) { for i, unref := range oc.UnrefBlocks { if ptr == unref { oc.UnrefBlocks = append(oc.UnrefBlocks[:i], oc.UnrefBlocks[i+1:]...) break } } } // AddUpdate adds a mapping from an old block to the new version of // that block, for this op. func (oc *OpCommon) AddUpdate(oldPtr BlockPointer, newPtr BlockPointer) { // Either pointer may be zero, if we're building an op that // will be fixed up later. bu := blockUpdate{oldPtr, newPtr} oc.Updates = append(oc.Updates, bu) } // Refs returns a slice containing all the blocks that were initially // referenced during this op. func (oc *OpCommon) Refs() []BlockPointer { return oc.RefBlocks } // Unrefs returns a slice containing all the blocks that were // unreferenced during this op. func (oc *OpCommon) Unrefs() []BlockPointer { return oc.UnrefBlocks } func (oc *OpCommon) setWriterInfo(info writerInfo) { oc.writerInfo = info } func (oc *OpCommon) getWriterInfo() writerInfo { return oc.writerInfo } func (oc *OpCommon) setFinalPath(p path) { oc.finalPath = p } func (oc *OpCommon) getFinalPath() path { return oc.finalPath } func (oc *OpCommon) setLocalTimestamp(t time.Time) { oc.localTimestamp = t } func (oc *OpCommon) getLocalTimestamp() time.Time { return oc.localTimestamp } func (oc *OpCommon) checkUpdatesValid() error { for i, update := range oc.Updates { err := update.checkValid() if err != nil { return fmt.Errorf( "update[%d]=%v got error: %v", i, update, err) } } return nil } func (oc *OpCommon) stringWithRefs(numRefIndents int) string { indent := strings.Repeat("\t", numRefIndents) res := "" for i, update := range oc.Updates { res += indent + fmt.Sprintf( "Update[%d]: %v -> %v\n", i, update.Unref, update.Ref) } for i, ref := range oc.RefBlocks { res += indent + fmt.Sprintf("Ref[%d]: %v\n", i, ref) } for i, unref := range oc.UnrefBlocks { res += indent + fmt.Sprintf("Unref[%d]: %v\n", i, unref) } return res } // createOp is an op representing a file or subdirectory creation type createOp struct { OpCommon NewName string `codec:"n"` Dir blockUpdate `codec:"d"` Type EntryType `codec:"t"` // If true, this create op represents half of a rename operation. // This op should never be persisted. renamed bool // If true, during conflict resolution the blocks of the file will // be copied. forceCopy bool // If this is set, ths create op needs to be turned has been // turned into a symlink creation locally to avoid a cycle during // conflict resolution, and the following field represents the // text of the symlink. 
This op should never be persisted. crSymPath string } func newCreateOp(name string, oldDir BlockPointer, t EntryType) (*createOp, error) { co := &createOp{ NewName: name, } err := co.Dir.setUnref(oldDir) if err != nil { return nil, err } co.Type = t return co, nil } func newCreateOpForRootDir() *createOp { return &createOp{ Type: Dir, } } func (co *createOp) AddUpdate(oldPtr BlockPointer, newPtr BlockPointer) { if co.Dir == (blockUpdate{}) { panic("AddUpdate called on create op with empty Dir " + "(probably create op for root dir)") } if oldPtr == co.Dir.Unref { err := co.Dir.setRef(newPtr) if err != nil { panic(err) } return } co.OpCommon.AddUpdate(oldPtr, newPtr) } func (co *createOp) SizeExceptUpdates() uint64 { return uint64(len(co.NewName)) } func (co *createOp) allUpdates() []blockUpdate { updates := make([]blockUpdate, len(co.Updates)) copy(updates, co.Updates) return append(updates, co.Dir) } func (co *createOp) checkValid() error { if co.NewName == "" { // Must be for root dir. return nil } err := co.Dir.checkValid() if err != nil { return fmt.Errorf("createOp.Dir=%v got error: %v", co.Dir, err) } return co.checkUpdatesValid() } func (co *createOp) String() string { res := fmt.Sprintf("create %s (%s)", co.NewName, co.Type) if co.renamed { res += " (renamed)" } return res } func (co *createOp) StringWithRefs(numRefIndents int) string { res := co.String() + "\n" indent := strings.Repeat("\t", numRefIndents) res += indent + fmt.Sprintf("Dir: %v -> %v\n", co.Dir.Unref, co.Dir.Ref) res += co.stringWithRefs(numRefIndents) return res } func (co *createOp) checkConflict( ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) (crAction, error) { switch realMergedOp := mergedOp.(type) { case *createOp: // Conflicts if this creates the same name and one of them // isn't creating a directory. sameName := (realMergedOp.NewName == co.NewName) if sameName && (realMergedOp.Type != Dir || co.Type != Dir) { if realMergedOp.Type != Dir && (co.Type == Dir || co.crSymPath != "") { // Rename the merged entry only if the unmerged one is // a directory (or to-be-sympath'd directory) and the // merged one is not. toName, err := renamer.ConflictRename( ctx, mergedOp, co.NewName) if err != nil { return nil, err } return &renameMergedAction{ fromName: co.NewName, toName: toName, symPath: co.crSymPath, }, nil } // Otherwise rename the unmerged entry (guaranteed to be a file). toName, err := renamer.ConflictRename( ctx, co, co.NewName) if err != nil { return nil, err } return &renameUnmergedAction{ fromName: co.NewName, toName: toName, symPath: co.crSymPath, }, nil } // If they are both directories, and one of them is a rename, // then we have a conflict and need to rename the renamed one. // // TODO: Implement a better merging strategy for when an // existing directory gets into a rename conflict with another // existing or new directory. if sameName && realMergedOp.Type == Dir && co.Type == Dir && (realMergedOp.renamed || co.renamed) { // Always rename the unmerged one toName, err := renamer.ConflictRename( ctx, co, co.NewName) if err != nil { return nil, err } return &copyUnmergedEntryAction{ fromName: co.NewName, toName: toName, symPath: co.crSymPath, unique: true, }, nil } } // Doesn't conflict with any rmOps, because the default action // will just re-create it in the merged branch as necessary. 
return nil, nil } func (co *createOp) getDefaultAction(mergedPath path) crAction { if co.forceCopy { return &renameUnmergedAction{ fromName: co.NewName, toName: co.NewName, symPath: co.crSymPath, } } return &copyUnmergedEntryAction{ fromName: co.NewName, toName: co.NewName, symPath: co.crSymPath, } } // rmOp is an op representing a file or subdirectory removal type rmOp struct { OpCommon OldName string `codec:"n"` Dir blockUpdate `codec:"d"` // Indicates that the resolution process should skip this rm op. // Likely indicates the rm half of a cycle-creating rename. dropThis bool } func newRmOp(name string, oldDir BlockPointer) (*rmOp, error) { ro := &rmOp{ OldName: name, } err := ro.Dir.setUnref(oldDir) if err != nil { return nil, err } return ro, nil } func (ro *rmOp) AddUpdate(oldPtr BlockPointer, newPtr BlockPointer) { if oldPtr == ro.Dir.Unref { err := ro.Dir.setRef(newPtr) if err != nil { panic(err) } return } ro.OpCommon.AddUpdate(oldPtr, newPtr) } func (ro *rmOp) SizeExceptUpdates() uint64 { return uint64(len(ro.OldName)) } func (ro *rmOp) allUpdates() []blockUpdate { updates := make([]blockUpdate, len(ro.Updates)) copy(updates, ro.Updates) return append(updates, ro.Dir) } func (ro *rmOp) checkValid() error { err := ro.Dir.checkValid() if err != nil { return fmt.Errorf("rmOp.Dir=%v got error: %v", ro.Dir, err) } return ro.checkUpdatesValid() } func (ro *rmOp) String() string { return fmt.Sprintf("rm %s", ro.OldName) } func (ro *rmOp) StringWithRefs(numRefIndents int) string { res := ro.String() + "\n" indent := strings.Repeat("\t", numRefIndents) res += indent + fmt.Sprintf("Dir: %v -> %v\n", ro.Dir.Unref, ro.Dir.Ref) res += ro.stringWithRefs(numRefIndents) return res } func (ro *rmOp) checkConflict( ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) (crAction, error) { switch realMergedOp := mergedOp.(type) { case *createOp: if realMergedOp.NewName == ro.OldName { // Conflicts if this creates the same name. This can only // happen if the merged branch deleted the old node and // re-created it, in which case it is totally fine to drop // this rm op for the original node. return &dropUnmergedAction{op: ro}, nil } case *rmOp: if realMergedOp.OldName == ro.OldName { // Both removed the same file. return &dropUnmergedAction{op: ro}, nil } } return nil, nil } func (ro *rmOp) getDefaultAction(mergedPath path) crAction { if ro.dropThis { return &dropUnmergedAction{op: ro} } return &rmMergedEntryAction{name: ro.OldName} } // renameOp is an op representing a rename of a file/subdirectory from // one directory to another. If this is a rename within the same // directory, NewDir will be equivalent to blockUpdate{}. renameOp // records the moved pointer, even though it doesn't change as part of // the operation, to make it possible to track the full path of // directories for the purposes of conflict resolution. type renameOp struct { OpCommon OldName string `codec:"on"` OldDir blockUpdate `codec:"od"` NewName string `codec:"nn"` NewDir blockUpdate `codec:"nd"` Renamed BlockPointer `codec:"re"` RenamedType EntryType `codec:"rt"` } func newRenameOp(oldName string, oldOldDir BlockPointer, newName string, oldNewDir BlockPointer, renamed BlockPointer, renamedType EntryType) (*renameOp, error) { ro := &renameOp{ OldName: oldName, NewName: newName, Renamed: renamed, RenamedType: renamedType, } err := ro.OldDir.setUnref(oldOldDir) if err != nil { return nil, err } // If we are renaming within a directory, let the NewDir remain empty. 
if oldOldDir != oldNewDir { err := ro.NewDir.setUnref(oldNewDir) if err != nil { return nil, err } } return ro, nil } func (ro *renameOp) AddUpdate(oldPtr BlockPointer, newPtr BlockPointer) { if oldPtr == ro.OldDir.Unref { err := ro.OldDir.setRef(newPtr) if err != nil { panic(err) } return } if ro.NewDir != (blockUpdate{}) && oldPtr == ro.NewDir.Unref { err := ro.NewDir.setRef(newPtr) if err != nil { panic(err) } return } ro.OpCommon.AddUpdate(oldPtr, newPtr) } func (ro *renameOp) SizeExceptUpdates() uint64 { return uint64(len(ro.NewName) + len(ro.NewName)) } func (ro *renameOp) allUpdates() []blockUpdate { updates := make([]blockUpdate, len(ro.Updates)) copy(updates, ro.Updates) if ro.NewDir != (blockUpdate{}) { return append(updates, ro.NewDir, ro.OldDir) } return append(updates, ro.OldDir) } func (ro *renameOp) checkValid() error { err := ro.OldDir.checkValid() if err != nil { return fmt.Errorf("renameOp.OldDir=%v got error: %v", ro.OldDir, err) } if ro.NewDir != (blockUpdate{}) { err = ro.NewDir.checkValid() if err != nil { return fmt.Errorf("renameOp.NewDir=%v got error: %v", ro.NewDir, err) } } return ro.checkUpdatesValid() } func (ro *renameOp) String() string { return fmt.Sprintf("rename %s -> %s (%s)", ro.OldName, ro.NewName, ro.RenamedType) } func (ro *renameOp) StringWithRefs(numRefIndents int) string { res := ro.String() + "\n" indent := strings.Repeat("\t", numRefIndents) res += indent + fmt.Sprintf("OldDir: %v -> %v\n", ro.OldDir.Unref, ro.OldDir.Ref) if ro.NewDir != (blockUpdate{}) { res += indent + fmt.Sprintf("NewDir: %v -> %v\n", ro.NewDir.Unref, ro.NewDir.Ref) } else { res += indent + fmt.Sprintf("NewDir: same as above\n") } res += indent + fmt.Sprintf("Renamed: %v\n", ro.Renamed) res += ro.stringWithRefs(numRefIndents) return res } func (ro *renameOp) checkConflict( ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) (crAction, error) { return nil, fmt.Errorf("Unexpected conflict check on a rename op: %s", ro) } func (ro *renameOp) getDefaultAction(mergedPath path) crAction { return nil } // WriteRange represents a file modification. Len is 0 for a // truncate. type WriteRange struct { Off uint64 `codec:"o"` Len uint64 `codec:"l,omitempty"` // 0 for truncates codec.UnknownFieldSetHandler } func (w WriteRange) isTruncate() bool { return w.Len == 0 } // End returns the index of the largest byte not affected by this // write. It only makes sense to call this for non-truncates. func (w WriteRange) End() uint64 { if w.isTruncate() { panic("Truncates don't have an end") } return w.Off + w.Len } // Affects returns true if the regions affected by this write // operation and `other` overlap in some way. Specifically, it // returns true if: // // - both operations are writes and their write ranges overlap; // - one operation is a write and one is a truncate, and the truncate is // within the write's range or before it; or // - both operations are truncates. func (w WriteRange) Affects(other WriteRange) bool { if w.isTruncate() { if other.isTruncate() { return true } // A truncate affects a write if it lands inside or before the // write. return other.End() > w.Off } else if other.isTruncate() { return w.End() > other.Off } // Both are writes -- do their ranges overlap? return (w.Off <= other.End() && other.End() <= w.End()) || (other.Off <= w.End() && w.End() <= other.End()) } // syncOp is an op that represents a series of writes to a file. 
type syncOp struct { OpCommon File blockUpdate `codec:"f"` Writes []WriteRange `codec:"w"` // If true, this says that if there is a conflict involving this // op, we should keep the unmerged name rather than construct a // conflict name (probably because the new name already // diverges from the name in the other branch). keepUnmergedTailName bool } func newSyncOp(oldFile BlockPointer) (*syncOp, error) { so := &syncOp{} err := so.File.setUnref(oldFile) if err != nil { return nil, err } so.resetUpdateState() return so, nil } func (so *syncOp) resetUpdateState() { so.Updates = nil } func (so *syncOp) AddUpdate(oldPtr BlockPointer, newPtr BlockPointer) { if oldPtr == so.File.Unref { err := so.File.setRef(newPtr) if err != nil { panic(err) } return } so.OpCommon.AddUpdate(oldPtr, newPtr) } func (so *syncOp) addWrite(off uint64, length uint64) WriteRange { latestWrite := WriteRange{Off: off, Len: length} so.Writes = append(so.Writes, latestWrite) return latestWrite } func (so *syncOp) addTruncate(off uint64) WriteRange { latestWrite := WriteRange{Off: off, Len: 0} so.Writes = append(so.Writes, latestWrite) return latestWrite } func (so *syncOp) SizeExceptUpdates() uint64 { return uint64(len(so.Writes) * 16) } func (so *syncOp) allUpdates() []blockUpdate { updates := make([]blockUpdate, len(so.Updates)) copy(updates, so.Updates) return append(updates, so.File) } func (so *syncOp) checkValid() error { err := so.File.checkValid() if err != nil { return fmt.Errorf("syncOp.File=%v got error: %v", so.File, err) } return so.checkUpdatesValid() } func (so *syncOp) String() string { var writes []string for _, r := range so.Writes { writes = append(writes, fmt.Sprintf("{off=%d, len=%d}", r.Off, r.Len)) } return fmt.Sprintf("sync [%s]", strings.Join(writes, ", ")) } func (so *syncOp) StringWithRefs(numRefIndents int) string { res := so.String() + "\n" indent := strings.Repeat("\t", numRefIndents) res += indent + fmt.Sprintf("File: %v -> %v\n", so.File.Unref, so.File.Ref) res += so.stringWithRefs(numRefIndents) return res } func (so *syncOp) checkConflict( ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) (crAction, error) { switch mergedOp.(type) { case *syncOp: // Any sync on the same file is a conflict. (TODO: add // type-specific intelligent conflict resolvers for file // contents?) toName, err := renamer.ConflictRename( ctx, so, mergedOp.getFinalPath().tailName()) if err != nil { return nil, err } if so.keepUnmergedTailName { toName = so.getFinalPath().tailName() } return &renameUnmergedAction{ fromName: so.getFinalPath().tailName(), toName: toName, unmergedParentMostRecent: so.getFinalPath().parentPath().tailPointer(), mergedParentMostRecent: mergedOp.getFinalPath().parentPath(). tailPointer(), }, nil case *setAttrOp: // Someone on the merged path explicitly set an attribute, so // just copy the size and blockpointer over. return &copyUnmergedAttrAction{ fromName: so.getFinalPath().tailName(), toName: mergedOp.getFinalPath().tailName(), attr: []attrChange{sizeAttr}, }, nil } return nil, nil } func (so *syncOp) getDefaultAction(mergedPath path) crAction { return &copyUnmergedEntryAction{ fromName: so.getFinalPath().tailName(), toName: mergedPath.tailName(), symPath: "", } } // In the functions below. a collapsed []WriteRange is a sequence of // non-overlapping writes with strictly increasing Off, and maybe a // trailing truncate (with strictly greater Off). // coalesceWrites combines the given `wNew` with the head and tail of // the given collapsed `existingWrites` slice. 
For example, if the // new write is {5, 100}, and `existingWrites` = [{7,5}, {18,10}, // {98,10}], the returned write will be {5,103}. There may be a // truncate at the end of the returned slice as well. func coalesceWrites(existingWrites []WriteRange, wNew WriteRange) []WriteRange { if wNew.isTruncate() { panic("coalesceWrites cannot be called with a new truncate.") } if len(existingWrites) == 0 { return []WriteRange{wNew} } newOff := wNew.Off newEnd := wNew.End() wOldHead := existingWrites[0] wOldTail := existingWrites[len(existingWrites)-1] if !wOldTail.isTruncate() && wOldTail.End() > newEnd { newEnd = wOldTail.End() } if !wOldHead.isTruncate() && wOldHead.Off < newOff { newOff = wOldHead.Off } ret := []WriteRange{{Off: newOff, Len: newEnd - newOff}} if wOldTail.isTruncate() { ret = append(ret, WriteRange{Off: newEnd}) } return ret } // Assumes writes is already collapsed, i.e. a sequence of // non-overlapping writes with strictly increasing Off, and maybe a // trailing truncate (with strictly greater Off). func addToCollapsedWriteRange(writes []WriteRange, wNew WriteRange) []WriteRange { // Form three regions: head, mid, and tail: head is the maximal prefix // of writes less than (with respect to Off) and unaffected by wNew, // tail is the maximal suffix of writes greater than (with respect to // Off) and unaffected by wNew, and mid is everything else, i.e. the // range of writes affected by wNew. var headEnd int for ; headEnd < len(writes); headEnd++ { wOld := writes[headEnd] if wOld.Off >= wNew.Off || wNew.Affects(wOld) { break } } head := writes[:headEnd] if wNew.isTruncate() { // end is empty, since a truncate affects a suffix of writes. mid := writes[headEnd:] if len(mid) == 0 { // Truncate past the last write. return append(head, wNew) } else if mid[0].isTruncate() { // Min truncate wins if mid[0].Off < wNew.Off { return append(head, mid[0]) } return append(head, wNew) } else if mid[0].Off < wNew.Off { return append(head, WriteRange{ Off: mid[0].Off, Len: wNew.Off - mid[0].Off, }, wNew) } return append(head, wNew) } // wNew is a write. midEnd := headEnd for ; midEnd < len(writes); midEnd++ { wOld := writes[midEnd] if !wNew.Affects(wOld) { break } } mid := writes[headEnd:midEnd] end := writes[midEnd:] mid = coalesceWrites(mid, wNew) return append(head, append(mid, end...)...) } // collapseWriteRange returns a set of writes that represent the final // dirty state of this file after this syncOp, given a previous write // range. It coalesces overlapping dirty writes, and it erases any // writes that occurred before a truncation with an offset smaller // than its max dirty byte. // // This function assumes that `writes` has already been collapsed (or // is nil). // // NOTE: Truncates past a file's end get turned into writes by // folderBranchOps, but in the future we may have bona fide truncate // WriteRanges past a file's end. func (so *syncOp) collapseWriteRange(writes []WriteRange) ( newWrites []WriteRange) { newWrites = writes for _, wNew := range so.Writes { newWrites = addToCollapsedWriteRange(newWrites, wNew) } return newWrites } type attrChange uint16 const ( exAttr attrChange = iota mtimeAttr sizeAttr // only used during conflict resolution ) func (ac attrChange) String() string { switch ac { case exAttr: return "ex" case mtimeAttr: return "mtime" case sizeAttr: return "size" } return "<invalid attrChange>" } // setAttrOp is an op that represents changing the attributes of a // file/subdirectory with in a directory. 
type setAttrOp struct { OpCommon Name string `codec:"n"` Dir blockUpdate `codec:"d"` Attr attrChange `codec:"a"` File BlockPointer `codec:"f"` // If true, this says that if there is a conflict involving this // op, we should keep the unmerged name rather than construct a // conflict name (probably because the new name already // diverges from the name in the other branch). keepUnmergedTailName bool } func newSetAttrOp(name string, oldDir BlockPointer, attr attrChange, file BlockPointer) (*setAttrOp, error) { sao := &setAttrOp{ Name: name, } err := sao.Dir.setUnref(oldDir) if err != nil { return nil, err } sao.Attr = attr sao.File = file return sao, nil } func (sao *setAttrOp) AddUpdate(oldPtr BlockPointer, newPtr BlockPointer) { if oldPtr == sao.Dir.Unref { err := sao.Dir.setRef(newPtr) if err != nil { panic(err) } return } sao.OpCommon.AddUpdate(oldPtr, newPtr) } func (sao *setAttrOp) SizeExceptUpdates() uint64 { return uint64(len(sao.Name)) } func (sao *setAttrOp) allUpdates() []blockUpdate { updates := make([]blockUpdate, len(sao.Updates)) copy(updates, sao.Updates) return append(updates, sao.Dir) } func (sao *setAttrOp) checkValid() error { err := sao.Dir.checkValid() if err != nil { return fmt.Errorf("setAttrOp.Dir=%v got error: %v", sao.Dir, err) } return sao.checkUpdatesValid() } func (sao *setAttrOp) String() string { return fmt.Sprintf("setAttr %s (%s)", sao.Name, sao.Attr) } func (sao *setAttrOp) StringWithRefs(numRefIndents int) string { res := sao.String() + "\n" indent := strings.Repeat("\t", numRefIndents) res += indent + fmt.Sprintf("Dir: %v -> %v\n", sao.Dir.Unref, sao.Dir.Ref) res += indent + fmt.Sprintf("File: %v\n", sao.File) res += sao.stringWithRefs(numRefIndents) return res } func (sao *setAttrOp) checkConflict( ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) (crAction, error) { switch realMergedOp := mergedOp.(type) { case *setAttrOp: if realMergedOp.Attr == sao.Attr { var symPath string var causedByAttr attrChange if !isFile { // A directory has a conflict on an mtime attribute. // Create a symlink entry with the unmerged mtime // pointing to the merged entry. symPath = mergedOp.getFinalPath().tailName() causedByAttr = sao.Attr } // A set attr for the same attribute on the same file is a // conflict. fromName := sao.getFinalPath().tailName() toName, err := renamer.ConflictRename(ctx, sao, fromName) if err != nil { return nil, err } if sao.keepUnmergedTailName { toName = sao.getFinalPath().tailName() } return &renameUnmergedAction{ fromName: fromName, toName: toName, symPath: symPath, causedByAttr: causedByAttr, unmergedParentMostRecent: sao.getFinalPath().parentPath().tailPointer(), mergedParentMostRecent: mergedOp.getFinalPath().parentPath(). tailPointer(), }, nil } } return nil, nil } func (sao *setAttrOp) getDefaultAction(mergedPath path) crAction { return &copyUnmergedAttrAction{ fromName: sao.getFinalPath().tailName(), toName: mergedPath.tailName(), attr: []attrChange{sao.Attr}, } } // resolutionOp is an op that represents the block changes that took // place as part of a conflict resolution. 
type resolutionOp struct { OpCommon } func newResolutionOp() *resolutionOp { ro := &resolutionOp{} return ro } func (ro *resolutionOp) SizeExceptUpdates() uint64 { return 0 } func (ro *resolutionOp) allUpdates() []blockUpdate { return ro.Updates } func (ro *resolutionOp) checkValid() error { return ro.checkUpdatesValid() } func (ro *resolutionOp) String() string { return "resolution" } func (ro *resolutionOp) StringWithRefs(numRefIndents int) string { res := ro.String() + "\n" res += ro.stringWithRefs(numRefIndents) return res } func (ro *resolutionOp) checkConflict( ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) (crAction, error) { return nil, nil } func (ro *resolutionOp) getDefaultAction(mergedPath path) crAction { return nil } // rekeyOp is an op that represents a rekey on a TLF. type rekeyOp struct { OpCommon } func newRekeyOp() *rekeyOp { ro := &rekeyOp{} return ro } func (ro *rekeyOp) SizeExceptUpdates() uint64 { return 0 } func (ro *rekeyOp) allUpdates() []blockUpdate { return ro.Updates } func (ro *rekeyOp) checkValid() error { return ro.checkUpdatesValid() } func (ro *rekeyOp) String() string { return "rekey" } func (ro *rekeyOp) StringWithRefs(numRefIndents int) string { res := ro.String() + "\n" res += ro.stringWithRefs(numRefIndents) return res } func (ro *rekeyOp) checkConflict( ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) (crAction, error) { return nil, nil } func (ro *rekeyOp) getDefaultAction(mergedPath path) crAction { return nil } // GCOp is an op that represents garbage-collecting the history of a // folder (which may involve unreferencing blocks that previously held // operation lists. It may contain unref blocks before it is added to // the metadata ops list. type GCOp struct { OpCommon // LatestRev is the most recent MD revision that was // garbage-collected with this operation. // // The codec name overrides the one for RefBlocks in OpCommon, // which GCOp doesn't use. LatestRev MetadataRevision `codec:"r"` } func newGCOp(latestRev MetadataRevision) *GCOp { gco := &GCOp{ LatestRev: latestRev, } return gco } // SizeExceptUpdates implements op. func (gco *GCOp) SizeExceptUpdates() uint64 { return bpSize * uint64(len(gco.UnrefBlocks)) } func (gco *GCOp) allUpdates() []blockUpdate { return gco.Updates } func (gco *GCOp) checkValid() error { return gco.checkUpdatesValid() } func (gco *GCOp) String() string { return fmt.Sprintf("gc %d", gco.LatestRev) } // StringWithRefs implements the op interface for GCOp. func (gco *GCOp) StringWithRefs(numRefIndents int) string { res := gco.String() + "\n" res += gco.stringWithRefs(numRefIndents) return res } // checkConflict implements op. func (gco *GCOp) checkConflict( ctx context.Context, renamer ConflictRenamer, mergedOp op, isFile bool) (crAction, error) { return nil, nil } // getDefaultAction implements op. func (gco *GCOp) getDefaultAction(mergedPath path) crAction { return nil } // invertOpForLocalNotifications returns an operation that represents // an undoing of the effect of the given op. These are intended to be // used for local notifications only, and would not be useful for // finding conflicts (for example, we lose information about the type // of the file in a rmOp that we are trying to re-create). 
func invertOpForLocalNotifications(oldOp op) (newOp op, err error) { switch op := oldOp.(type) { default: panic(fmt.Sprintf("Unrecognized operation: %v", op)) case *createOp: newOp, err = newRmOp(op.NewName, op.Dir.Ref) if err != nil { return nil, err } case *rmOp: // Guess at the type, shouldn't be used for local notification // purposes. newOp, err = newCreateOp(op.OldName, op.Dir.Ref, File) if err != nil { return nil, err } case *renameOp: newDirRef := op.NewDir.Ref if op.NewDir == (blockUpdate{}) { newDirRef = op.OldDir.Ref } newOp, err = newRenameOp(op.NewName, newDirRef, op.OldName, op.OldDir.Ref, op.Renamed, op.RenamedType) if err != nil { return nil, err } case *syncOp: // Just replay the writes; for notifications purposes, they // will do the right job of marking the right bytes as // invalid. so, err := newSyncOp(op.File.Ref) if err != nil { return nil, err } so.Writes = make([]WriteRange, len(op.Writes)) copy(so.Writes, op.Writes) newOp = so case *setAttrOp: newOp, err = newSetAttrOp(op.Name, op.Dir.Ref, op.Attr, op.File) if err != nil { return nil, err } case *GCOp: newOp = op } // Now reverse all the block updates. Don't bother with bare Refs // and Unrefs since they don't matter for local notification // purposes. for _, update := range oldOp.allUpdates() { newOp.AddUpdate(update.Ref, update.Unref) } return newOp, nil } // NOTE: If you're updating opPointerizer and RegisterOps, make sure // to also update opPointerizerFuture and registerOpsFuture in // ops_test.go. // Our ugorji codec cannot decode our extension types as pointers, and // we need them to be pointers so they correctly satisfy the op // interface. So this function simply converts them into pointers as // needed. func opPointerizer(iface interface{}) reflect.Value { switch op := iface.(type) { default: return reflect.ValueOf(iface) case createOp: return reflect.ValueOf(&op) case rmOp: return reflect.ValueOf(&op) case renameOp: return reflect.ValueOf(&op) case syncOp: return reflect.ValueOf(&op) case setAttrOp: return reflect.ValueOf(&op) case resolutionOp: return reflect.ValueOf(&op) case rekeyOp: return reflect.ValueOf(&op) case GCOp: return reflect.ValueOf(&op) } } // RegisterOps registers all op types with the given codec. func RegisterOps(codec kbfscodec.Codec) { codec.RegisterType(reflect.TypeOf(createOp{}), createOpCode) codec.RegisterType(reflect.TypeOf(rmOp{}), rmOpCode) codec.RegisterType(reflect.TypeOf(renameOp{}), renameOpCode) codec.RegisterType(reflect.TypeOf(syncOp{}), syncOpCode) codec.RegisterType(reflect.TypeOf(setAttrOp{}), setAttrOpCode) codec.RegisterType(reflect.TypeOf(resolutionOp{}), resolutionOpCode) codec.RegisterType(reflect.TypeOf(rekeyOp{}), rekeyOpCode) codec.RegisterType(reflect.TypeOf(GCOp{}), gcOpCode) codec.RegisterIfaceSliceType(reflect.TypeOf(opsList{}), opsListCode, opPointerizer) }
idx: 1
id: 16,520
msg: Remind me why this is needed?
proj: keybase-kbfs
lang: go
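The Go file above documents how syncOp.collapseWriteRange folds each new WriteRange into an already-collapsed list, and its comment gives the worked example {5,100} + [{7,5},{18,10},{98,10}] -> {5,103}. As a rough illustration of just that coalescing step (not the KBFS implementation), here is a minimal, self-contained Java sketch; the WriteRange class and coalesce method are hypothetical stand-ins, every range passed in is assumed to be affected by the new write, and truncates are ignored.

import java.util.ArrayList;
import java.util.List;

public class WriteRangeCoalesceSketch {
    // Hypothetical stand-in for the Go WriteRange {Off, Len}; truncates are not modeled.
    static final class WriteRange {
        final long off;
        final long len;
        WriteRange(long off, long len) { this.off = off; this.len = len; }
        long end() { return off + len; }
        @Override public String toString() { return "{" + off + "," + len + "}"; }
    }

    // Coalesce a new write with existing writes that it affects: the result spans
    // from the smallest offset to the largest end among all of them.
    static WriteRange coalesce(List<WriteRange> affected, WriteRange wNew) {
        long newOff = wNew.off;
        long newEnd = wNew.end();
        for (WriteRange w : affected) {
            newOff = Math.min(newOff, w.off);
            newEnd = Math.max(newEnd, w.end());
        }
        return new WriteRange(newOff, newEnd - newOff);
    }

    public static void main(String[] args) {
        List<WriteRange> affected = new ArrayList<>();
        affected.add(new WriteRange(7, 5));
        affected.add(new WriteRange(18, 10));
        affected.add(new WriteRange(98, 10));
        // Mirrors the example in the Go comment: prints {5,103}
        System.out.println(coalesce(affected, new WriteRange(5, 100)));
    }
}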
@@ -64,6 +64,14 @@ public final class ConstantScoreQuery extends Query { return super.rewrite(reader); } + @Override + public void visit(QueryVisitor visitor) { + QueryVisitor v = visitor.getSubVisitor(BooleanClause.Occur.FILTER, this); + if (v != null) { + query.visit(v); + } + } + /** We return this as our {@link BulkScorer} so that if the CSQ * wraps a query with its own optimized top-level * scorer (e.g. BooleanScorer) we can use that
y: 1
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.lucene.search; import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.Objects; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.Bits; /** * A query that wraps another query and simply returns a constant score equal to * 1 for every document that matches the query. * It therefore simply strips of all scores and always returns 1. */ public final class ConstantScoreQuery extends Query { private final Query query; /** Strips off scores from the passed in Query. The hits will get a constant score * of 1. */ public ConstantScoreQuery(Query query) { this.query = Objects.requireNonNull(query, "Query must not be null"); } /** Returns the encapsulated query. */ public Query getQuery() { return query; } @Override public Query rewrite(IndexReader reader) throws IOException { Query rewritten = query.rewrite(reader); if (rewritten != query) { return new ConstantScoreQuery(rewritten); } if (rewritten.getClass() == ConstantScoreQuery.class) { return rewritten; } if (rewritten.getClass() == BoostQuery.class) { return new ConstantScoreQuery(((BoostQuery) rewritten).getQuery()); } return super.rewrite(reader); } /** We return this as our {@link BulkScorer} so that if the CSQ * wraps a query with its own optimized top-level * scorer (e.g. BooleanScorer) we can use that * top-level scorer. 
*/ protected static class ConstantBulkScorer extends BulkScorer { final BulkScorer bulkScorer; final Weight weight; final float theScore; public ConstantBulkScorer(BulkScorer bulkScorer, Weight weight, float theScore) { this.bulkScorer = bulkScorer; this.weight = weight; this.theScore = theScore; } @Override public int score(LeafCollector collector, Bits acceptDocs, int min, int max) throws IOException { return bulkScorer.score(wrapCollector(collector), acceptDocs, min, max); } private LeafCollector wrapCollector(LeafCollector collector) { return new FilterLeafCollector(collector) { @Override public void setScorer(Scorable scorer) throws IOException { // we must wrap again here, but using the scorer passed in as parameter: in.setScorer(new FilterScorable(scorer) { @Override public float score() { return theScore; } }); } }; } @Override public long cost() { return bulkScorer.cost(); } } @Override public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { final Weight innerWeight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1f); if (scoreMode.needsScores()) { return new ConstantScoreWeight(this, boost) { @Override public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { final BulkScorer innerScorer = innerWeight.bulkScorer(context); if (innerScorer == null) { return null; } return new ConstantBulkScorer(innerScorer, this, score()); } @Override public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException { ScorerSupplier innerScorerSupplier = innerWeight.scorerSupplier(context); if (innerScorerSupplier == null) { return null; } return new ScorerSupplier() { @Override public Scorer get(long leadCost) throws IOException { final Scorer innerScorer = innerScorerSupplier.get(leadCost); final float score = score(); return new FilterScorer(innerScorer) { @Override public float score() throws IOException { return score; } @Override public float getMaxScore(int upTo) throws IOException { return score; } @Override public Collection<ChildScorable> getChildren() { return Collections.singleton(new ChildScorable(innerScorer, "constant")); } }; } @Override public long cost() { return innerScorerSupplier.cost(); } }; } @Override public Matches matches(LeafReaderContext context, int doc) throws IOException { return innerWeight.matches(context, doc); } @Override public Scorer scorer(LeafReaderContext context) throws IOException { ScorerSupplier scorerSupplier = scorerSupplier(context); if (scorerSupplier == null) { return null; } return scorerSupplier.get(Long.MAX_VALUE); } @Override public boolean isCacheable(LeafReaderContext ctx) { return innerWeight.isCacheable(ctx); } }; } else { return innerWeight; } } @Override public String toString(String field) { return new StringBuilder("ConstantScore(") .append(query.toString(field)) .append(')') .toString(); } @Override public boolean equals(Object other) { return sameClassAs(other) && query.equals(((ConstantScoreQuery) other).query); } @Override public int hashCode() { return 31 * classHash() + query.hashCode(); } }
idx: 1
id: 28,604
msg: same as above, can this be null now?
proj: apache-lucene-solr
lang: java
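The patch above makes ConstantScoreQuery.visit delegate to a FILTER sub-visitor only when getSubVisitor returns something non-null, which is exactly what the review comment questions. The sketch below is a simplified, self-contained model of that pattern in plain Java; the Visitor and Query types here are hypothetical and are not Lucene's QueryVisitor API. In this model a visitor signals "do not descend into this kind of clause" by returning null, and the wrapping query skips its child in that case.

import java.util.ArrayList;
import java.util.List;

public class SubVisitorSketch {
    // Hypothetical clause kinds, loosely mirroring BooleanClause.Occur.
    enum Occur { MUST, FILTER }

    interface Query { void visit(Visitor v); }

    // Hypothetical visitor: returning null from subVisitor() means
    // "do not descend into clauses of this kind".
    interface Visitor {
        void visitLeaf(Query q);
        Visitor subVisitor(Occur occur, Query parent);
    }

    // Analogue of a constant-score wrapper: only descend when the caller
    // supplied a sub-visitor for FILTER clauses.
    static final class ConstantScoreLike implements Query {
        private final Query inner;
        ConstantScoreLike(Query inner) { this.inner = inner; }
        @Override public void visit(Visitor v) {
            Visitor sub = v.subVisitor(Occur.FILTER, this);
            if (sub != null) {  // the null check the review comment asks about
                inner.visit(sub);
            }
        }
    }

    static final class TermLike implements Query {
        private final String term;
        TermLike(String term) { this.term = term; }
        @Override public void visit(Visitor v) { v.visitLeaf(this); }
        @Override public String toString() { return term; }
    }

    public static void main(String[] args) {
        List<String> seen = new ArrayList<>();
        Visitor collectAll = new Visitor() {
            @Override public void visitLeaf(Query q) { seen.add(q.toString()); }
            @Override public Visitor subVisitor(Occur occur, Query parent) { return this; }
        };
        Visitor skipFilters = new Visitor() {
            @Override public void visitLeaf(Query q) { seen.add(q.toString()); }
            @Override public Visitor subVisitor(Occur occur, Query parent) {
                return occur == Occur.FILTER ? null : this;
            }
        };
        new ConstantScoreLike(new TermLike("foo")).visit(collectAll);
        new ConstantScoreLike(new TermLike("bar")).visit(skipFilters);
        System.out.println(seen); // prints [foo]; "bar" is skipped because the sub-visitor was null
    }
}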
@@ -319,8 +319,8 @@ public class SparkOrcWriter implements OrcValueWriter<InternalRow> { // make sure the child is big enough cv.child.ensureSize(cv.childCount, true); // Add each element - for (int e = 0; e < cv.lengths[rowId]; ++e) { - children.addValue((int) (e + cv.offsets[rowId]), e, value, cv.child); + for (long e = 0; e < cv.lengths[rowId]; ++e) { + children.addValue((int) (e + cv.offsets[rowId]), (int) e, value, cv.child); } } }
y: 1
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iceberg.spark.data; import java.util.List; import org.apache.iceberg.orc.OrcValueWriter; import org.apache.orc.TypeDescription; import org.apache.orc.storage.common.type.HiveDecimal; import org.apache.orc.storage.ql.exec.vector.BytesColumnVector; import org.apache.orc.storage.ql.exec.vector.ColumnVector; import org.apache.orc.storage.ql.exec.vector.DecimalColumnVector; import org.apache.orc.storage.ql.exec.vector.DoubleColumnVector; import org.apache.orc.storage.ql.exec.vector.ListColumnVector; import org.apache.orc.storage.ql.exec.vector.LongColumnVector; import org.apache.orc.storage.ql.exec.vector.MapColumnVector; import org.apache.orc.storage.ql.exec.vector.StructColumnVector; import org.apache.orc.storage.ql.exec.vector.TimestampColumnVector; import org.apache.orc.storage.ql.exec.vector.VectorizedRowBatch; import org.apache.spark.sql.catalyst.InternalRow; import org.apache.spark.sql.catalyst.expressions.SpecializedGetters; import org.apache.spark.sql.catalyst.util.ArrayData; import org.apache.spark.sql.catalyst.util.MapData; /** * This class acts as an adaptor from an OrcFileAppender to a * FileAppender&lt;InternalRow&gt;. */ public class SparkOrcWriter implements OrcValueWriter<InternalRow> { private final Converter[] converters; public SparkOrcWriter(TypeDescription schema) { converters = buildConverters(schema); } @Override public void write(InternalRow value, VectorizedRowBatch output) { int row = output.size++; for (int c = 0; c < converters.length; ++c) { converters[c].addValue(row, c, value, output.cols[c]); } } /** * The interface for the conversion from Spark's SpecializedGetters to * ORC's ColumnVectors. */ interface Converter { /** * Take a value from the Spark data value and add it to the ORC output. * @param rowId the row in the ColumnVector * @param column either the column number or element number * @param data either an InternalRow or ArrayData * @param output the ColumnVector to put the value into */ void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output); } static class BooleanConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((LongColumnVector) output).vector[rowId] = data.getBoolean(column) ? 
1 : 0; } } } static class ByteConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((LongColumnVector) output).vector[rowId] = data.getByte(column); } } } static class ShortConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((LongColumnVector) output).vector[rowId] = data.getShort(column); } } } static class IntConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((LongColumnVector) output).vector[rowId] = data.getInt(column); } } } static class LongConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((LongColumnVector) output).vector[rowId] = data.getLong(column); } } } static class FloatConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((DoubleColumnVector) output).vector[rowId] = data.getFloat(column); } } } static class DoubleConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((DoubleColumnVector) output).vector[rowId] = data.getDouble(column); } } } static class StringConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; byte[] value = data.getUTF8String(column).getBytes(); ((BytesColumnVector) output).setRef(rowId, value, 0, value.length); } } } static class BytesConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; // getBinary always makes a copy, so we don't need to worry about it // being changed behind our back. 
byte[] value = data.getBinary(column); ((BytesColumnVector) output).setRef(rowId, value, 0, value.length); } } } static class TimestampConverter implements Converter { @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; TimestampColumnVector cv = (TimestampColumnVector) output; long micros = data.getLong(column); cv.time[rowId] = micros / 1_000; // millis cv.nanos[rowId] = (int) (micros % 1_000_000) * 1_000; // nanos } } } static class Decimal18Converter implements Converter { private final int precision; private final int scale; Decimal18Converter(TypeDescription schema) { precision = schema.getPrecision(); scale = schema.getScale(); } @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((DecimalColumnVector) output).vector[rowId].setFromLongAndScale( data.getDecimal(column, precision, scale).toUnscaledLong(), scale); } } } static class Decimal38Converter implements Converter { private final int precision; private final int scale; Decimal38Converter(TypeDescription schema) { precision = schema.getPrecision(); scale = schema.getScale(); } @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ((DecimalColumnVector) output).vector[rowId].set( HiveDecimal.create(data.getDecimal(column, precision, scale) .toJavaBigDecimal())); } } } static class StructConverter implements Converter { private final Converter[] children; StructConverter(TypeDescription schema) { children = new Converter[schema.getChildren().size()]; for (int c = 0; c < children.length; ++c) { children[c] = buildConverter(schema.getChildren().get(c)); } } @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; InternalRow value = data.getStruct(column, children.length); StructColumnVector cv = (StructColumnVector) output; for (int c = 0; c < children.length; ++c) { children[c].addValue(rowId, c, value, cv.fields[c]); } } } } static class ListConverter implements Converter { private final Converter children; ListConverter(TypeDescription schema) { children = buildConverter(schema.getChildren().get(0)); } @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; ArrayData value = data.getArray(column); ListColumnVector cv = (ListColumnVector) output; // record the length and start of the list elements cv.lengths[rowId] = value.numElements(); cv.offsets[rowId] = cv.childCount; cv.childCount += cv.lengths[rowId]; // make sure the child is big enough cv.child.ensureSize(cv.childCount, true); // Add each element for (int e = 0; e < cv.lengths[rowId]; ++e) { children.addValue((int) (e + cv.offsets[rowId]), e, value, cv.child); } } } } static class MapConverter implements Converter { private final Converter keyConverter; private final Converter valueConverter; MapConverter(TypeDescription schema) { keyConverter 
= buildConverter(schema.getChildren().get(0)); valueConverter = buildConverter(schema.getChildren().get(1)); } @Override public void addValue(int rowId, int column, SpecializedGetters data, ColumnVector output) { if (data.isNullAt(column)) { output.noNulls = false; output.isNull[rowId] = true; } else { output.isNull[rowId] = false; MapData map = data.getMap(column); ArrayData key = map.keyArray(); ArrayData value = map.valueArray(); MapColumnVector cv = (MapColumnVector) output; // record the length and start of the list elements cv.lengths[rowId] = value.numElements(); cv.offsets[rowId] = cv.childCount; cv.childCount += cv.lengths[rowId]; // make sure the child is big enough cv.keys.ensureSize(cv.childCount, true); cv.values.ensureSize(cv.childCount, true); // Add each element for (int e = 0; e < cv.lengths[rowId]; ++e) { int pos = (int) (e + cv.offsets[rowId]); keyConverter.addValue(pos, e, key, cv.keys); valueConverter.addValue(pos, e, value, cv.values); } } } } private static Converter buildConverter(TypeDescription schema) { switch (schema.getCategory()) { case BOOLEAN: return new BooleanConverter(); case BYTE: return new ByteConverter(); case SHORT: return new ShortConverter(); case DATE: case INT: return new IntConverter(); case LONG: return new LongConverter(); case FLOAT: return new FloatConverter(); case DOUBLE: return new DoubleConverter(); case BINARY: return new BytesConverter(); case STRING: case CHAR: case VARCHAR: return new StringConverter(); case DECIMAL: return schema.getPrecision() <= 18 ? new Decimal18Converter(schema) : new Decimal38Converter(schema); case TIMESTAMP: return new TimestampConverter(); case STRUCT: return new StructConverter(schema); case LIST: return new ListConverter(schema); case MAP: return new MapConverter(schema); } throw new IllegalArgumentException("Unhandled type " + schema); } private static Converter[] buildConverters(TypeDescription schema) { if (schema.getCategory() != TypeDescription.Category.STRUCT) { throw new IllegalArgumentException("Top level must be a struct " + schema); } List<TypeDescription> children = schema.getChildren(); Converter[] result = new Converter[children.size()]; for (int c = 0; c < children.size(); ++c) { result[c] = buildConverter(children.get(c)); } return result; } }
idx: 1
id: 17,666
msg: This doesn't make sense to me. Should `cv.lengths` be an integer array instead of longs? It looks like they should always be integers since the values are assigned from `ArrayData#numElements()`. That's very likely an int.
proj: apache-iceberg
lang: java
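The review comment asks whether cv.lengths needs to hold longs at all, given that the values are assigned from ArrayData#numElements(), which returns an int. The sketch below is a minimal stand-alone illustration of what the patch changes, using plain long[] arrays as stand-ins for the ListColumnVector fields (which are long-typed in ORC's storage-api): an int counter compared against a long bound is widened automatically, and the long counter introduced by the patch simply moves the narrowing cast to the points where an int is required.

public class LoopCounterSketch {
    public static void main(String[] args) {
        // Hypothetical stand-ins for ListColumnVector.lengths / offsets.
        long[] lengths = new long[1];
        long[] offsets = new long[1];
        int numElements = 3;       // what ArrayData#numElements() would return (an int)
        lengths[0] = numElements;  // the int value is widened into the long slot
        offsets[0] = 10;

        // Before the patch: int counter, implicitly widened for the comparison with the long bound.
        for (int e = 0; e < lengths[0]; ++e) {
            int pos = (int) (e + offsets[0]);
            System.out.println("int counter  -> pos=" + pos + " element=" + e);
        }

        // After the patch: long counter, narrowed back to int wherever an int index is required.
        for (long e = 0; e < lengths[0]; ++e) {
            int pos = (int) (e + offsets[0]);
            System.out.println("long counter -> pos=" + pos + " element=" + (int) e);
        }
    }
}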
@@ -1920,7 +1920,18 @@ namespace pwiz.Skyline.Model.Results } else if (id.StartsWith(MsDataFileImpl.PREFIX_PRECURSOR)) { - precursor = double.Parse(id.Substring(MsDataFileImpl.PREFIX_TOTAL.Length), CultureInfo.InvariantCulture); + var str = id.Substring(MsDataFileImpl.PREFIX_PRECURSOR.Length); + if (!double.TryParse(str, NumberStyles.Float, CultureInfo.InvariantCulture, out precursor)) + { + if (str.StartsWith(@"Q1=")) + { + str = str.Substring(3); + var tail = str.IndexOf(' '); + if (tail > 0) + str = str.Substring(0, tail); + } + precursor = double.Parse(str, CultureInfo.InvariantCulture); + } product = precursor; } else if (id.StartsWith(MsDataFileImpl.PREFIX_SINGLE))
y: 1
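The patch above fixes the substring to use the precursor prefix's own length and adds a fallback for chromatogram ids whose precursor field is not a bare number but looks like "Q1=<m/z> ...". Below is a rough Java re-expression of that fallback logic; the prefix value and the example ids are illustrative only, not Skyline's actual constants, and Double.parseDouble serves as a culture-invariant stand-in for double.Parse with CultureInfo.InvariantCulture.

public class PrecursorIdParseSketch {
    // Hypothetical prefix; the real value lives in MsDataFileImpl.PREFIX_PRECURSOR.
    static final String PREFIX_PRECURSOR = "precursor=";

    // Mirrors the patched logic: parse the text after the prefix as a double, and if
    // that fails, strip a leading "Q1=" and anything from the first space onward.
    static double parsePrecursor(String id) {
        String str = id.substring(PREFIX_PRECURSOR.length());
        try {
            return Double.parseDouble(str);
        } catch (NumberFormatException e) {
            if (str.startsWith("Q1=")) {
                str = str.substring(3);
                int tail = str.indexOf(' ');
                if (tail > 0) {
                    str = str.substring(0, tail);
                }
            }
            return Double.parseDouble(str); // still throws if the id is malformed
        }
    }

    public static void main(String[] args) {
        System.out.println(parsePrecursor("precursor=514.23"));             // plain numeric id
        System.out.println(parsePrecursor("precursor=Q1=514.23 Q3=623.1")); // prefixed, space-delimited id
    }
}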
/* * Original author: Brendan MacLean <brendanx .at. u.washington.edu>, * MacCoss Lab, Department of Genome Sciences, UW * * Copyright 2009 University of Washington - Seattle, WA * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ using System; using System.Collections.Generic; using System.Globalization; using System.IO; using System.Linq; using System.Runtime.InteropServices; using System.Text; using System.Text.RegularExpressions; using pwiz.Common.Chemistry; using pwiz.Common.Collections; using pwiz.Common.PeakFinding; using pwiz.Common.SystemUtil; using pwiz.ProteowizardWrapper; using pwiz.Skyline.Model.DocSettings; using pwiz.Skyline.Model.Results.Crawdad; using pwiz.Skyline.Model.Results.Scoring; using pwiz.Skyline.Properties; using pwiz.Skyline.Util; using pwiz.Skyline.Util.Extensions; namespace pwiz.Skyline.Model.Results { // This format was in use prior to Feb 2013, when the peak scoring work was added [StructLayout(LayoutKind.Sequential, Pack=4)] public struct ChromGroupHeaderInfo4 { public ChromGroupHeaderInfo4(float precursor, int fileIndex, int numTransitions, int startTransitionIndex, int numPeaks, int startPeakIndex, int maxPeakIndex, int numPoints, int compressedSize, long location) : this() { Precursor = precursor; FileIndex = fileIndex; NumTransitions = numTransitions; StartTransitionIndex = startTransitionIndex; NumPeaks = numPeaks; StartPeakIndex = startPeakIndex; MaxPeakIndex = maxPeakIndex; NumPoints = numPoints; CompressedSize = compressedSize; Align = 0; LocationPoints = location; } public ChromGroupHeaderInfo4(ChromGroupHeaderInfo header) : this() { Precursor = (float) header.Precursor; FileIndex = header.FileIndex; NumTransitions = header.NumTransitions; StartTransitionIndex = header.StartTransitionIndex; NumPeaks = header.NumPeaks; StartPeakIndex = header.StartPeakIndex; MaxPeakIndex = header.MaxPeakIndex; NumPoints = header.NumPoints; CompressedSize = header.CompressedSize; LocationPoints = header.LocationPoints; } public float Precursor { get; set; } public int FileIndex { get; private set; } public int NumTransitions { get; private set; } public int StartTransitionIndex { get; private set; } public int NumPeaks { get; private set; } public int StartPeakIndex { get; private set; } public int MaxPeakIndex { get; private set; } public int NumPoints { get; private set; } public int CompressedSize { get; private set; } public int Align { get; private set; } // Need even number of 4-byte values public long LocationPoints { get; private set; } public void Offset(int offsetFiles, int offsetTransitions, int offsetPeaks, long offsetPoints) { FileIndex += offsetFiles; StartTransitionIndex += offsetTransitions; StartPeakIndex += offsetPeaks; LocationPoints += offsetPoints; } public static StructSerializer<ChromGroupHeaderInfo4> StructSerializer() { return new StructSerializer<ChromGroupHeaderInfo4>() { DirectSerializer = DirectSerializer.Create(ReadArray, WriteArray) }; } #region Fast file I/O /// <summary> /// Direct read of an entire array using p-invoke of Win32 WriteFile. 
This seems /// to coexist with FileStream reading that the write version, but its use case /// is tightly limited. /// <para> /// Contributed by Randy Kern. See: /// http://randy.teamkern.net/2009/02/reading-arrays-from-files-in-c-without-extra-copy.html /// </para> /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="count">Number of elements to read</param> /// <returns>New array of elements</returns> private static unsafe ChromGroupHeaderInfo4[] ReadArray(SafeHandle file, int count) { ChromGroupHeaderInfo4[] results = new ChromGroupHeaderInfo4[count]; fixed (ChromGroupHeaderInfo4* p = results) { FastRead.ReadBytes(file, (byte*)p, sizeof(ChromGroupHeaderInfo4) * count); } return results; } /// <summary> /// Direct write of an entire array throw p-invoke of Win32 WriteFile. This cannot /// be mixed with standard writes to a FileStream, or .NET throws an exception /// about the file location not being what it expected. /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="groupHeaders">The array to write</param> public static unsafe void WriteArray(SafeHandle file, ChromGroupHeaderInfo4[] groupHeaders) { fixed (ChromGroupHeaderInfo4* p = groupHeaders) { FastWrite.WriteBytes(file, (byte*)p, sizeof(ChromGroupHeaderInfo4) * groupHeaders.Length); } } #endregion } [StructLayout(LayoutKind.Sequential, Pack = 1)] public struct ChromGroupHeaderInfo : IComparable<ChromGroupHeaderInfo> { ///////////////////////////////////////////////////////////////////// // CAREFUL: This ordering determines the layout of this struct on // disk from which it gets loaded directly into memory. // The order and size of each element has been very carefully // considered to avoid wasted space due to alignment. // ALSO: With any additions you need to tweak the writer code in // ChromatogramCache.WriteStructs since we write element by element. private int _textIdIndex; private int _startTransitionIndex; private int _startPeakIndex; private int _startScoreIndex; private int _numPoints; private int _compressedSize; private ushort _flagBits; private ushort _fileIndex; private ushort _textIdLen; private ushort _numTransitions; private byte _numPeaks; private byte _maxPeakIndex; private byte _isProcessedScans; private byte _align1; private ushort _statusId; private ushort _statusRank; private double _precursor; private long _locationPoints; // V11 fields private int _uncompressedSize; private float _startTime; private float _endTime; private float _collisionalCrossSection; ///////////////////////////////////////////////////////////////////// [Flags] public enum FlagValues { has_mass_errors = 0x01, has_calculated_mzs = 0x02, extracted_base_peak = 0x04, has_ms1_scan_ids = 0x08, has_sim_scan_ids = 0x10, has_frag_scan_ids = 0x20, polarity_negative = 0x40, // When set, only use negative scans. raw_chromatograms = 0x80, ion_mobility_type_bitmask = 0x700, // 3 bits for ion mobility type none, drift, inverse_mobility, spares dda_acquisition_method = 0x800, extracted_qc_trace = 0x1000 } /// <summary> /// Allow a little fewer points than the data structure can actually hold. /// </summary> public const int MAX_POINTS = ushort.MaxValue - 1000; private const byte NO_MAX_PEAK = 0xFF; /// <summary> /// Constructs header struct with TextIdIndex and TextIdCount left to be initialized /// in a subsequent call to <see cref="CalcTextIdIndex"/>. 
/// </summary> public ChromGroupHeaderInfo(SignedMz precursor, int fileIndex, int numTransitions, int startTransitionIndex, int numPeaks, int startPeakIndex, int startScoreIndex, int maxPeakIndex, int numPoints, int compressedSize, int uncompressedSize, long location, FlagValues flags, int statusId, int statusRank, float? startTime, float? endTime, double? collisionalCrossSection, eIonMobilityUnits ionMobilityUnits) : this(precursor, -1, 0, fileIndex, numTransitions, startTransitionIndex, numPeaks, startPeakIndex, startScoreIndex, maxPeakIndex, numPoints, compressedSize, uncompressedSize, location, flags, statusId, statusRank, startTime, endTime, collisionalCrossSection, ionMobilityUnits) { } /// <summary> /// Cunstructs header struct with all values populated. /// </summary> public ChromGroupHeaderInfo(SignedMz precursor, int textIdIndex, int textIdLen, int fileIndex, int numTransitions, int startTransitionIndex, int numPeaks, int startPeakIndex, int startScoreIndex, int maxPeakIndex, int numPoints, int compressedSize, int uncompressedSize, long location, FlagValues flags, int statusId, int statusRank, float? startTime, float? endTime, double? collisionalCrossSection, eIonMobilityUnits ionMobilityUnits) : this() { _precursor = precursor.Value; if (precursor.IsNegative) { flags |= FlagValues.polarity_negative; } else { flags &= ~FlagValues.polarity_negative; } flags = (flags & ~FlagValues.ion_mobility_type_bitmask) | (FlagValues) ((int) ionMobilityUnits << 8); _textIdIndex = textIdIndex; _textIdLen = CheckUShort(textIdLen); _fileIndex = CheckUShort(fileIndex); _numTransitions = CheckUShort(numTransitions); _startTransitionIndex = startTransitionIndex; _numPeaks = CheckByte(numPeaks); _startPeakIndex = startPeakIndex; _startScoreIndex = startScoreIndex; _maxPeakIndex = maxPeakIndex != -1 ? CheckByte(maxPeakIndex, byte.MaxValue - 1) : NO_MAX_PEAK; _numPoints = precursor == SignedMz.ZERO ? 0 : CheckUShort(numPoints); _compressedSize = compressedSize; _uncompressedSize = uncompressedSize; _locationPoints = location; _flagBits = (ushort)flags; _statusId = CheckUShort(statusId, true); _statusRank = CheckUShort(statusRank, true); _startTime = startTime ?? -1; _endTime = endTime ?? -1; _collisionalCrossSection = (float)(collisionalCrossSection ?? 
0); if (_startTime < 0) { _startTime = -1; // Unknown } if (_endTime < 0) { _endTime = -1; // Unknown } if (_startTime >= _endTime) { _startTime = _endTime = -1; // Unknown } } public ChromGroupHeaderInfo(ChromGroupHeaderInfo4 headerInfo) : this(new SignedMz(headerInfo.Precursor), headerInfo.FileIndex, headerInfo.NumTransitions, headerInfo.StartTransitionIndex, headerInfo.NumPeaks, headerInfo.StartPeakIndex, -1, headerInfo.MaxPeakIndex, headerInfo.NumPoints, headerInfo.CompressedSize, -1, headerInfo.LocationPoints, 0, -1, -1, null, null, null, eIonMobilityUnits.none) { } public ChromGroupHeaderInfo ChangeChargeToNegative() { // For dealing with pre-V11 caches where we didn't record chromatogram polarity var chromGroupHeaderInfo = this; chromGroupHeaderInfo._flagBits |= (ushort)FlagValues.polarity_negative; return chromGroupHeaderInfo; } private static ushort CheckUShort(int value, bool allowNegativeOne = false) { return (ushort)CheckValue(value, ushort.MinValue, ushort.MaxValue, allowNegativeOne); } private static byte CheckByte(int value, int maxValue = byte.MaxValue) { return (byte)CheckValue(value, byte.MinValue, maxValue); } private static int CheckValue(int value, int min, int max, bool allowNegativeOne = false) { if (min > value || value > max) { if (!allowNegativeOne || value != -1) throw new ArgumentOutOfRangeException(string.Format(@"The value {0} must be between {1} and {2}.", value, min, max)); // CONSIDER: localize? Does user see this? } return value; } public int TextIdIndex { get { return _textIdIndex; } } public int StartTransitionIndex { get { return _startTransitionIndex; } } public int StartPeakIndex { get { return _startPeakIndex; } } public int StartScoreIndex { get { return _startScoreIndex; } } public int NumPoints { get { return _numPoints; } } public int CompressedSize { get { return _compressedSize; } } public ushort FlagBits { get { return _flagBits; } } public ushort FileIndex { get {return _fileIndex;} } public ushort TextIdLen { get { return _textIdLen; } } public ushort NumTransitions { get { return _numTransitions; } } public byte NumPeaks { get { return _numPeaks; } } // The number of peaks stored per chrom should be well under 128 public ushort StatusId { get { return _statusId; } } public ushort StatusRank { get { return _statusRank; } } public long LocationPoints { get{return _locationPoints;} } public int UncompressedSize { get{return _uncompressedSize;} } public bool IsProcessedScans { get { return _isProcessedScans != 0; } } public override string ToString() { return string.Format(@"{0:F04}, {1}, {2}", Precursor, NumTransitions, FileIndex); } public short MaxPeakIndex { get { if (_maxPeakIndex == NO_MAX_PEAK) return -1; return _maxPeakIndex; } } public FlagValues Flags { get { return (FlagValues)FlagBits; } } public bool HasCalculatedMzs { get { return (Flags & FlagValues.has_calculated_mzs) != 0; } } public bool HasMassErrors { get { return (Flags & FlagValues.has_mass_errors) != 0; } } public bool HasMs1ScanIds { get { return (Flags & FlagValues.has_ms1_scan_ids) != 0; } } public bool HasFragmentScanIds { get { return (Flags & FlagValues.has_frag_scan_ids) != 0; } } public bool HasSimScanIds { get { return (Flags & FlagValues.has_sim_scan_ids) != 0; } } public bool HasRawChromatograms { get { return (Flags & FlagValues.raw_chromatograms) != 0; } } public bool IsDda { get { return (Flags & FlagValues.dda_acquisition_method) != 0; } } public float? StartTime { get { return _startTime >= 0 ? _startTime : (float?) 
null; } } // For SRM data with same precursor but different RT interval public float? EndTime { get { return _endTime >= 0 ? _endTime : (float?)null; } } // For SRM data with same precursor but different RT interval public bool HasRawTimes() { return 0 != (Flags & FlagValues.raw_chromatograms); } public bool IsNotIncludedTime(double retentionTime) { return StartTime.HasValue && EndTime.HasValue && (retentionTime < StartTime.Value || EndTime.Value < retentionTime); } public bool NegativeCharge { get { return (Flags & FlagValues.polarity_negative) != 0; } } public SignedMz Precursor { get { return new SignedMz(_precursor, NegativeCharge); } } public float? CollisionalCrossSection { get { if (_collisionalCrossSection <= 0) return null; return _collisionalCrossSection; } } public eIonMobilityUnits IonMobilityUnits { get { return (eIonMobilityUnits)((int)(Flags & FlagValues.ion_mobility_type_bitmask) >> 8); } } public bool HasStatusId { get { return ((short)StatusId) != -1; } } public bool HasStatusRank { get { return ((short)StatusRank) != -1; } } public ChromExtractor Extractor { get { if (Flags.HasFlag(FlagValues.extracted_base_peak)) return ChromExtractor.base_peak; if (Flags.HasFlag(FlagValues.extracted_qc_trace)) return ChromExtractor.qc; return ChromExtractor.summed; } } public void Offset(int offsetFiles, int offsetTransitions, int offsetPeaks, int offsetScores, long offsetPoints) { _fileIndex += (ushort)offsetFiles; _startTransitionIndex += offsetTransitions; _startPeakIndex += offsetPeaks; if (_startScoreIndex != -1) _startScoreIndex += offsetScores; _locationPoints += offsetPoints; } public void ClearScores() { _startScoreIndex = -1; } public void CalcTextIdIndex(Target target, Dictionary<Target, int> dictTextIdToByteIndex, List<byte> listTextIdBytes) { if (target == null) { _textIdIndex = -1; _textIdLen = 0; } else { int textIdIndex; var textIdBytes = Encoding.UTF8.GetBytes(target.ToSerializableString()); if (!dictTextIdToByteIndex.TryGetValue(target, out textIdIndex)) { textIdIndex = listTextIdBytes.Count; listTextIdBytes.AddRange(textIdBytes); dictTextIdToByteIndex.Add(target, textIdIndex); } _textIdIndex = textIdIndex; _textIdLen = (ushort)textIdBytes.Length; } } public int CompareTo(ChromGroupHeaderInfo info) { // Sort by key, and then file index. int keyCompare = Precursor.CompareTo(info.Precursor); if (keyCompare != 0) return keyCompare; return FileIndex - info.FileIndex; } #region Fast file I/O public static IItemSerializer<ChromGroupHeaderInfo> ItemSerializer(int itemSizeOnDisk) { StructSerializer<ChromGroupHeaderInfo> structSerializer = new StructSerializer<ChromGroupHeaderInfo> { ItemSizeOnDisk = itemSizeOnDisk, DirectSerializer = DirectSerializer.Create(ReadArray, WriteArray) }; if (itemSizeOnDisk < GetStructSize(CacheFormatVersion.Eleven)) { return ConvertedItemSerializer.Create(structSerializer, chromGroupHeaderInfo => { chromGroupHeaderInfo._uncompressedSize = -1; return chromGroupHeaderInfo; }, chromGroupHeaderInfo=>chromGroupHeaderInfo); } return structSerializer; } /// <summary> /// Direct read of an entire array throw p-invoke of Win32 WriteFile. This seems /// to coexist with FileStream reading that the write version, but its use case /// is tightly limited. /// <para> /// Contributed by Randy Kern. 
See: /// http://randy.teamkern.net/2009/02/reading-arrays-from-files-in-c-without-extra-copy.html /// </para> /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="count">Number of elements to read</param> /// <returns>New array of elements</returns> private static unsafe ChromGroupHeaderInfo[] ReadArray(SafeHandle file, int count) { ChromGroupHeaderInfo[] results = new ChromGroupHeaderInfo[count]; fixed (ChromGroupHeaderInfo* p = results) { FastRead.ReadBytes(file, (byte*)p, sizeof(ChromGroupHeaderInfo) * count); } return results; } /// <summary> /// Direct write of an entire array throw p-invoke of Win32 WriteFile. This cannot /// be mixed with standard writes to a FileStream, or .NET throws an exception /// about the file location not being what it expected. /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="groupHeaders">The array to write</param> public static unsafe void WriteArray(SafeHandle file, ChromGroupHeaderInfo[] groupHeaders) { fixed (ChromGroupHeaderInfo* p = groupHeaders) { FastWrite.WriteBytes(file, (byte*)p, sizeof(ChromGroupHeaderInfo) * groupHeaders.Length); } } #endregion // Set default block size for BlockedArray<ChromTransition> public const int DEFAULT_BLOCK_SIZE = 100 * 1024 * 1024; // 100 megabytes public static unsafe int SizeOf { get { return sizeof(ChromGroupHeaderInfo); } } // For test purposes public static int DeltaSize11 { get { return Marshal.SizeOf<ChromGroupHeaderInfo>() - (int) Marshal.OffsetOf<ChromGroupHeaderInfo>(@"_uncompressedSize"); } } public static int GetStructSize(CacheFormatVersion cacheFormatVersion) { if (cacheFormatVersion <= CacheFormatVersion.Four) { return 48; } if (cacheFormatVersion < CacheFormatVersion.Eleven) { return 56; } return 72; } } /// <summary> /// Holds a ChromGroupHeaderInfo, and also remembers an index to disambiguate /// when two ChromGroupHeaderInfo's compare the same. /// </summary> public struct ChromGroupHeaderEntry : IComparable<ChromGroupHeaderEntry> { public ChromGroupHeaderEntry(int index, ChromGroupHeaderInfo chromGroupHeaderInfo) : this() { Index = index; ChromGroupHeaderInfo = chromGroupHeaderInfo; } public int Index { get; private set; } public ChromGroupHeaderInfo ChromGroupHeaderInfo { get; private set; } public int CompareTo(ChromGroupHeaderEntry other) { int result = ChromGroupHeaderInfo.CompareTo(other.ChromGroupHeaderInfo); if (result == 0) { result = Index.CompareTo(other.Index); } return result; } } public struct ChromTransition4 { public ChromTransition4(float product) : this() { Product = product; } public ChromTransition4(ChromTransition chromTransition) : this((float) chromTransition.Product) { } public float Product { get; private set; } #region Fast file I/O public static IItemSerializer<ChromTransition4> StructSerializer() { return new StructSerializer<ChromTransition4> { DirectSerializer = DirectSerializer.Create(ReadArray, WriteArray), }; } /// <summary> /// Direct read of an entire array throw p-invoke of Win32 WriteFile. This seems /// to coexist with FileStream reading that the write version, but its use case /// is tightly limited. /// <para> /// Contributed by Randy Kern. 
See: /// http://randy.teamkern.net/2009/02/reading-arrays-from-files-in-c-without-extra-copy.html /// </para> /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="count">Number of elements to read</param> /// <returns>New array of elements</returns> private static unsafe ChromTransition4[] ReadArray(SafeHandle file, int count) { ChromTransition4[] results = new ChromTransition4[count]; fixed (ChromTransition4* p = results) { FastRead.ReadBytes(file, (byte*)p, sizeof(ChromTransition4) * count); } return results; } /// <summary> /// Direct write of an entire array throw p-invoke of Win32 WriteFile. This cannot /// be mixed with standard writes to a FileStream, or .NET throws an exception /// about the file location not being what it expected. /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="setHeaders">The array to write</param> public static unsafe void WriteArray(SafeHandle file, ChromTransition4[] setHeaders) { fixed (ChromTransition4* p = &setHeaders[0]) { FastWrite.WriteBytes(file, (byte*)p, sizeof(ChromTransition4) * setHeaders.Length); } } #endregion #region object overrides public override string ToString() { return Product.ToString(LocalizationHelper.CurrentCulture); } #endregion } public struct ChromTransition5 { [Flags] public enum FlagValues { source1 = 0x01, // unknown = 00, fragment = 01 source2 = 0x02, // ms1 = 10, sim = 11 } public ChromTransition5(double product, float extractionWidth, ChromSource source) : this() { Product = product; ExtractionWidth = extractionWidth; Source = source; Align1 = 0; } public ChromTransition5(ChromTransition4 chromTransition4) : this(chromTransition4.Product, 0, ChromSource.unknown) { } public ChromTransition5(ChromTransition chromTransition) : this() { Product = chromTransition.Product; ExtractionWidth = chromTransition.ExtractionWidth; Source = chromTransition.Source; } public double Product { get; private set; } public float ExtractionWidth { get; private set; } // In m/z public ushort FlagBits { get; private set; } public ushort Align1 { get; private set; } // Explicitly declaring alignment padding the compiler will add anyway public FlagValues Flags { get { return (FlagValues) FlagBits; } } public ChromSource Source { get { // CONSIDER: Could just mask and cast switch (Flags & (FlagValues.source1 | FlagValues.source2)) { case 0: return ChromSource.unknown; case FlagValues.source2: return ChromSource.fragment; case FlagValues.source1: return ChromSource.ms1; default: return ChromSource.sim; } } set { FlagBits = (ushort) GetFlags(value); } } public FlagValues GetFlags(ChromSource source) { // CONSIDER: Could just cast switch (source) { case ChromSource.unknown: return 0; case ChromSource.fragment: return FlagValues.source2; case ChromSource.ms1: return FlagValues.source1; default: return FlagValues.source1 | FlagValues.source2; } } #region Fast file I/O public static IItemSerializer<ChromTransition5> StructSerializer() { return new StructSerializer<ChromTransition5>() { DirectSerializer = DirectSerializer.Create(ReadArray, WriteArray), }; } /// <summary> /// A 2x slower version of ReadArray than <see cref="ReadArray(SafeHandle,int)"/> /// that does not require a file handle. 
This one is covered in Randy Kern's blog, /// but is originally from Eric Gunnerson: /// <para> /// http://blogs.msdn.com/ericgu/archive/2004/04/13/112297.aspx /// </para> /// </summary> /// <param name="stream">Stream to from which to read the elements</param> /// <param name="count">Number of elements to read</param> /// <returns>New array of elements</returns> public static unsafe ChromTransition5[] ReadArray(Stream stream, int count) { // Use fast version, if this is a file var fileStream = stream as FileStream; if (fileStream != null) { try { return ReadArray(fileStream.SafeFileHandle, count); } catch (BulkReadException) { // Fall through and attempt to read the slow way } } // CONSIDER: Probably faster in this case to read the entire block, // and convert from bytes to single float values. ChromTransition5[] results = new ChromTransition5[count]; int size = sizeof (ChromTransition5); byte[] buffer = new byte[size]; for (int i = 0; i < count; ++i) { if (stream.Read(buffer, 0, size) != size) throw new InvalidDataException(); fixed (byte* pBuffer = buffer) { results[i] = *(ChromTransition5*) pBuffer; } } return results; } /// <summary> /// Direct read of an entire array throw p-invoke of Win32 WriteFile. This seems /// to coexist with FileStream reading that the write version, but its use case /// is tightly limited. /// <para> /// Contributed by Randy Kern. See: /// http://randy.teamkern.net/2009/02/reading-arrays-from-files-in-c-without-extra-copy.html /// </para> /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="count">Number of elements to read</param> /// <returns>New array of elements</returns> private static unsafe ChromTransition5[] ReadArray(SafeHandle file, int count) { ChromTransition5[] results = new ChromTransition5[count]; fixed (ChromTransition5* p = results) { FastRead.ReadBytes(file, (byte*)p, sizeof(ChromTransition5) * count); } return results; } /// <summary> /// Direct write of an entire array throw p-invoke of Win32 WriteFile. This cannot /// be mixed with standard writes to a FileStream, or .NET throws an exception /// about the file location not being what it expected. 
/// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="setHeaders">The array to write</param> public static unsafe void WriteArray(SafeHandle file, ChromTransition5[] setHeaders) { fixed (ChromTransition5* p = &setHeaders[0]) { FastWrite.WriteBytes(file, (byte*)p, sizeof(ChromTransition5) * setHeaders.Length); } } #endregion #region object overrides /// <summary> /// For debugging only /// </summary> public override string ToString() { return string.Format(@"{0:F04} - {1}", Product, Source); } #endregion public static unsafe int DeltaSize5 { get { return sizeof(ChromTransition5) - sizeof(ChromTransition4); } } } /// <summary> /// Version 8 of ChromTransition adds ion mobility information /// </summary> [StructLayout(LayoutKind.Sequential, Pack=4)] public struct ChromTransition { private double _product; private float _extractionWidth; private float _ionMobilityValue; private float _ionMobilityExtractionWidth; private ushort _flagBits; private ushort _align1; [Flags] public enum FlagValues { unknown = 0x00, ms1 = 0x01, fragment = 0x02, sim = 0x03, missing_mass_errors = 0x04, } const FlagValues MASK_SOURCE = (FlagValues) 0x03; public ChromTransition(double product, float extractionWidth, float ionMobilityValue, float ionMobilityExtractionWidth, ChromSource source) : this() { _product = product; _extractionWidth = extractionWidth; _ionMobilityValue = ionMobilityValue; _ionMobilityExtractionWidth = ionMobilityExtractionWidth; Source = source; } public ChromTransition(ChromTransition5 chromTransition5) : this(chromTransition5.Product, // There was an issue with Manage Results > Rescore, which made it possible to corrupt // the chromatogram source until a commit by Nick in March 2014, and Brian introduced // the next version of this struct in the May, 2014. So considering the source unknown // for these older files seems safest, since we are moving to paying attention to the // source for chromatogram to transition matching. chromTransition5.ExtractionWidth, 0, 0, ChromSource.unknown) { } public ChromTransition(ChromTransition4 chromTransition4) : this(chromTransition4.Product, 0, 0, 0, ChromSource.unknown) { } public double Product { get { return _product; } } public float ExtractionWidth { get { return _extractionWidth; }} // In m/z public float IonMobilityValue { get { return _ionMobilityValue; } } // Units depend on ion mobility type public float IonMobilityExtractionWidth { get { return _ionMobilityExtractionWidth; } } // Units depend on ion mobility type public FlagValues Flags { get { return (FlagValues) _flagBits; } set { _flagBits = (ushort) value; } } public ChromSource Source { get { switch (Flags & MASK_SOURCE) { case FlagValues.unknown: return ChromSource.unknown; case FlagValues.fragment: return ChromSource.fragment; case FlagValues.ms1: return ChromSource.ms1; default: return ChromSource.sim; } } set { Flags = GetSourceFlags(value) | (Flags & ~MASK_SOURCE); } } public bool MissingMassErrors { get { return (Flags & FlagValues.missing_mass_errors) != 0; } set { Flags = (Flags & ~FlagValues.missing_mass_errors) | (value ? 
FlagValues.missing_mass_errors : 0); } } public static FlagValues GetSourceFlags(ChromSource source) { switch (source) { case ChromSource.unknown: return FlagValues.unknown; case ChromSource.fragment: return FlagValues.fragment; case ChromSource.ms1: return FlagValues.ms1; default: return FlagValues.sim; } } // Set default block size for BlockedArray<ChromTransition> public const int DEFAULT_BLOCK_SIZE = 100 * 1024 * 1024; // 100 megabytes // sizeof(ChromPeak) public static int SizeOf { get { unsafe { return sizeof(ChromTransition); } } } #region Fast file I/O public static StructSerializer<ChromTransition> StructSerializer(int structSizeOnDisk) { return new StructSerializer<ChromTransition>() { DirectSerializer = DirectSerializer.Create(ReadArray, null), ItemSizeOnDisk = structSizeOnDisk, }; } /// <summary> /// Direct read of an entire array throw p-invoke of Win32 WriteFile. This seems /// to coexist with FileStream reading that the write version, but its use case /// is tightly limited. /// <para> /// Contributed by Randy Kern. See: /// http://randy.teamkern.net/2009/02/reading-arrays-from-files-in-c-without-extra-copy.html /// </para> /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="count">Number of elements to read</param> /// <returns>New array of elements</returns> private static unsafe ChromTransition[] ReadArray(SafeHandle file, int count) { ChromTransition[] results = new ChromTransition[count]; fixed (ChromTransition* p = results) { FastRead.ReadBytes(file, (byte*)p, sizeof(ChromTransition) * count); } return results; } public static ChromTransition[] ReadArray(Stream stream, int count) { return new StructSerializer<ChromTransition>().ReadArray(stream, count); } public static int GetStructSize(CacheFormatVersion cacheFormatVersion) { if (cacheFormatVersion < CacheFormatVersion.Five) { return 4; } if (cacheFormatVersion <= CacheFormatVersion.Six) { return 16; } return 24; } // // NOTE: writing is handled by ChromatogramCache::WriteStructs, so any members // added here need to be added there - and in the proper order! 
// #endregion #region object overrides /// <summary> /// For debugging only /// </summary> public override string ToString() { return string.Format(@"{0:F04} {1:F04} - {2}", Product, IonMobilityValue, Source); } #endregion } [StructLayout(LayoutKind.Sequential, Pack = 4)] public struct ChromPeak : ISummaryPeakData { private readonly float _retentionTime; private readonly float _startTime; private readonly float _endTime; private readonly float _area; private readonly float _backgroundArea; private readonly float _height; private readonly float _fwhm; private uint _flagBits; private readonly short _pointsAcross; [Flags] public enum FlagValues { degenerate_fwhm = 0x0001, forced_integration = 0x0002, time_normalized = 0x0004, peak_truncation_known = 0x0008, peak_truncated = 0x0010, contains_id = 0x0020, used_id_alignment = 0x0040, // This is the last available flag // The high word of the flags is reserved for delta-mass-error mass_error_known = 0x8000, } // ReSharper disable InconsistentNaming // ReSharper disable UnassignedField.Global public static ChromPeak EMPTY; // Zero filled struct // ReSharper restore UnassignedField.Global // ReSharper restore InconsistentNaming // Set default block size for BlockedArray<ChromPeak> public const int DEFAULT_BLOCK_SIZE = 100*1024*1024; // 100 megabytes // sizeof(ChromPeak) public static int SizeOf { get { unsafe { return sizeof (ChromPeak); } } } public static short To10x(double f) { return (short) Math.Round(f*10); } public ChromPeak(IPeakFinder finder, IFoundPeak peak, FlagValues flags, TimeIntensities timeIntensities, IList<float> rawTimes) : this() { var times = timeIntensities.Times; var intensities = timeIntensities.Intensities; var massErrors = timeIntensities.MassErrors; // Get the interval being used to convert from Crawdad index based numbers // to numbers that are normalized with respect to time. double interval; if (peak.StartIndex + 1 < timeIntensities.NumPoints) { interval = times[peak.StartIndex + 1] - times[peak.StartIndex]; } else { interval = 0; } _retentionTime = times[peak.TimeIndex]; _startTime = times[peak.StartIndex]; _endTime = times[peak.EndIndex]; if ((flags & FlagValues.time_normalized) == 0 || finder.IsHeightAsArea) { _area = peak.Area; _backgroundArea = peak.BackgroundArea; } else { // Normalize area numbers by time in seconds, since this will be the least // dramatic change from Skyline v0.5, when the Crawdad index based areas // were used directly. double intervalSeconds = interval * 60; _area = (float)(peak.Area * intervalSeconds); _backgroundArea = (float) (peak.BackgroundArea * intervalSeconds); } _height = peak.Height; _fwhm = (float) (peak.Fwhm * interval); if (float.IsNaN(Fwhm)) _fwhm = 0; if (peak.FwhmDegenerate) flags |= FlagValues.degenerate_fwhm; // Calculate peak truncation as a peak extent at either end of the // recorded values, where the intensity is higher than the other extent // by more than 1% of the peak height. 
flags |= FlagValues.peak_truncation_known; const double truncationTolerance = 0.01; double deltaIntensityExtents = (intensities[peak.EndIndex] - intensities[peak.StartIndex]) / Height; if ((peak.StartIndex == 0 && deltaIntensityExtents < -truncationTolerance) || (peak.EndIndex == times.Count - 1 && deltaIntensityExtents > truncationTolerance)) { flags |= FlagValues.peak_truncated; } if (massErrors != null) { // Mass error is mean of mass errors in the peak, weighted by intensity double massError = 0; double totalIntensity = 0; // Subtract background intensity to reduce noise contribution to this mean value double backgroundIntensity = Math.Min(intensities[peak.StartIndex], intensities[peak.EndIndex]); for (int i = peak.StartIndex; i <= peak.EndIndex; i++) { double intensity = intensities[i] - backgroundIntensity; if (intensity <= 0) continue; double massErrorLocal = massErrors[i]; totalIntensity += intensity; massError += (massErrorLocal - massError)*intensity/totalIntensity; } // Only if intensity exceded the background at least once if (totalIntensity > 0) { flags |= FlagValues.mass_error_known; FlagBits = ((uint)To10x(massError)) << 16; } } FlagBits |= (uint) flags; if (rawTimes != null) { int startIndex = CollectionUtil.BinarySearch(rawTimes, StartTime); if (startIndex < 0) { startIndex = ~startIndex; } int endIndex = CollectionUtil.BinarySearch(rawTimes, EndTime); if (endIndex < 0) { endIndex = ~endIndex - 1; } int pointsAcross = endIndex - startIndex + 1; if (pointsAcross >= 0) { _pointsAcross = (short) Math.Min(pointsAcross, ushort.MaxValue); } } } public float RetentionTime { get { return _retentionTime; } } public float StartTime { get { return _startTime; } } public float EndTime { get { return _endTime; } } public float Area { get { return _area; } } public float BackgroundArea { get { return _backgroundArea; } } public float Height { get { return _height; } } public float Fwhm { get { return _fwhm; } } public uint FlagBits { get { return _flagBits; } private set { _flagBits = value; } } public short? PointsAcross { get { return _pointsAcross == 0 ? (short?)null : _pointsAcross; } } public override string ToString() { return string.Format(@"rt={0:F02}, area={1}", RetentionTime, Area); } public FlagValues Flags { get { // Mask off mass error bits return (FlagValues) (FlagBits & 0xFFFF); } } public bool IsEmpty { get { return EndTime == 0; } } public bool ContainsTime(float retentionTime) { return StartTime <= retentionTime && retentionTime <= EndTime; } public bool IsFwhmDegenerate { get { return (Flags & FlagValues.degenerate_fwhm) != 0; } } public bool IsForcedIntegration { get { return (Flags & FlagValues.forced_integration) != 0; } } public PeakIdentification Identified { get { if ((Flags & FlagValues.contains_id) == 0) return PeakIdentification.FALSE; else if ((Flags & FlagValues.used_id_alignment) == 0) return PeakIdentification.TRUE; return PeakIdentification.ALIGNED; } } public bool? IsTruncated { get { if ((Flags & FlagValues.peak_truncation_known) == 0) return null; return (Flags & FlagValues.peak_truncated) != 0; } } public float? MassError { get { if ((FlagBits & (uint) FlagValues.mass_error_known) == 0) return null; // Mass error is stored in the high 16 bits of the Flags // as 10x the calculated mass error in PPM. return ((short)(FlagBits >> 16))/10f; } } /// <summary> /// Removes the mass error bits from the upper 16 in order to keep /// from writing mass errors into older cache file formats until /// the v5 format version is ready. 
/// </summary> public ChromPeak RemoveMassError() { var copy = this; copy.FlagBits = (uint) (Flags & ~FlagValues.mass_error_known); return copy; } public static float Intersect(ChromPeak peak1, ChromPeak peak2) { return Intersect(peak1.StartTime, peak1.EndTime, peak2.StartTime, peak2.EndTime); } public static float Intersect(float startTime1, float endTime1, float startTime2, float endTime2) { return Math.Min(endTime1, endTime2) - Math.Max(startTime1, startTime2); } public static int GetStructSize(CacheFormatVersion formatVersion) { if (formatVersion < CacheFormatVersion.Twelve) { return 32; } return 36; } public static StructSerializer<ChromPeak> StructSerializer(int chromPeakSize) { return new StructSerializer<ChromPeak> { ItemSizeOnDisk = chromPeakSize, DirectSerializer = DirectSerializer.Create(ReadArray, WriteArray) }; } #region Fast file I/O /// <summary> /// Direct read of an entire array throw p-invoke of Win32 WriteFile. This seems /// to coexist with FileStream reading that the write version, but its use case /// is tightly limited. /// <para> /// Contributed by Randy Kern. See: /// http://randy.teamkern.net/2009/02/reading-arrays-from-files-in-c-without-extra-copy.html /// </para> /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="count">Number of elements to read</param> /// <returns>New array of elements</returns> private static unsafe ChromPeak[] ReadArray(SafeHandle file, int count) { ChromPeak[] results = new ChromPeak[count]; if (count > 0) { fixed (ChromPeak* p = results) { FastRead.ReadBytes(file, (byte*)p, sizeof(ChromPeak) * count); } } return results; } /// <summary> /// Direct write of an entire array throw p-invoke of Win32 WriteFile. This cannot /// be mixed with standard writes to a FileStream, or .NET throws an exception /// about the file location not being what it expected. /// </summary> /// <param name="file">File handler returned from <see cref="FileStream.SafeFileHandle"/></param> /// <param name="headers">The array to write</param> public static unsafe void WriteArray(SafeHandle file, ChromPeak[] headers) { fixed (ChromPeak* p = headers) { FastWrite.WriteBytes(file, (byte*)p, sizeof(ChromPeak) * headers.Length); } } public static byte[] GetBytes(ChromPeak p) { int size = Marshal.SizeOf(p); byte[] arr = new byte[size]; IntPtr ptr = Marshal.AllocHGlobal(size); Marshal.StructureToPtr(p, ptr, true); Marshal.Copy(ptr, arr, 0, size); Marshal.FreeHGlobal(ptr); return arr; } #endregion } public class ChromCachedFile : Immutable, IPathContainer { [Flags] public enum FlagValues { single_match_mz_known = 0x01, single_match_mz = 0x02, has_midas_spectra = 0x04, has_combined_ion_mobility = 0x08, ion_mobility_type_bitmask = 0x70, // 3 bits for ion mobility type drift, inverse_mobility, spares // 0x80 available used_ms1_centroids = 0x100, used_ms2_centroids = 0x200, } public static DateTime GetLastWriteTime(MsDataFileUri filePath) { return filePath.GetFileLastWriteTime(); } public static bool? 
IsSingleMatchMzFlags(FlagValues flags) { if ((flags & FlagValues.single_match_mz_known) == 0) return null; return (flags & FlagValues.single_match_mz) != 0; } private static bool HasMidasSpectraFlags(FlagValues flags) { return (flags & FlagValues.has_midas_spectra) != 0; } private static bool HasCombinedIonMobilityFlags(FlagValues flags) { return (flags & FlagValues.has_combined_ion_mobility) != 0; } public static eIonMobilityUnits IonMobilityUnitsFromFlags(FlagValues flags) { return (eIonMobilityUnits)((int)(flags & FlagValues.ion_mobility_type_bitmask) >> 4); } private static bool UsedMs1CentroidsFlags(FlagValues flags) { return (flags & FlagValues.used_ms1_centroids) != 0; } private static bool UsedMs2CentroidsFlags(FlagValues flags) { return (flags & FlagValues.used_ms2_centroids) != 0; } public ChromCachedFile(MsDataFileUri filePath, FlagValues flags, DateTime fileWriteTime, DateTime? runStartTime, float maxRT, float maxIntensity, eIonMobilityUnits ionMobilityUnits, string sampleId, string serialNumber, IEnumerable<MsInstrumentConfigInfo> instrumentInfoList) : this(filePath, flags, fileWriteTime, runStartTime, maxRT, maxIntensity, 0, 0, default(float?), ionMobilityUnits, sampleId, serialNumber, instrumentInfoList) { } public ChromCachedFile(MsDataFileUri fileUri, FlagValues flags, DateTime fileWriteTime, DateTime? runStartTime, float maxRT, float maxIntensity, int sizeScanIds, long locationScanIds, float? ticArea, eIonMobilityUnits ionMobilityUnits, string sampleId, string instrumentSerialNumber, IEnumerable<MsInstrumentConfigInfo> instrumentInfoList) { // BACKWARD COMPATIBILITY: Deal with legacy parameters which got stored on the file_path URI var filePath = fileUri as MsDataFilePath; if (filePath != null && filePath.LegacyCombineIonMobilitySpectra) // Skyline-daily 19.1.9.338 or 350 flags |= FlagValues.has_combined_ion_mobility; // Centroiding for a much longer time if (fileUri.LegacyGetCentroidMs1()) flags |= FlagValues.used_ms1_centroids; if (fileUri.LegacyGetCentroidMs2()) flags |= FlagValues.used_ms2_centroids; FilePath = fileUri.RemoveLegacyParameters(); Flags = (flags & ~FlagValues.ion_mobility_type_bitmask) | (FlagValues)((int)ionMobilityUnits << 4); FileWriteTime = fileWriteTime; RunStartTime = runStartTime; MaxRetentionTime = maxRT; MaxIntensity = maxIntensity; SizeScanIds = sizeScanIds; LocationScanIds = locationScanIds; TicArea = ticArea; SampleId = sampleId; InstrumentSerialNumber = instrumentSerialNumber; InstrumentInfoList = ImmutableList.ValueOf(instrumentInfoList) ?? ImmutableList<MsInstrumentConfigInfo>.EMPTY; } public MsDataFileUri FilePath { get; private set; } public FlagValues Flags { get; private set; } public DateTime FileWriteTime { get; private set; } public DateTime? RunStartTime { get; private set; } public float MaxRetentionTime { get; private set; } public float MaxIntensity { get; private set; } public int SizeScanIds { get; private set; } public long LocationScanIds { get; private set; } public ImmutableList<MsInstrumentConfigInfo> InstrumentInfoList { get; private set; } public float? TicArea { get; private set; } public eIonMobilityUnits IonMobilityUnits { get { return IonMobilityUnitsFromFlags(Flags); } } public string SampleId { get; private set; } public string InstrumentSerialNumber { get; private set; } public bool IsCurrent { get { return Equals(FileWriteTime, GetLastWriteTime(FilePath)); } } public bool? 
IsSingleMatchMz { get { return IsSingleMatchMzFlags(Flags); } } public bool HasMidasSpectra { get { return HasMidasSpectraFlags(Flags); } } public bool HasCombinedIonMobility { get { return HasCombinedIonMobilityFlags(Flags); } } public bool UsedMs1Centroids { get { return UsedMs1CentroidsFlags(Flags); } } public bool UsedMs2Centroids { get { return UsedMs2CentroidsFlags(Flags); } } public ChromCachedFile RelocateScanIds(long locationScanIds) { return ChangeProp(ImClone(this), im => im.LocationScanIds = locationScanIds); } public ChromCachedFile ChangeTicArea(float? ticArea) { return ChangeProp(ImClone(this), im => im.TicArea = ticArea); } public ChromCachedFile ChangeFilePath(MsDataFileUri filePath) { return ChangeProp(ImClone(this), im => im.FilePath = filePath); } public ChromCachedFile ChangeSampleId(string sampleId) { return ChangeProp(ImClone(this), im => im.SampleId = sampleId); } public ChromCachedFile ChangeSerialNumber(string serialNumber) { return ChangeProp(ImClone(this), im => im.InstrumentSerialNumber = serialNumber); } } /// <summary> /// A utility class that provides two methods. One for converting a collection of /// MsInstrumentConfigInfo objects into a string representation that can be written /// to the chromatogram cache file. /// The second method takes the string representation and parses the instrument information. /// </summary> public static class InstrumentInfoUtil { // Used for cache and testing public const string MODEL = "MODEL:"; public const string ANALYZER = "ANALYZER:"; public const string DETECTOR = "DETECTOR:"; public const string IONIZATION = "IONIZATION:"; public static IEnumerable<MsInstrumentConfigInfo> GetInstrumentInfo(string infoString) { if (String.IsNullOrEmpty(infoString)) { return Enumerable.Empty<MsInstrumentConfigInfo>(); } IList<MsInstrumentConfigInfo> instrumentConfigList = new List<MsInstrumentConfigInfo>(); using (StringReader reader = new StringReader(infoString)) { MsInstrumentConfigInfo instrumentInfo; while (ReadInstrumentConfig(reader, out instrumentInfo)) { if(!instrumentInfo.IsEmpty) instrumentConfigList.Add(instrumentInfo); } } return instrumentConfigList; } private static bool ReadInstrumentConfig(TextReader reader, out MsInstrumentConfigInfo instrumentInfo) { string model = null; string ionization = null; string analyzer = null; string detector = null; string line; bool readLine = false; while((line = reader.ReadLine()) != null) { readLine = true; if (Equals(string.Empty, line.Trim())) // We have come too far break; if (line.StartsWith(MODEL)) { model = line.Substring(MODEL.Length); } else if (line.StartsWith(IONIZATION)) { ionization = line.Substring(IONIZATION.Length); } else if (line.StartsWith(ANALYZER)) { analyzer = line.Substring(ANALYZER.Length); } else if (line.StartsWith(DETECTOR)) { detector = line.Substring(DETECTOR.Length); } else { throw new IOException(string.Format(Resources.InstrumentInfoUtil_ReadInstrumentConfig_Unexpected_line_in_instrument_config__0__, line)); } } if(readLine) { instrumentInfo = new MsInstrumentConfigInfo(model, ionization, analyzer, detector); return true; } instrumentInfo = null; return false; } public static string GetInstrumentInfoString(IEnumerable<MsInstrumentConfigInfo> instrumentConfigList) { if (instrumentConfigList == null) return string.Empty; StringBuilder infoString = new StringBuilder(); foreach (var configInfo in instrumentConfigList) { if (configInfo == null || configInfo.IsEmpty) continue; if (infoString.Length > 0) infoString.Append('\n'); // instrument model 
if(!string.IsNullOrWhiteSpace(configInfo.Model)) { infoString.Append(MODEL).Append(configInfo.Model).Append('\n'); } // ionization type if(!string.IsNullOrWhiteSpace(configInfo.Ionization)) { infoString.Append(IONIZATION).Append(configInfo.Ionization).Append('\n'); } // analyzer if (!string.IsNullOrWhiteSpace(configInfo.Analyzer)) { infoString.Append(ANALYZER).Append(configInfo.Analyzer).Append('\n'); } // detector if(!string.IsNullOrWhiteSpace(configInfo.Detector)) { infoString.Append(DETECTOR).Append(configInfo.Detector).Append('\n'); } } return infoString.ToString(); } } public interface IPathContainer { MsDataFileUri FilePath { get; } } public class PathComparer<TItem> : IEqualityComparer<TItem> where TItem : IPathContainer { public bool Equals(TItem f1, TItem f2) { if (ReferenceEquals(f1, null) || ReferenceEquals(f2, null)) { return ReferenceEquals(f1, null) && ReferenceEquals(f2, null); } return Equals(f1.FilePath, f2.FilePath); } public int GetHashCode(TItem f) { return f.FilePath.GetHashCode(); } } public enum ChromSource { fragment, sim, ms1, unknown } public enum ChromExtractor { summed, base_peak, qc } public class ChromKey : IComparable<ChromKey> { public static readonly ChromKey EMPTY = new ChromKey(null, SignedMz.ZERO, null, SignedMz.ZERO, 0, 0, ChromSource.unknown, ChromExtractor.summed, false, false, null, null); public ChromKey(byte[] textIdBytes, int textIdIndex, int textIdLen, SignedMz precursor, SignedMz product, double extractionWidth, IonMobilityFilter ionMobility, ChromSource source, ChromExtractor extractor, bool calculatedMzs, bool hasScanIds, double? optionalMinTime, double? optionalMaxTime, double? optionalCenterOfGravityTime = null) : this(textIdIndex != -1 ? Target.FromSerializableString(Encoding.UTF8.GetString(textIdBytes, textIdIndex, textIdLen)) : null, precursor, ionMobility, product, 0, extractionWidth, source, extractor, calculatedMzs, hasScanIds, optionalMinTime, optionalMaxTime, optionalCenterOfGravityTime) { } public ChromKey(Target target, SignedMz precursor, IonMobilityFilter ionMobilityFilter, SignedMz product, double ceValue, double extractionWidth, ChromSource source, ChromExtractor extractor, bool calculatedMzs, bool hasScanIds, double? optionalMinTime, double? optionalMaxTime, double? optionalCenterOfGravityTime = null) { Target = target; Precursor = precursor; IonMobilityFilter = ionMobilityFilter ?? IonMobilityFilter.EMPTY; Product = product; CollisionEnergy = (float) ceValue; ExtractionWidth = (float) extractionWidth; Source = source; Extractor = extractor; HasCalculatedMzs = calculatedMzs; HasScanIds = hasScanIds; OptionalMinTime = optionalMinTime; OptionalMaxTime = optionalMaxTime; OptionalCenterOfGravityTime = optionalCenterOfGravityTime; // Calculating these values on the fly shows up in a profiler in the CompareTo function // So, probably not worth the space saved in this class IsEmpty = Precursor == 0 && Product == 0 && source == ChromSource.unknown; if (OptionalMaxTime.HasValue && OptionalMinTime.HasValue && (OptionalMaxTime > OptionalMinTime)) OptionalMidTime = (OptionalMaxTime.Value + OptionalMinTime.Value) / 2; } public Target Target { get; private set; } // Modified sequence or custom ion id public SignedMz Precursor { get; private set; } public double? CollisionalCrossSectionSqA { get { return IonMobilityFilter == null ? null : IonMobilityFilter.CollisionalCrossSectionSqA; } } public eIonMobilityUnits IonMobilityUnits { get { return IonMobilityFilter == null ? 
eIonMobilityUnits.none : IonMobilityFilter.IonMobility.Units; } } public IonMobilityFilter IonMobilityFilter { get; private set; } public SignedMz Product { get; private set; } public float CollisionEnergy { get; private set; } public float ExtractionWidth { get; private set; } public ChromSource Source { get; private set; } public ChromExtractor Extractor { get; private set; } public bool HasCalculatedMzs { get; private set; } public bool HasScanIds { get; private set; } public bool IsEmpty { get; private set; } public double? OptionalMinTime { get; private set; } public double? OptionalMaxTime { get; private set; } public double? OptionalCenterOfGravityTime { get; private set; } // Only used in SRM, to help disambiguate chromatograms with same Q1>Q3 but different retention time intervals public double? OptionalMidTime { get; private set; } /// <summary> /// Adjust the product m/z to look like it does for vendors that allow /// product m/z shifting for parameter optimization. /// </summary> /// <param name="step">The step from the central predicted parameter value</param> /// <returns>A new ChromKey with adjusted product m/z and cleared CE value</returns> public ChromKey ChangeOptimizationStep(int step) { return new ChromKey(Target, Precursor, IonMobilityFilter, Product + step*ChromatogramInfo.OPTIMIZE_SHIFT_SIZE, 0, ExtractionWidth, Source, Extractor, HasCalculatedMzs, HasScanIds, OptionalMinTime, OptionalMaxTime, OptionalCenterOfGravityTime); } public ChromKey ChangeOptionalTimes(double? start, double? end, double? centerOfGravity) { return new ChromKey(Target, Precursor, IonMobilityFilter, Product, CollisionEnergy, ExtractionWidth, Source, Extractor, HasCalculatedMzs, HasScanIds, start, end, centerOfGravity); } /// <summary> /// For debugging only /// </summary> public override string ToString() { if (Target != null) return string.Format(@"{0:F04}, {1:F04} {4} - {2} - {3}", Precursor.RawValue, Product.RawValue, Source, Target, IonMobilityFilter); return string.Format(@"{0:F04}, {1:F04} {3} - {2}", Precursor.RawValue, Product.RawValue, Source, IonMobilityFilter); } public int CompareTo(ChromKey key) { // First deal with empty keys sorting to the end if (IsEmpty) return key.IsEmpty ? 
0 : 1; if (key.IsEmpty) return -1; // Order by precursor values var c = ComparePrecursors(key); if (c != 0) return c; // Order by scan-type source, product m/z, extraction width c = CompareSource(key); if (c != 0) return c; c = Product.CompareTo(key.Product); if (c != 0) return c; c = CollisionEnergy.CompareTo(key.CollisionEnergy); if (c != 0) return c; return ExtractionWidth.CompareTo(key.ExtractionWidth); // CONSIDER(bspratt) - we're currently ignoring ion mobility for comparison } public int ComparePrecursors(ChromKey key) { // Order by precursor m/z, peptide sequence/custom ion id, extraction method // For SRM data, do not group discontiguous chromotagrams int c = Precursor.CompareTo(key.Precursor); if (c != 0) return c; c = CompareTarget(key); if (c != 0) return c; return Extractor.CompareTo(key.Extractor); } private int CompareTarget(ChromKey key) { if (Target != null && key.Target != null) { int c = Target.CompareTo(key.Target); if (c != 0) return c; } else if (Target != null) return 1; else if (key.Target != null) return -1; return 0; // both null } public int CompareSource(ChromKey key) { // Sort with all unknown sources after all known sources if (Source != ChromSource.unknown && key.Source != ChromSource.unknown) return Source.CompareTo(key.Source); // Flip comparison to put the known value first return key.Source.CompareTo(Source); } private const string SUFFIX_CE = "CE="; private static readonly Regex REGEX_ABI = new Regex(@"Q1=([^ ]+) Q3=([^ ]+) "); public static bool IsKeyId(string id) { return MsDataFileImpl.IsSingleIonCurrentId(id); // || id.StartsWith(PREFIX_TOTAL); Skip the TICs, since Skyline calculates these } public static ChromKey FromId(string idIn, bool parseCE) { try { double precursor, product; var isNegativeChargeNullable = MsDataFileImpl.IsNegativeChargeIdNullable(idIn); bool isNegativeCharge = isNegativeChargeNullable ?? false; var id = isNegativeChargeNullable.HasValue ? 
idIn.Substring(2) : idIn; var source = ChromSource.fragment; var extractor = ChromExtractor.summed; if (id == MsDataFileImpl.TIC) { precursor = product = 0; source = ChromSource.unknown; } else if (id == MsDataFileImpl.BPC) { precursor = product = 0; extractor = ChromExtractor.base_peak; source = ChromSource.unknown; } else if (id.StartsWith(MsDataFileImpl.PREFIX_TOTAL)) { precursor = double.Parse(id.Substring(MsDataFileImpl.PREFIX_TOTAL.Length), CultureInfo.InvariantCulture); product = 0; } else if (id.StartsWith(MsDataFileImpl.PREFIX_PRECURSOR)) { precursor = double.Parse(id.Substring(MsDataFileImpl.PREFIX_TOTAL.Length), CultureInfo.InvariantCulture); product = precursor; } else if (id.StartsWith(MsDataFileImpl.PREFIX_SINGLE)) { // Remove the prefix string mzPart = id.Substring(MsDataFileImpl.PREFIX_SINGLE.Length); // Check of ABI id format match string[] mzs; Match match = REGEX_ABI.Match(mzPart); if (match.Success) { mzs = new[] {match.Groups[1].Value, match.Groups[2].Value}; } // Try simpler comma separated format (Thermo) else { mzs = mzPart.Split(new[] { ',' }); if (mzs.Length != 2) { throw new InvalidDataException( string.Format(Resources.ChromKey_FromId_Invalid_chromatogram_ID__0__found_The_ID_must_include_both_precursor_and_product_mz_values, id)); } } precursor = double.Parse(mzs[0], CultureInfo.InvariantCulture); product = double.Parse(mzs[1], CultureInfo.InvariantCulture); } else { throw new ArgumentException(string.Format(Resources.ChromKey_FromId_The_value__0__is_not_a_valid_chromatogram_ID, id)); } float ceValue = 0; if (parseCE) { int ceIndex = id.LastIndexOf(SUFFIX_CE, StringComparison.Ordinal); float ceParsed; if (ceIndex != -1 && float.TryParse(id.Substring(ceIndex + SUFFIX_CE.Length), NumberStyles.AllowDecimalPoint | NumberStyles.Integer, CultureInfo.InvariantCulture, out ceParsed)) { // Shimadzu uses negative CE values internally, but Skyline uses positive CE values // Avoid sign confusion ceValue = Math.Abs(ceParsed); } } return new ChromKey(null, new SignedMz(precursor, isNegativeCharge), null, new SignedMz(product, isNegativeCharge), ceValue, 0, source, extractor, false, true, null, null); } catch (FormatException) { throw new InvalidDataException(string.Format(Resources.ChromKey_FromId_Invalid_chromatogram_ID__0__found_Failure_parsing_mz_values, idIn)); } } public static ChromKey FromQcTrace(MsDataFileImpl.QcTrace qcTrace) { var qcTextBytes = Encoding.UTF8.GetBytes(qcTrace.Name); return new ChromKey(qcTextBytes, 0, qcTextBytes.Length, SignedMz.ZERO, SignedMz.ZERO, 0, null, ChromSource.unknown, ChromExtractor.qc, false, false, null, null); } #region object overrides public bool Equals(ChromKey other) { return Equals(Target, other.Target) && Precursor.Equals(other.Precursor) && IonMobilityFilter.Equals(other.IonMobilityFilter) && Product.Equals(other.Product) && CollisionEnergy.Equals(other.CollisionEnergy) && ExtractionWidth.Equals(other.ExtractionWidth) && Source == other.Source && Extractor == other.Extractor && HasCalculatedMzs.Equals(other.HasCalculatedMzs) && HasScanIds.Equals(other.HasScanIds) && OptionalMinTime.Equals(other.OptionalMinTime) && OptionalMaxTime.Equals(other.OptionalMaxTime); } public override bool Equals(object obj) { if (ReferenceEquals(null, obj)) return false; return obj is ChromKey && Equals((ChromKey) obj); } public override int GetHashCode() { unchecked { var hashCode = (Target != null ? 
Target.GetHashCode() : 0); hashCode = (hashCode*397) ^ Precursor.GetHashCode(); hashCode = (hashCode*397) ^ IonMobilityFilter.GetHashCode(); hashCode = (hashCode*397) ^ Product.GetHashCode(); hashCode = (hashCode*397) ^ CollisionEnergy.GetHashCode(); hashCode = (hashCode*397) ^ ExtractionWidth.GetHashCode(); hashCode = (hashCode*397) ^ (int) Source; hashCode = (hashCode*397) ^ (int) Extractor; hashCode = (hashCode*397) ^ HasCalculatedMzs.GetHashCode(); hashCode = (hashCode*397) ^ HasScanIds.GetHashCode(); hashCode = (hashCode*397) ^ OptionalMinTime.GetHashCode(); hashCode = (hashCode*397) ^ OptionalMaxTime.GetHashCode(); return hashCode; } } #endregion } /// <summary> /// This exists to encourage more stable sorting of lists which were /// formerly lists of KeyValuePair(ChromKey,providerId) and were sorted on ChromKey only. /// In small molecule work, ChromKey collisions are common so this could be an unstable sort. /// </summary> public struct ChromKeyProviderIdPair : IComparable<ChromKeyProviderIdPair> { public ChromKeyProviderIdPair(ChromKey key, int providerId) { Key = key; ProviderId = providerId; } public ChromKey Key; public int ProviderId; public int CompareTo(ChromKeyProviderIdPair other) { var result = Key.CompareTo(other.Key); if (result == 0) result = ProviderId.CompareTo(other.ProviderId); return result; } public override string ToString() { return Key + string.Format(@" ({0})", ProviderId); } } /// <summary> /// Extra information about a chromatogram, which does not belong in ChromKey /// CONSIDER: Move other values from ChromKey to this class? /// </summary> public class ChromExtra { public ChromExtra(int statusId, int statusRank) { StatusId = (ushort) statusId; StatusRank = (ushort) statusRank; } public ushort StatusId { get; private set; } public ushort StatusRank { get; private set; } } public class ChromatogramGroupInfo { protected readonly ChromGroupHeaderInfo _groupHeaderInfo; protected readonly IDictionary<Type, int> _scoreTypeIndices; protected readonly byte[] _textIdBytes; protected readonly IList<ChromCachedFile> _allFiles; protected readonly IReadOnlyList<ChromTransition> _allTransitions; protected readonly IReadOnlyList<ChromPeak> _allPeaks; protected readonly IReadOnlyList<float> _allScores; public ChromatogramGroupInfo(ChromGroupHeaderInfo groupHeaderInfo, IDictionary<Type, int> scoreTypeIndices, byte[] textIdBytes, IList<ChromCachedFile> allFiles, IReadOnlyList<ChromTransition> allTransitions, IReadOnlyList<ChromPeak> allPeaks, IReadOnlyList<float> allScores) { _groupHeaderInfo = groupHeaderInfo; _scoreTypeIndices = scoreTypeIndices; _textIdBytes = textIdBytes; _allFiles = allFiles; _allTransitions = allTransitions; _allPeaks = allPeaks; _allScores = allScores; } protected ChromatogramGroupInfo() { } protected ChromatogramGroupInfo(ChromGroupHeaderInfo header, ChromatogramGroupInfo copyFrom) : this(header, copyFrom._scoreTypeIndices, copyFrom._textIdBytes, copyFrom._allFiles, copyFrom._allTransitions, copyFrom._allPeaks, copyFrom._allScores) { } internal ChromGroupHeaderInfo Header { get { return _groupHeaderInfo; } } public SignedMz PrecursorMz { get { return new SignedMz(_groupHeaderInfo.Precursor, _groupHeaderInfo.NegativeCharge); } } public string TextId { get { return _groupHeaderInfo.TextIdIndex != -1 ? Encoding.UTF8.GetString(_textIdBytes, _groupHeaderInfo.TextIdIndex, _groupHeaderInfo.TextIdLen) : null; } } public double? 
PrecursorCollisionalCrossSection { get { return _groupHeaderInfo.CollisionalCrossSection; } } public ChromCachedFile CachedFile { get { return _allFiles[_groupHeaderInfo.FileIndex]; } } public MsDataFileUri FilePath { get { return _allFiles[_groupHeaderInfo.FileIndex].FilePath; } } public DateTime FileWriteTime { get { return _allFiles[_groupHeaderInfo.FileIndex].FileWriteTime; } } public DateTime? RunStartTime { get { return _allFiles[_groupHeaderInfo.FileIndex].RunStartTime; } } public virtual int NumTransitions { get { return _groupHeaderInfo.NumTransitions; } } public int NumPeaks { get { return _groupHeaderInfo.NumPeaks; } } public int MaxPeakIndex { get { return _groupHeaderInfo.MaxPeakIndex; } } public int BestPeakIndex { get { return MaxPeakIndex; } } private byte[] DeferedCompressedBytes { get; set; } public TimeIntensitiesGroup TimeIntensitiesGroup { get; set; } public bool HasScore(Type scoreType) { return _scoreTypeIndices.ContainsKey(scoreType); } public float GetScore(Type scoreType, int peakIndex) { int scoreIndex; if (!_scoreTypeIndices.TryGetValue(scoreType, out scoreIndex)) return float.NaN; return _allScores[_groupHeaderInfo.StartScoreIndex + peakIndex*_scoreTypeIndices.Count + scoreIndex]; } public IEnumerable<ChromatogramInfo> TransitionPointSets { get { for (int i = 0; i < NumTransitions; i++) { yield return GetTransitionInfo(i); } } } public IEnumerable<ChromPeak> GetPeaks(int transitionIndex) { int startPeak = _groupHeaderInfo.StartPeakIndex + (transitionIndex * _groupHeaderInfo.NumPeaks); int endPeak = startPeak + _groupHeaderInfo.NumPeaks; for (int i = startPeak; i < endPeak; i++) yield return _allPeaks[i]; } public ChromatogramInfo GetTransitionInfo(int index) { return GetTransitionInfo(index, TransformChrom.interpolated); } public ChromatogramInfo GetTransitionInfo(int index, TransformChrom transform) { var chromatogramInfo = GetRawTransitionInfo(index); chromatogramInfo.Transform(transform); return chromatogramInfo; } public virtual ChromatogramInfo GetRawTransitionInfo(int index) { return new ChromatogramInfo(this, index); } protected SignedMz GetProductGlobal(int index) { return new SignedMz(_allTransitions[index].Product, _groupHeaderInfo.NegativeCharge); } private bool IsProductGlobalMatch(int index, TransitionDocNode nodeTran, float tolerance) { var source = _allTransitions[index].Source; bool isMs1Chromatogram = source == ChromSource.ms1 || source == ChromSource.sim; bool isTranMs1 = nodeTran == null || nodeTran.IsMs1; // Don't allow fragment ions to match data from MS1 if (!isTranMs1 && isMs1Chromatogram) return false; var globalMz = GetProductGlobal(index); var tranMz = nodeTran != null ? 
nodeTran.Mz : SignedMz.ZERO; return tranMz.CompareTolerant(globalMz, tolerance) == 0; } public SignedMz GetProductLocal(int transitionIndex) { return new SignedMz(_allTransitions[_groupHeaderInfo.StartTransitionIndex + transitionIndex].Product, _groupHeaderInfo.NegativeCharge); } protected ChromTransition GetChromTransitionGlobal(int index) { return _allTransitions[index]; } public ChromTransition GetChromTransitionLocal(int transitionIndex) { return _allTransitions[_groupHeaderInfo.StartTransitionIndex + transitionIndex]; } public ChromatogramInfo GetTransitionInfo(TransitionDocNode nodeTran, float tolerance, OptimizableRegression regression) { return GetTransitionInfo(nodeTran, tolerance, TransformChrom.interpolated, regression); } public virtual ChromatogramInfo GetTransitionInfo(TransitionDocNode nodeTran, float tolerance, TransformChrom transform, OptimizableRegression regression) { var productMz = nodeTran != null ? nodeTran.Mz : SignedMz.ZERO; int startTran = _groupHeaderInfo.StartTransitionIndex; int endTran = startTran + _groupHeaderInfo.NumTransitions; int? iNearest = null; double deltaNearestMz = double.MaxValue; for (int i = startTran; i < endTran; i++) { if (IsProductGlobalMatch(i, nodeTran, tolerance)) { int iMiddle; if (regression == null) { iMiddle = i; } else { // If there is optimization data, return only the middle value, which // was the regression value. int startOptTran, endOptTran; GetOptimizationBounds(productMz, i, startTran, endTran, out startOptTran, out endOptTran); iMiddle = (startOptTran + endOptTran) / 2; } double deltaMz = Math.Abs(productMz - GetProductGlobal(iMiddle)); if (deltaMz < deltaNearestMz) { iNearest = iMiddle; deltaNearestMz = deltaMz; } } } return iNearest.HasValue ? GetTransitionInfo(iNearest.Value - startTran, transform) : null; } public ChromatogramInfo[] GetAllTransitionInfo(TransitionDocNode nodeTran, float tolerance, OptimizableRegression regression, TransformChrom transform) { var listChromInfo = new List<ChromatogramInfo>(); GetAllTransitionInfo(nodeTran, tolerance, regression, listChromInfo, transform); return listChromInfo.ToArray(); } public void GetAllTransitionInfo(TransitionDocNode nodeTran, float tolerance, OptimizableRegression regression, List<ChromatogramInfo> listChromInfo, TransformChrom transform) { listChromInfo.Clear(); if (regression == null) { // ReSharper disable ExpressionIsAlwaysNull var info = GetTransitionInfo(nodeTran, tolerance, transform, regression); // ReSharper restore ExpressionIsAlwaysNull if (info != null) listChromInfo.Add(info); return; } var productMz = nodeTran != null ? nodeTran.Mz : SignedMz.ZERO; int startTran = _groupHeaderInfo.StartTransitionIndex; int endTran = startTran + _groupHeaderInfo.NumTransitions; for (int i = startTran; i < endTran; i++) { if (IsProductGlobalMatch(i, nodeTran, tolerance)) { int startOptTran, endOptTran; GetOptimizationBounds(productMz, i, startTran, endTran, out startOptTran, out endOptTran); for (int j = startOptTran; j <= endOptTran; j++) listChromInfo.Add(GetTransitionInfo(j - startTran)); i = Math.Max(i, endOptTran); } } } private void GetOptimizationBounds(SignedMz productMz, int i, int startTran, int endTran, out int startOptTran, out int endOptTran) { // CONSIDER: Tried to make this a little more fault tolerant, but that just caused // more problems. So, decided to leave this close to the original implementation. 
var productMzCurrent = GetProductGlobal(i); // First back up to find the beginning while (i > startTran && ChromatogramInfo.IsOptimizationSpacing(GetProductGlobal(i - 1), productMzCurrent)) { productMzCurrent = GetProductGlobal(--i); } startOptTran = i; // Walk forward until the end while (i < endTran - 1 && ChromatogramInfo.IsOptimizationSpacing(productMzCurrent, GetProductGlobal(i + 1))) { productMzCurrent = GetProductGlobal(++i); } endOptTran = i; } public ChromPeak GetTransitionPeak(int transitionIndex, int peakIndex) { return _allPeaks[_groupHeaderInfo.StartPeakIndex + transitionIndex*_groupHeaderInfo.NumPeaks + peakIndex]; } // ReSharper disable SuggestBaseTypeForParameter public virtual int MatchTransitions(PeptideDocNode nodePep, TransitionGroupDocNode nodeGroup, float tolerance, bool multiMatch) // ReSharper restore SuggestBaseTypeForParameter { int match = 0; ExplicitRetentionTimeInfo explicitRT = null; if (nodePep != null && nodePep.ExplicitRetentionTime != null) { // We have retention time info, use that in the match explicitRT = nodePep.ExplicitRetentionTime; } foreach (TransitionDocNode nodeTran in nodeGroup.Children) { int countMatches = CountTransitionMatches(nodeTran, tolerance, explicitRT); if (countMatches > 0) { match += multiMatch ? countMatches : 1; } } return match; } public int CountTransitionMatches(TransitionDocNode nodeTran, float tolerance, ExplicitRetentionTimeInfo explicitRT) { int countMatches = 0; if (explicitRT != null && Header.IsNotIncludedTime(explicitRT.RetentionTime)) return 0; for (int transitionNum = 0; transitionNum < NumTransitions; transitionNum++) { if (nodeTran.Mz.CompareTolerant(GetProductLocal(transitionNum), tolerance) == 0) { countMatches++; } } return countMatches; } public virtual void ReadChromatogram(ChromatogramCache cache, bool deferDecompression = false) { var compressedBytes = DeferedCompressedBytes ?? ReadCompressedBytes(cache); if (deferDecompression) DeferedCompressedBytes = compressedBytes; else { CompressedBytesToTimeIntensities(compressedBytes); DeferedCompressedBytes = null; } } public void EnsureDecompressed() { if (DeferedCompressedBytes != null) CompressedBytesToTimeIntensities(DeferedCompressedBytes); } public byte[] ReadCompressedBytes(ChromatogramCache cache) { Stream stream = cache.ReadStream.Stream; byte[] pointsCompressed = new byte[_groupHeaderInfo.CompressedSize]; lock (stream) { try { // Seek to stored location stream.Seek(_groupHeaderInfo.LocationPoints, SeekOrigin.Begin); // Single read to get all the points if (stream.Read(pointsCompressed, 0, pointsCompressed.Length) < pointsCompressed.Length) throw new IOException(Resources.ChromatogramGroupInfo_ReadChromatogram_Failure_trying_to_read_points); } catch (Exception) { // If an exception is thrown, close the stream in case the failure is something // like a network failure that can be remedied by re-opening the stream. 
cache.ReadStream.CloseStream(); throw; } } return pointsCompressed; } public void CompressedBytesToTimeIntensities(byte[] pointsCompressed) { int uncompressedSize = _groupHeaderInfo.UncompressedSize; if (uncompressedSize < 0) // Before version 11 { int numPoints = _groupHeaderInfo.NumPoints; int numTrans = _groupHeaderInfo.NumTransitions; bool hasErrors = _groupHeaderInfo.HasMassErrors; bool hasMs1ScanIds = _groupHeaderInfo.HasMs1ScanIds; bool hasFragmentScanIds = _groupHeaderInfo.HasFragmentScanIds; bool hasSimScanIds = _groupHeaderInfo.HasSimScanIds; uncompressedSize = ChromatogramCache.GetChromatogramsByteCount( numTrans, numPoints, hasErrors, hasMs1ScanIds, hasFragmentScanIds, hasSimScanIds); } var uncompressedBytes = pointsCompressed.Uncompress(uncompressedSize); if (_groupHeaderInfo.HasRawChromatograms) { TimeIntensitiesGroup = RawTimeIntensities.ReadFromStream(new MemoryStream(uncompressedBytes)); } else { var chromTransitions = Enumerable.Range(Header.StartTransitionIndex, Header.NumTransitions) .Select(i => _allTransitions[i]).ToArray(); TimeIntensitiesGroup = InterpolatedTimeIntensities.ReadFromStream(new MemoryStream(uncompressedBytes), Header, chromTransitions); } } public class PathEqualityComparer : IEqualityComparer<ChromatogramGroupInfo> { public bool Equals(ChromatogramGroupInfo x, ChromatogramGroupInfo y) { if (ReferenceEquals(x, null) || ReferenceEquals(y, null)) { return ReferenceEquals(x, null) && ReferenceEquals(y, null); } return Equals(x.FilePath, y.FilePath); } public int GetHashCode(ChromatogramGroupInfo obj) { return obj.FilePath.GetHashCode(); } } public static PathEqualityComparer PathComparer { get; private set; } static ChromatogramGroupInfo() { PathComparer = new PathEqualityComparer(); } } // ReSharper disable InconsistentNaming public enum TransformChrom { raw, interpolated, craw2d, craw1d, savitzky_golay } // ReSharper restore InconsistentNaming public class ChromatogramInfo { public const double OPTIMIZE_SHIFT_SIZE = 0.01; private const double OPTIMIZE_SHIFT_THRESHOLD = 0.001; public static bool IsOptimizationSpacing(double mz1, double mz2) { // Must be ordered correctly to be optimization spacing if (mz1 > mz2) return false; double delta = Math.Abs(Math.Abs(mz2 - mz1) - OPTIMIZE_SHIFT_SIZE); return delta <= OPTIMIZE_SHIFT_THRESHOLD; } private readonly ChromatogramGroupInfo _groupInfo; protected readonly int _transitionIndex; public ChromatogramInfo(ChromatogramGroupInfo groupInfo, int transitionIndex) { if (transitionIndex >= groupInfo.NumTransitions) { throw new IndexOutOfRangeException( string.Format(Resources.ChromatogramInfo_ChromatogramInfo_The_index__0__must_be_between_0_and__1__, transitionIndex, groupInfo.NumTransitions)); } _groupInfo = groupInfo; _transitionIndex = transitionIndex; var timeIntensitiesGroup = _groupInfo.TimeIntensitiesGroup; if (timeIntensitiesGroup != null) { TimeIntensities = timeIntensitiesGroup.TransitionTimeIntensities[_transitionIndex]; if (Header.HasRawTimes()) { RawTimes = TimeIntensities.Times; } } } public ChromatogramInfo(float[] times, float[] intensities) { TimeIntensities = new TimeIntensities(times, intensities, null, null); } public ChromatogramGroupInfo GroupInfo { get { return _groupInfo; } } public ChromGroupHeaderInfo Header { get { return _groupInfo.Header; } } public int NumPeaks { get { return _groupInfo != null ? _groupInfo.NumPeaks : 0; } } public int BestPeakIndex { get { return _groupInfo != null ? 
_groupInfo.BestPeakIndex : -1; } } public MsDataFileUri FilePath { get { return _groupInfo.FilePath; } } public SignedMz PrecursorMz { get { return _groupInfo.PrecursorMz; } } public SignedMz ProductMz { get { return _groupInfo.GetProductLocal(_transitionIndex); } } public ChromSource Source { get { return ChromTransition.Source; } } public double? ExtractionWidth { get { return FloatToNullableDouble(ChromTransition.ExtractionWidth); } } public double? IonMobility { get { return FloatToNullableDouble(ChromTransition.IonMobilityValue); } } public double? IonMobilityExtractionWidth { get { return FloatToNullableDouble(ChromTransition.IonMobilityExtractionWidth); } } public eIonMobilityUnits IonMobilityUnits { get { return Header.IonMobilityUnits; } } public IonMobilityFilter GetIonMobilityFilter() { return IonMobilityFilter.GetIonMobilityFilter(IonMobilityValue.GetIonMobilityValue(IonMobility, Header.IonMobilityUnits), IonMobilityExtractionWidth, _groupInfo.PrecursorCollisionalCrossSection); } private static double? FloatToNullableDouble(float value) { float extractionWidth = value; if (extractionWidth == 0) return null; return extractionWidth; } private ChromTransition ChromTransition { get { return _groupInfo.GetChromTransitionLocal(_transitionIndex); } } public IList<float> RawTimes { get; set; } public TimeIntensities TimeIntensities { get; set; } public IList<float> Times { get { return TimeIntensities == null ? null : TimeIntensities.Times; } } public IList<int> ScanIndexes { get { return TimeIntensities == null ? null : TimeIntensities.ScanIds; } } public IList<float> Intensities { get { return TimeIntensities == null ? null : TimeIntensities.Intensities; } set { TimeIntensities = TimeIntensities.ChangeIntensities(value); } } public IEnumerable<ChromPeak> Peaks { get { return _groupInfo.GetPeaks(_transitionIndex); } } /// <summary> /// Get the nth peak for this group (as opposed to the nth peak in the allPeaks list) /// </summary> public ChromPeak GetPeak(int peakIndex) { if (0 > peakIndex || peakIndex > NumPeaks) { throw new IndexOutOfRangeException( string.Format(Resources.ChromatogramInfo_ChromatogramInfo_The_index__0__must_be_between_0_and__1__, peakIndex, NumPeaks)); } return _groupInfo.GetTransitionPeak(_transitionIndex, peakIndex); } public ChromPeak CalcPeak(int startIndex, int endIndex, ChromPeak.FlagValues flags) { if (startIndex == endIndex) return ChromPeak.EMPTY; var finder = Crawdads.NewCrawdadPeakFinder(); finder.SetChromatogram(TimeIntensities.Times, TimeIntensities.Intensities); var peak = finder.GetPeak(startIndex, endIndex); return new ChromPeak(finder, peak, flags, TimeIntensities, RawTimes); } public int IndexOfPeak(double retentionTime) { // Find the closest peak within a tolerance of 0.001 (near the precision of a float) int i = 0, iMin = -1; double minDelta = double.MaxValue; foreach (var peak in Peaks) { double delta = Math.Abs(peak.RetentionTime - retentionTime); if (delta < minDelta) { minDelta = delta; iMin = i; } i++; } return minDelta < 0.001 ? 
iMin : -1; } public void AsArrays(out double[] times, out double[] intensities) { if (TimeIntensities == null) { times = intensities = new double[0]; } else { times = TimeIntensities.Times.Select(time => (double) time).ToArray(); intensities = TimeIntensities.Intensities.Select(intensity => (double) intensity).ToArray(); } } public double MaxIntensity { get { double max = 0; foreach (float intensity in TimeIntensities.Intensities) max = Math.Max(max, intensity); return max; } } public void SumIntensities(IList<ChromatogramInfo> listInfo) { foreach (var chromatogramInfo in listInfo) { if (chromatogramInfo == null || ReferenceEquals(this, chromatogramInfo)) { continue; } TimeIntensities = TimeIntensities.MergeTimesAndAddIntensities(chromatogramInfo.TimeIntensities); } } public void Transform(TransformChrom transformChrom) { switch (transformChrom) { case TransformChrom.interpolated: Interpolate(); break; case TransformChrom.craw2d: Interpolate(); Crawdad2DTransform(); break; case TransformChrom.craw1d: Interpolate(); Crawdad1DTransform(); break; case TransformChrom.savitzky_golay: Interpolate(); SavitzkyGolaySmooth(); break; } } public void Crawdad2DTransform() { if (Intensities == null) return; var peakFinder = Crawdads.NewCrawdadPeakFinder(); peakFinder.SetChromatogram(Times, Intensities); Intensities = peakFinder.Intensities2d.ToArray(); } public void Interpolate() { if (_groupInfo == null) return; var rawTimeIntensities = _groupInfo.TimeIntensitiesGroup as RawTimeIntensities; if (rawTimeIntensities == null) return; TimeIntensities = TimeIntensities.Interpolate(rawTimeIntensities.GetInterpolatedTimes(), rawTimeIntensities.InferZeroes); } public void Crawdad1DTransform() { if (Intensities == null) return; var peakFinder = Crawdads.NewCrawdadPeakFinder(); peakFinder.SetChromatogram(Times, Intensities); Intensities = peakFinder.Intensities1d.ToArray(); } public void SavitzkyGolaySmooth() { Intensities = SavitzkyGolaySmooth(Intensities.ToArray()); } public static float[] SavitzkyGolaySmooth(float[] intensities) { if (intensities == null || intensities.Length < 9) return intensities; var intRaw = intensities; var intSmooth = new float[intRaw.Length]; Array.Copy(intensities, intSmooth, 4); for (int i = 4; i < intRaw.Length - 4; i++) { double sum = 59 * intRaw[i] + 54 * (intRaw[i - 1] + intRaw[i + 1]) + 39 * (intRaw[i - 2] + intRaw[i + 2]) + 14 * (intRaw[i - 3] + intRaw[i + 3]) - 21 * (intRaw[i - 4] + intRaw[i + 4]); intSmooth[i] = (float)(sum / 231); } Array.Copy(intRaw, intRaw.Length - 4, intSmooth, intSmooth.Length - 4, 4); return intSmooth; } public int IndexOfNearestTime(float time) { return TimeIntensities.IndexOfNearestTime(time); } public int TransitionIndex { get { return _transitionIndex; } } } public class BulkReadException : IOException { public BulkReadException() : base(Resources.BulkReadException_BulkReadException_Failed_reading_block_from_file) { } } }
1
13,016
str.Substring(3).Split(' ')[0] would also work and is more concise.
ProteoWizard-pwiz
.cs
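For context on the review comment above ("str.Substring(3).Split(' ')[0] would also work and is more concise."), here is a minimal C# sketch of what that expression does. The patch the comment refers to is not reproduced here, so the input format is an assumption: it is modeled on the "Q1=<mz> Q3=<mz> " fragments handled by REGEX_ABI in ChromKey.FromId, and the sample value, variable names, and the SubstringSplitExample class are invented for illustration only.

using System;
using System.Globalization;

static class SubstringSplitExample
{
    static void Main()
    {
        // Hypothetical id fragment of the assumed form "Q1=<mz> Q3=<mz> ...".
        const string mzPart = "Q1=500.1234 Q3=600.5678 CE=27.5";

        // The reviewer's regex-free alternative: drop the three-character "Q1=" prefix,
        // then keep everything up to the first space as the precursor m/z text.
        string q1Text = mzPart.Substring(3).Split(' ')[0];
        double precursor = double.Parse(q1Text, CultureInfo.InvariantCulture);

        Console.WriteLine(precursor.ToString(CultureInfo.InvariantCulture)); // 500.1234
    }
}

Like the regex approach, this assumes the value is terminated by a space; the suggestion simply trades the regular-expression match for two plain string operations.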
@@ -198,7 +198,8 @@ void nano::node::keepalive (std::string const & address_a, uint16_t port_a, bool
 {
 	auto endpoint (nano::map_endpoint_to_v6 (i->endpoint ()));
 	node_l->send_keepalive (endpoint);
-	if (preconfigured_peer_a)
+	// Force insert only for local preconfigured peers
+	if (preconfigured_peer_a && node_l->config.allow_local_peers && node_l->peers.not_a_peer (endpoint, false))
 	{
 		node_l->peers.insert (endpoint, nano::protocol_version, true);
 	}
1
#include <nano/node/node.hpp> #include <nano/lib/interface.h> #include <nano/lib/timer.hpp> #include <nano/lib/utility.hpp> #include <nano/node/common.hpp> #include <nano/node/rpc.hpp> #include <algorithm> #include <cstdlib> #include <future> #include <sstream> #include <boost/polymorphic_cast.hpp> #include <boost/property_tree/json_parser.hpp> double constexpr nano::node::price_max; double constexpr nano::node::free_cutoff; std::chrono::seconds constexpr nano::node::period; std::chrono::seconds constexpr nano::node::cutoff; std::chrono::seconds constexpr nano::node::syn_cookie_cutoff; std::chrono::minutes constexpr nano::node::backup_interval; std::chrono::seconds constexpr nano::node::search_pending_interval; std::chrono::seconds constexpr nano::node::peer_interval; std::chrono::hours constexpr nano::node::unchecked_cleanup_interval; std::chrono::milliseconds constexpr nano::node::process_confirmed_interval; int constexpr nano::port_mapping::mapping_timeout; int constexpr nano::port_mapping::check_timeout; unsigned constexpr nano::active_transactions::request_interval_ms; size_t constexpr nano::active_transactions::max_broadcast_queue; size_t constexpr nano::block_arrival::arrival_size_min; std::chrono::seconds constexpr nano::block_arrival::arrival_time_min; uint64_t constexpr nano::online_reps::weight_period; uint64_t constexpr nano::online_reps::weight_samples; namespace nano { extern unsigned char nano_bootstrap_weights[]; extern size_t nano_bootstrap_weights_size; } nano::network::network (nano::node & node_a, uint16_t port) : buffer_container (node_a.stats, nano::network::buffer_size, 4096), // 2Mb receive buffer socket (node_a.io_ctx, nano::endpoint (boost::asio::ip::address_v6::any (), port)), resolver (node_a.io_ctx), node (node_a), on (true) { boost::thread::attributes attrs; nano::thread_attributes::set (attrs); for (size_t i = 0; i < node.config.network_threads; ++i) { packet_processing_threads.push_back (boost::thread (attrs, [this]() { nano::thread_role::set (nano::thread_role::name::packet_processing); try { process_packets (); } catch (boost::system::error_code & ec) { BOOST_LOG (this->node.log) << FATAL_LOG_PREFIX << ec.message (); release_assert (false); } catch (std::error_code & ec) { BOOST_LOG (this->node.log) << FATAL_LOG_PREFIX << ec.message (); release_assert (false); } catch (std::runtime_error & err) { BOOST_LOG (this->node.log) << FATAL_LOG_PREFIX << err.what (); release_assert (false); } catch (...) 
{ BOOST_LOG (this->node.log) << FATAL_LOG_PREFIX << "Unknown exception"; release_assert (false); } if (this->node.config.logging.network_packet_logging ()) { BOOST_LOG (this->node.log) << "Exiting packet processing thread"; } })); } } nano::network::~network () { for (auto & thread : packet_processing_threads) { thread.join (); } } void nano::network::start () { for (size_t i = 0; i < node.config.io_threads; ++i) { receive (); } } void nano::network::receive () { if (node.config.logging.network_packet_logging ()) { BOOST_LOG (node.log) << "Receiving packet"; } std::unique_lock<std::mutex> lock (socket_mutex); auto data (buffer_container.allocate ()); socket.async_receive_from (boost::asio::buffer (data->buffer, nano::network::buffer_size), data->endpoint, [this, data](boost::system::error_code const & error, size_t size_a) { if (!error && this->on) { data->size = size_a; this->buffer_container.enqueue (data); this->receive (); } else { this->buffer_container.release (data); if (error) { if (this->node.config.logging.network_logging ()) { BOOST_LOG (this->node.log) << boost::str (boost::format ("UDP Receive error: %1%") % error.message ()); } } if (this->on) { this->node.alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (5), [this]() { this->receive (); }); } } }); } void nano::network::process_packets () { auto local_endpoint (endpoint ()); while (on.load ()) { auto data (buffer_container.dequeue ()); if (data == nullptr) { break; } //std::cerr << data->endpoint.address ().to_string (); receive_action (data, local_endpoint); buffer_container.release (data); } } void nano::network::stop () { on = false; std::unique_lock<std::mutex> lock (socket_mutex); if (socket.is_open ()) { socket.close (); } resolver.cancel (); buffer_container.stop (); } void nano::network::send_keepalive (nano::endpoint const & endpoint_a) { assert (endpoint_a.address ().is_v6 ()); nano::keepalive message; node.peers.random_fill (message.peers); auto bytes = message.to_bytes (); if (node.config.logging.network_keepalive_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Keepalive req sent to %1%") % endpoint_a); } std::weak_ptr<nano::node> node_w (node.shared ()); send_buffer (bytes->data (), bytes->size (), endpoint_a, [bytes, node_w, endpoint_a](boost::system::error_code const & ec, size_t) { if (auto node_l = node_w.lock ()) { if (ec && node_l->config.logging.network_keepalive_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending keepalive to %1%: %2%") % endpoint_a % ec.message ()); } else { node_l->stats.inc (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::out); } } }); } void nano::node::keepalive (std::string const & address_a, uint16_t port_a, bool preconfigured_peer_a) { auto node_l (shared_from_this ()); network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (address_a, std::to_string (port_a)), [node_l, address_a, port_a, preconfigured_peer_a](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) { if (!ec) { for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i) { auto endpoint (nano::map_endpoint_to_v6 (i->endpoint ())); node_l->send_keepalive (endpoint); if (preconfigured_peer_a) { node_l->peers.insert (endpoint, nano::protocol_version, true); } } } else { BOOST_LOG (node_l->log) << boost::str (boost::format ("Error resolving address: %1%:%2%: %3%") % address_a % port_a % ec.message ()); } }); } void nano::network::send_node_id_handshake 
(nano::endpoint const & endpoint_a, boost::optional<nano::uint256_union> const & query, boost::optional<nano::uint256_union> const & respond_to) { assert (endpoint_a.address ().is_v6 ()); boost::optional<std::pair<nano::account, nano::signature>> response (boost::none); if (respond_to) { response = std::make_pair (node.node_id.pub, nano::sign_message (node.node_id.prv, node.node_id.pub, *respond_to)); assert (!nano::validate_message (response->first, *respond_to, response->second)); } nano::node_id_handshake message (query, response); auto bytes = message.to_bytes (); if (node.config.logging.network_node_id_handshake_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Node ID handshake sent with node ID %1% to %2%: query %3%, respond_to %4% (signature %5%)") % node.node_id.pub.to_account () % endpoint_a % (query ? query->to_string () : std::string ("[none]")) % (respond_to ? respond_to->to_string () : std::string ("[none]")) % (response ? response->second.to_string () : std::string ("[none]"))); } node.stats.inc (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::out); std::weak_ptr<nano::node> node_w (node.shared ()); send_buffer (bytes->data (), bytes->size (), endpoint_a, [bytes, node_w, endpoint_a](boost::system::error_code const & ec, size_t) { if (auto node_l = node_w.lock ()) { if (ec && node_l->config.logging.network_node_id_handshake_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending node ID handshake to %1% %2%") % endpoint_a % ec.message ()); } } }); } void nano::network::republish (nano::block_hash const & hash_a, std::shared_ptr<std::vector<uint8_t>> buffer_a, nano::endpoint endpoint_a) { if (node.config.logging.network_publish_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Publishing %1% to %2%") % hash_a.to_string () % endpoint_a); } std::weak_ptr<nano::node> node_w (node.shared ()); send_buffer (buffer_a->data (), buffer_a->size (), endpoint_a, [node_w, endpoint_a](boost::system::error_code const & ec, size_t size) { if (auto node_l = node_w.lock ()) { if (ec && node_l->config.logging.network_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending publish to %1%: %2%") % endpoint_a % ec.message ()); } else { node_l->stats.inc (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::out); } } }); } template <typename T> bool confirm_block (nano::transaction const & transaction_a, nano::node & node_a, T & list_a, std::shared_ptr<nano::block> block_a, bool also_publish) { bool result (false); if (node_a.config.enable_voting) { auto hash (block_a->hash ()); // Search in cache auto votes (node_a.votes_cache.find (hash)); if (votes.empty ()) { // Generate new vote node_a.wallets.foreach_representative (transaction_a, [&result, &list_a, &node_a, &transaction_a, &hash](nano::public_key const & pub_a, nano::raw_key const & prv_a) { result = true; auto vote (node_a.store.vote_generate (transaction_a, pub_a, prv_a, std::vector<nano::block_hash> (1, hash))); nano::confirm_ack confirm (vote); auto vote_bytes = confirm.to_bytes (); for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j) { node_a.network.confirm_send (confirm, vote_bytes, *j); } node_a.votes_cache.add (vote); }); } else { // Send from cache for (auto & vote : votes) { nano::confirm_ack confirm (vote); auto vote_bytes = confirm.to_bytes (); for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j) { node_a.network.confirm_send (confirm, vote_bytes, *j); } } } // Republish 
if required if (also_publish) { nano::publish publish (block_a); std::shared_ptr<std::vector<uint8_t>> publish_bytes; publish_bytes = publish.to_bytes (); for (auto j (list_a.begin ()), m (list_a.end ()); j != m; ++j) { node_a.network.republish (hash, publish_bytes, *j); } } } return result; } bool confirm_block (nano::transaction const & transaction_a, nano::node & node_a, nano::endpoint & peer_a, std::shared_ptr<nano::block> block_a, bool also_publish) { std::array<nano::endpoint, 1> endpoints; endpoints[0] = peer_a; auto result (confirm_block (transaction_a, node_a, endpoints, std::move (block_a), also_publish)); return result; } void nano::network::confirm_hashes (nano::transaction const & transaction_a, nano::endpoint const & peer_a, std::vector<nano::block_hash> blocks_bundle_a) { if (node.config.enable_voting) { node.wallets.foreach_representative (transaction_a, [this, &blocks_bundle_a, &peer_a, &transaction_a](nano::public_key const & pub_a, nano::raw_key const & prv_a) { auto vote (this->node.store.vote_generate (transaction_a, pub_a, prv_a, blocks_bundle_a)); nano::confirm_ack confirm (vote); std::shared_ptr<std::vector<uint8_t>> bytes (new std::vector<uint8_t>); { nano::vectorstream stream (*bytes); confirm.serialize (stream); } this->node.network.confirm_send (confirm, bytes, peer_a); this->node.votes_cache.add (vote); }); } } bool nano::network::send_votes_cache (nano::block_hash const & hash_a, nano::endpoint const & peer_a) { // Search in cache auto votes (node.votes_cache.find (hash_a)); // Send from cache for (auto & vote : votes) { nano::confirm_ack confirm (vote); auto vote_bytes = confirm.to_bytes (); confirm_send (confirm, vote_bytes, peer_a); } // Returns true if votes were sent bool result (!votes.empty ()); return result; } void nano::network::republish_block (std::shared_ptr<nano::block> block) { auto hash (block->hash ()); auto list (node.peers.list_fanout ()); nano::publish message (block); auto bytes = message.to_bytes (); for (auto i (list.begin ()), n (list.end ()); i != n; ++i) { republish (hash, bytes, *i); } if (node.config.logging.network_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Block %1% was republished to peers") % hash.to_string ()); } } void nano::network::republish_block (std::shared_ptr<nano::block> block, nano::endpoint const & peer_a) { auto hash (block->hash ()); nano::publish message (block); std::vector<uint8_t> bytes; { nano::vectorstream stream (bytes); message.serialize (stream); } republish (hash, std::make_shared<std::vector<uint8_t>> (bytes), peer_a); if (node.config.logging.network_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Block %1% was republished to peer") % hash.to_string ()); } } void nano::network::republish_block_batch (std::deque<std::shared_ptr<nano::block>> blocks_a, unsigned delay_a) { auto block (blocks_a.front ()); blocks_a.pop_front (); republish_block (block); if (!blocks_a.empty ()) { std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + std::rand () % delay_a), [node_w, blocks_a, delay_a]() { if (auto node_l = node_w.lock ()) { node_l->network.republish_block_batch (blocks_a, delay_a); } }); } } // In order to rate limit network traffic we republish: // 1) Only if they are a non-replay vote of a block that's actively settling. 
Settling blocks are limited by block PoW // 2) The rep has a weight > Y to prevent creating a lot of small-weight accounts to send out votes // 3) Only if a vote for this block from this representative hasn't been received in the previous X seconds. // This prevents rapid publishing of votes with increasing sequence numbers. // // These rules are implemented by the caller, not this function. void nano::network::republish_vote (std::shared_ptr<nano::vote> vote_a) { nano::confirm_ack confirm (vote_a); auto bytes = confirm.to_bytes (); auto list (node.peers.list_fanout ()); for (auto j (list.begin ()), m (list.end ()); j != m; ++j) { node.network.confirm_send (confirm, bytes, *j); } } void nano::network::broadcast_confirm_req (std::shared_ptr<nano::block> block_a) { auto list (std::make_shared<std::vector<nano::peer_information>> (node.peers.representatives (std::numeric_limits<size_t>::max ()))); if (list->empty () || node.peers.total_weight () < node.config.online_weight_minimum.number ()) { // broadcast request to all peers (with max limit 2 * sqrt (peers count)) list = std::make_shared<std::vector<nano::peer_information>> (node.peers.list_vector (std::min (static_cast<size_t> (100), 2 * node.peers.size_sqrt ()))); } /* * In either case (broadcasting to all representatives, or broadcasting to * all peers because there are not enough connected representatives), * limit each instance to a single random up-to-32 selection. The invoker * of "broadcast_confirm_req" will be responsible for calling it again * if the votes for a block have not arrived in time. */ const size_t max_endpoints = 32; random_pool::shuffle (list->begin (), list->end ()); if (list->size () > max_endpoints) { list->erase (list->begin () + max_endpoints, list->end ()); } broadcast_confirm_req_base (block_a, list, 0); } void nano::network::broadcast_confirm_req_base (std::shared_ptr<nano::block> block_a, std::shared_ptr<std::vector<nano::peer_information>> endpoints_a, unsigned delay_a, bool resumption) { const size_t max_reps = 10; if (!resumption && node.config.logging.network_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Broadcasting confirm req for block %1% to %2% representatives") % block_a->hash ().to_string () % endpoints_a->size ()); } auto count (0); while (!endpoints_a->empty () && count < max_reps) { send_confirm_req (endpoints_a->back ().endpoint, block_a); endpoints_a->pop_back (); count++; } if (!endpoints_a->empty ()) { delay_a += std::rand () % broadcast_interval_ms; std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), [node_w, block_a, endpoints_a, delay_a]() { if (auto node_l = node_w.lock ()) { node_l->network.broadcast_confirm_req_base (block_a, endpoints_a, delay_a, true); } }); } } void nano::network::broadcast_confirm_req_batch (std::unordered_map<nano::endpoint, std::vector<std::pair<nano::block_hash, nano::block_hash>>> request_bundle_a, unsigned delay_a, bool resumption) { const size_t max_reps = 10; if (!resumption && node.config.logging.network_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Broadcasting batch confirm req to %1% representatives") % request_bundle_a.size ()); } auto count (0); while (!request_bundle_a.empty () && count < max_reps) { auto j (request_bundle_a.begin ()); count++; std::vector<std::pair<nano::block_hash, nano::block_hash>> roots_hashes; // Limit max request size to 6 hash + root pairs while (roots_hashes.size () <= confirm_req_hashes_max &&
!j->second.empty ()) { roots_hashes.push_back (j->second.back ()); j->second.pop_back (); } send_confirm_req_hashes (j->first, roots_hashes); if (j->second.empty ()) { request_bundle_a.erase (j); } } if (!request_bundle_a.empty ()) { std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a), [node_w, request_bundle_a, delay_a]() { if (auto node_l = node_w.lock ()) { node_l->network.broadcast_confirm_req_batch (request_bundle_a, delay_a + 50, true); } }); } } void nano::network::broadcast_confirm_req_batch (std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<nano::peer_information>>>> deque_a, unsigned delay_a) { auto pair (deque_a.front ()); deque_a.pop_front (); auto block (pair.first); // confirm_req to representatives auto endpoints (pair.second); if (!endpoints->empty ()) { broadcast_confirm_req_base (block, endpoints, delay_a); } /* Continue while blocks remain Broadcast with random delay between delay_a & 2*delay_a */ if (!deque_a.empty ()) { std::weak_ptr<nano::node> node_w (node.shared ()); node.alarm.add (std::chrono::steady_clock::now () + std::chrono::milliseconds (delay_a + std::rand () % delay_a), [node_w, deque_a, delay_a]() { if (auto node_l = node_w.lock ()) { node_l->network.broadcast_confirm_req_batch (deque_a, delay_a); } }); } } void nano::network::send_confirm_req (nano::endpoint const & endpoint_a, std::shared_ptr<nano::block> block) { nano::confirm_req message (block); auto bytes = message.to_bytes (); if (node.config.logging.network_message_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Sending confirm req to %1%") % endpoint_a); } std::weak_ptr<nano::node> node_w (node.shared ()); node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_req, nano::stat::dir::out); send_buffer (bytes->data (), bytes->size (), endpoint_a, [bytes, node_w](boost::system::error_code const & ec, size_t size) { if (auto node_l = node_w.lock ()) { if (ec && node_l->config.logging.network_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending confirm request: %1%") % ec.message ()); } } }); } void nano::network::send_confirm_req_hashes (nano::endpoint const & endpoint_a, std::vector<std::pair<nano::block_hash, nano::block_hash>> const & roots_hashes_a) { nano::confirm_req message (roots_hashes_a); std::vector<uint8_t> bytes; { nano::vectorstream stream (bytes); message.serialize (stream); } if (node.config.logging.network_message_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Sending confirm req hashes to %1%") % endpoint_a); } std::weak_ptr<nano::node> node_w (node.shared ()); node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_req, nano::stat::dir::out); send_buffer (bytes.data (), bytes.size (), endpoint_a, [node_w](boost::system::error_code const & ec, size_t size) { if (auto node_l = node_w.lock ()) { if (ec && node_l->config.logging.network_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Error sending confirm request: %1%") % ec.message ()); } } }); } template <typename T> void rep_query (nano::node & node_a, T const & peers_a) { auto transaction (node_a.store.tx_begin_read ()); std::shared_ptr<nano::block> block (node_a.store.block_random (transaction)); auto hash (block->hash ()); node_a.rep_crawler.add (hash); for (auto i (peers_a.begin ()), n (peers_a.end ()); i != n; ++i) { node_a.peers.rep_request (*i); node_a.network.send_confirm_req (*i, block); } 
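// Note: the hash registered with the rep crawler above is short-lived; the alarm scheduled
// below removes it again after five seconds, so unanswered confirm_req queries do not linger.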
std::weak_ptr<nano::node> node_w (node_a.shared ()); node_a.alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (5), [node_w, hash]() { if (auto node_l = node_w.lock ()) { node_l->rep_crawler.remove (hash); } }); } void rep_query (nano::node & node_a, nano::endpoint const & peers_a) { std::array<nano::endpoint, 1> peers; peers[0] = peers_a; rep_query (node_a, peers); } namespace { class network_message_visitor : public nano::message_visitor { public: network_message_visitor (nano::node & node_a, nano::endpoint const & sender_a) : node (node_a), sender (sender_a) { } virtual ~network_message_visitor () = default; void keepalive (nano::keepalive const & message_a) override { if (node.config.logging.network_keepalive_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Received keepalive message from %1%") % sender); } node.stats.inc (nano::stat::type::message, nano::stat::detail::keepalive, nano::stat::dir::in); if (node.peers.contacted (sender, message_a.header.version_using)) { auto endpoint_l (nano::map_endpoint_to_v6 (sender)); auto cookie (node.peers.assign_syn_cookie (endpoint_l)); if (cookie) { node.network.send_node_id_handshake (endpoint_l, *cookie, boost::none); } } node.network.merge_peers (message_a.peers); } void publish (nano::publish const & message_a) override { if (node.config.logging.network_message_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Publish message from %1% for %2%") % sender % message_a.block->hash ().to_string ()); } node.stats.inc (nano::stat::type::message, nano::stat::detail::publish, nano::stat::dir::in); node.peers.contacted (sender, message_a.header.version_using); if (!node.block_processor.full ()) { node.process_active (message_a.block); } node.active.publish (message_a.block); } void confirm_req (nano::confirm_req const & message_a) override { if (node.config.logging.network_message_logging ()) { if (!message_a.roots_hashes.empty ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Confirm_req message from %1% for hashes:roots %2%") % sender % message_a.roots_string ()); } else { BOOST_LOG (node.log) << boost::str (boost::format ("Confirm_req message from %1% for %2%") % sender % message_a.block->hash ().to_string ()); } } node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_req, nano::stat::dir::in); node.peers.contacted (sender, message_a.header.version_using); // Don't load nodes with disabled voting if (node.config.enable_voting && node.wallets.reps_count) { if (message_a.block != nullptr) { auto hash (message_a.block->hash ()); if (!node.network.send_votes_cache (hash, sender)) { auto transaction (node.store.tx_begin_read ()); auto successor (node.ledger.successor (transaction, nano::uint512_union (message_a.block->previous (), message_a.block->root ()))); if (successor != nullptr) { auto same_block (successor->hash () == hash); confirm_block (transaction, node, sender, std::move (successor), !same_block); } } } else if (!message_a.roots_hashes.empty ()) { auto transaction (node.store.tx_begin_read ()); std::vector<nano::block_hash> blocks_bundle; for (auto & root_hash : message_a.roots_hashes) { if (!node.network.send_votes_cache (root_hash.first, sender) && node.store.block_exists (transaction, root_hash.first)) { blocks_bundle.push_back (root_hash.first); } else { nano::block_hash successor (0); // Search for block root successor = node.store.block_successor (transaction, root_hash.second); // Search for account root if (successor.is_zero () && node.store.account_exists 
(transaction, root_hash.second)) { nano::account_info info; auto error (node.store.account_get (transaction, root_hash.second, info)); assert (!error); successor = info.open_block; } if (!successor.is_zero ()) { if (!node.network.send_votes_cache (successor, sender)) { blocks_bundle.push_back (successor); } auto successor_block (node.store.block_get (transaction, successor)); assert (successor_block != nullptr); node.network.republish_block (std::move (successor_block), sender); } } } if (!blocks_bundle.empty ()) { node.network.confirm_hashes (transaction, sender, blocks_bundle); } } } } void confirm_ack (nano::confirm_ack const & message_a) override { if (node.config.logging.network_message_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Received confirm_ack message from %1% for %2%sequence %3%") % sender % message_a.vote->hashes_string () % std::to_string (message_a.vote->sequence)); } node.stats.inc (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::in); node.peers.contacted (sender, message_a.header.version_using); for (auto & vote_block : message_a.vote->blocks) { if (!vote_block.which ()) { auto block (boost::get<std::shared_ptr<nano::block>> (vote_block)); if (!node.block_processor.full ()) { node.process_active (block); } node.active.publish (block); } } node.vote_processor.vote (message_a.vote, sender); } void bulk_pull (nano::bulk_pull const &) override { assert (false); } void bulk_pull_account (nano::bulk_pull_account const &) override { assert (false); } void bulk_push (nano::bulk_push const &) override { assert (false); } void frontier_req (nano::frontier_req const &) override { assert (false); } void node_id_handshake (nano::node_id_handshake const & message_a) override { if (node.config.logging.network_node_id_handshake_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Received node_id_handshake message from %1% with query %2% and response account %3%") % sender % (message_a.query ? message_a.query->to_string () : std::string ("[none]")) % (message_a.response ? 
message_a.response->first.to_account () : std::string ("[none]"))); } auto endpoint_l (nano::map_endpoint_to_v6 (sender)); boost::optional<nano::uint256_union> out_query; boost::optional<nano::uint256_union> out_respond_to; if (message_a.query) { out_respond_to = message_a.query; } auto validated_response (false); if (message_a.response) { if (!node.peers.validate_syn_cookie (endpoint_l, message_a.response->first, message_a.response->second)) { validated_response = true; if (message_a.response->first != node.node_id.pub) { node.peers.insert (endpoint_l, message_a.header.version_using, false, message_a.response->first); } } else if (node.config.logging.network_node_id_handshake_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Failed to validate syn cookie signature %1% by %2%") % message_a.response->second.to_string () % message_a.response->first.to_account ()); } } if (!validated_response && !node.peers.known_peer (endpoint_l)) { out_query = node.peers.assign_syn_cookie (endpoint_l); } if (out_query || out_respond_to) { node.network.send_node_id_handshake (sender, out_query, out_respond_to); } node.stats.inc (nano::stat::type::message, nano::stat::detail::node_id_handshake, nano::stat::dir::in); } nano::node & node; nano::endpoint sender; }; } void nano::network::receive_action (nano::udp_data * data_a, nano::endpoint const & local_endpoint_a) { auto allowed_sender (true); if (!on) { allowed_sender = false; } else if (data_a->endpoint == local_endpoint_a) { allowed_sender = false; } else if (nano::reserved_address (data_a->endpoint, false) && !node.config.allow_local_peers) { allowed_sender = false; } if (allowed_sender) { network_message_visitor visitor (node, data_a->endpoint); nano::message_parser parser (node.block_uniquer, node.vote_uniquer, visitor, node.work); parser.deserialize_buffer (data_a->buffer, data_a->size); if (parser.status != nano::message_parser::parse_status::success) { node.stats.inc (nano::stat::type::error); switch (parser.status) { case nano::message_parser::parse_status::insufficient_work: // We've already incremented the error count, update detail only node.stats.inc_detail_only (nano::stat::type::error, nano::stat::detail::insufficient_work); break; case nano::message_parser::parse_status::invalid_magic: node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_magic); break; case nano::message_parser::parse_status::invalid_network: node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_network); break; case nano::message_parser::parse_status::invalid_header: node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_header); break; case nano::message_parser::parse_status::invalid_message_type: node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_message_type); break; case nano::message_parser::parse_status::invalid_keepalive_message: node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_keepalive_message); break; case nano::message_parser::parse_status::invalid_publish_message: node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_publish_message); break; case nano::message_parser::parse_status::invalid_confirm_req_message: node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_confirm_req_message); break; case nano::message_parser::parse_status::invalid_confirm_ack_message: node.stats.inc (nano::stat::type::udp, nano::stat::detail::invalid_confirm_ack_message); break; case nano::message_parser::parse_status::invalid_node_id_handshake_message: node.stats.inc
(nano::stat::type::udp, nano::stat::detail::invalid_node_id_handshake_message); break; case nano::message_parser::parse_status::outdated_version: node.stats.inc (nano::stat::type::udp, nano::stat::detail::outdated_version); break; case nano::message_parser::parse_status::success: /* Already checked, unreachable */ break; } if (node.config.logging.network_logging () && parser.status != nano::message_parser::parse_status::outdated_version) { BOOST_LOG (node.log) << "Could not parse message. Error: " << parser.status_string (); } } else { node.stats.add (nano::stat::type::traffic, nano::stat::dir::in, data_a->size); } } else { if (node.config.logging.network_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Reserved sender %1%") % data_a->endpoint.address ().to_string ()); } node.stats.inc_detail_only (nano::stat::type::error, nano::stat::detail::bad_sender); } } // Send keepalives to all the peers we've been notified of void nano::network::merge_peers (std::array<nano::endpoint, 8> const & peers_a) { for (auto i (peers_a.begin ()), j (peers_a.end ()); i != j; ++i) { if (!node.peers.reachout (*i)) { send_keepalive (*i); } } } bool nano::operation::operator> (nano::operation const & other_a) const { return wakeup > other_a.wakeup; } nano::alarm::alarm (boost::asio::io_context & io_ctx_a) : io_ctx (io_ctx_a), thread ([this]() { nano::thread_role::set (nano::thread_role::name::alarm); run (); }) { } nano::alarm::~alarm () { add (std::chrono::steady_clock::now (), nullptr); thread.join (); } void nano::alarm::run () { std::unique_lock<std::mutex> lock (mutex); auto done (false); while (!done) { if (!operations.empty ()) { auto & operation (operations.top ()); if (operation.function) { if (operation.wakeup <= std::chrono::steady_clock::now ()) { io_ctx.post (operation.function); operations.pop (); } else { auto wakeup (operation.wakeup); condition.wait_until (lock, wakeup); } } else { done = true; } } else { condition.wait (lock); } } } void nano::alarm::add (std::chrono::steady_clock::time_point const & wakeup_a, std::function<void()> const & operation) { { std::lock_guard<std::mutex> lock (mutex); operations.push (nano::operation ({ wakeup_a, operation })); } condition.notify_all (); } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (alarm & alarm, const std::string & name) { auto composite = std::make_unique<seq_con_info_composite> (name); size_t count = 0; { std::lock_guard<std::mutex> guard (alarm.mutex); count = alarm.operations.size (); } auto sizeof_element = sizeof (decltype (alarm.operations)::value_type); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "operations", count, sizeof_element })); return composite; } } nano::node_init::node_init () : block_store_init (false), wallet_init (false) { } bool nano::node_init::error () { return block_store_init || wallet_init || wallets_store_init; } nano::vote_processor::vote_processor (nano::node & node_a) : node (node_a), started (false), stopped (false), active (false), thread ([this]() { nano::thread_role::set (nano::thread_role::name::vote_processing); process_loop (); }) { std::unique_lock<std::mutex> lock (mutex); while (!started) { condition.wait (lock); } } void nano::vote_processor::process_loop () { std::chrono::steady_clock::time_point start_time, end_time; std::chrono::steady_clock::duration elapsed_time; std::chrono::milliseconds elapsed_time_ms; uint64_t elapsed_time_ms_int; bool log_this_iteration; std::unique_lock<std::mutex> lock (mutex); started = true; 
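// The vote_processor constructor waits on `started` under this same mutex; the unlock/notify/relock
// sequence that follows releases that waiter before the processing loop starts consuming votes.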
lock.unlock (); condition.notify_all (); lock.lock (); while (!stopped) { if (!votes.empty ()) { std::deque<std::pair<std::shared_ptr<nano::vote>, nano::endpoint>> votes_l; votes_l.swap (votes); log_this_iteration = false; if (node.config.logging.network_logging () && votes_l.size () > 50) { /* * Only log the timing information for this iteration if * there are a sufficient number of items for it to be relevant */ log_this_iteration = true; start_time = std::chrono::steady_clock::now (); } active = true; lock.unlock (); verify_votes (votes_l); { std::unique_lock<std::mutex> active_single_lock (node.active.mutex); auto transaction (node.store.tx_begin_read ()); uint64_t count (1); for (auto & i : votes_l) { vote_blocking (transaction, i.first, i.second, true); // Free active_transactions mutex each 100 processed votes if (count % 100 == 0) { active_single_lock.unlock (); active_single_lock.lock (); } count++; } } lock.lock (); active = false; lock.unlock (); condition.notify_all (); lock.lock (); if (log_this_iteration) { end_time = std::chrono::steady_clock::now (); elapsed_time = end_time - start_time; elapsed_time_ms = std::chrono::duration_cast<std::chrono::milliseconds> (elapsed_time); elapsed_time_ms_int = elapsed_time_ms.count (); if (elapsed_time_ms_int >= 100) { /* * If the time spent was less than 100ms then * the results are probably not that useful, * so don't spam the logs. */ BOOST_LOG (node.log) << boost::str (boost::format ("Processed %1% votes in %2% milliseconds (rate of %3% votes per second)") % votes_l.size () % elapsed_time_ms_int % ((votes_l.size () * 1000ULL) / elapsed_time_ms_int)); } } } else { condition.wait (lock); } } } void nano::vote_processor::vote (std::shared_ptr<nano::vote> vote_a, nano::endpoint endpoint_a) { assert (endpoint_a.address ().is_v6 ()); std::unique_lock<std::mutex> lock (mutex); if (!stopped) { bool process (false); /* Random early detection levels Always process votes for test network (process = true) Stop processing with max 144 * 1024 votes */ if (!nano::is_test_network) { // Level 0 (< 0.1%) if (votes.size () < 96 * 1024) { process = true; } // Level 1 (0.1-1%) else if (votes.size () < 112 * 1024) { process = (representatives_1.find (vote_a->account) != representatives_1.end ()); } // Level 2 (1-5%) else if (votes.size () < 128 * 1024) { process = (representatives_2.find (vote_a->account) != representatives_2.end ()); } // Level 3 (> 5%) else if (votes.size () < 144 * 1024) { process = (representatives_3.find (vote_a->account) != representatives_3.end ()); } } else { // Process for test network process = true; } if (process) { votes.push_back (std::make_pair (vote_a, endpoint_a)); lock.unlock (); condition.notify_all (); lock.lock (); } else { node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_overflow); if (node.config.logging.vote_logging ()) { BOOST_LOG (node.log) << "Votes overflow"; } } } } void nano::vote_processor::verify_votes (std::deque<std::pair<std::shared_ptr<nano::vote>, nano::endpoint>> & votes_a) { auto size (votes_a.size ()); std::vector<unsigned char const *> messages; messages.reserve (size); std::vector<nano::uint256_union> hashes; hashes.reserve (size); std::vector<size_t> lengths (size, sizeof (nano::uint256_union)); std::vector<unsigned char const *> pub_keys; pub_keys.reserve (size); std::vector<unsigned char const *> signatures; signatures.reserve (size); std::vector<int> verifications; verifications.resize (size); for (auto & vote : votes_a) { hashes.push_back (vote.first->hash ());
messages.push_back (hashes.back ().bytes.data ()); pub_keys.push_back (vote.first->account.bytes.data ()); signatures.push_back (vote.first->signature.bytes.data ()); } nano::signature_check_set check = { size, messages.data (), lengths.data (), pub_keys.data (), signatures.data (), verifications.data () }; node.checker.verify (check); std::remove_reference_t<decltype (votes_a)> result; auto i (0); for (auto & vote : votes_a) { assert (verifications[i] == 1 || verifications[i] == 0); if (verifications[i] == 1) { result.push_back (vote); } ++i; } votes_a.swap (result); } // node.active.mutex lock required nano::vote_code nano::vote_processor::vote_blocking (nano::transaction const & transaction_a, std::shared_ptr<nano::vote> vote_a, nano::endpoint endpoint_a, bool validated) { assert (endpoint_a.address ().is_v6 ()); assert (!node.active.mutex.try_lock ()); auto result (nano::vote_code::invalid); if (validated || !vote_a->validate ()) { auto max_vote (node.store.vote_max (transaction_a, vote_a)); result = nano::vote_code::replay; if (!node.active.vote (vote_a, true)) { result = nano::vote_code::vote; } switch (result) { case nano::vote_code::vote: node.observers.vote.notify (transaction_a, vote_a, endpoint_a); case nano::vote_code::replay: // This tries to assist rep nodes that have lost track of their highest sequence number by replaying our highest known vote back to them // Only do this if the sequence number is significantly different to account for network reordering // Amplify attack considerations: We're sending out a confirm_ack in response to a confirm_ack for no net traffic increase if (max_vote->sequence > vote_a->sequence + 10000) { nano::confirm_ack confirm (max_vote); node.network.confirm_send (confirm, confirm.to_bytes (), endpoint_a); } break; case nano::vote_code::invalid: assert (false); break; } } std::string status; switch (result) { case nano::vote_code::invalid: status = "Invalid"; node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_invalid); break; case nano::vote_code::replay: status = "Replay"; node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_replay); break; case nano::vote_code::vote: status = "Vote"; node.stats.inc (nano::stat::type::vote, nano::stat::detail::vote_valid); break; } if (node.config.logging.vote_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Vote from: %1% sequence: %2% block(s): %3%status: %4%") % vote_a->account.to_account () % std::to_string (vote_a->sequence) % vote_a->hashes_string () % status); } return result; } void nano::vote_processor::stop () { { std::lock_guard<std::mutex> lock (mutex); stopped = true; } condition.notify_all (); if (thread.joinable ()) { thread.join (); } } void nano::vote_processor::flush () { std::unique_lock<std::mutex> lock (mutex); while (active || !votes.empty ()) { condition.wait (lock); } } void nano::vote_processor::calculate_weights () { std::unique_lock<std::mutex> lock (mutex); if (!stopped) { representatives_1.clear (); representatives_2.clear (); representatives_3.clear (); auto supply (node.online_reps.online_stake ()); auto transaction (node.store.tx_begin_read ()); for (auto i (node.store.representation_begin (transaction)), n (node.store.representation_end ()); i != n; ++i) { nano::account representative (i->first); auto weight (node.ledger.weight (transaction, representative)); if (weight > supply / 1000) // 0.1% or above (level 1) { representatives_1.insert (representative); if (weight > supply / 100) // 1% or above (level 2) { representatives_2.insert 
(representative); if (weight > supply / 20) // 5% or above (level 3) { representatives_3.insert (representative); } } } } } } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (node_observers & node_observers, const std::string & name) { auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (collect_seq_con_info (node_observers.blocks, "blocks")); composite->add_component (collect_seq_con_info (node_observers.wallet, "wallet")); composite->add_component (collect_seq_con_info (node_observers.vote, "vote")); composite->add_component (collect_seq_con_info (node_observers.account_balance, "account_balance")); composite->add_component (collect_seq_con_info (node_observers.endpoint, "endpoint")); composite->add_component (collect_seq_con_info (node_observers.disconnect, "disconnect")); return composite; } std::unique_ptr<seq_con_info_component> collect_seq_con_info (vote_processor & vote_processor, const std::string & name) { size_t votes_count = 0; size_t representatives_1_count = 0; size_t representatives_2_count = 0; size_t representatives_3_count = 0; { std::lock_guard<std::mutex> (vote_processor.mutex); votes_count = vote_processor.votes.size (); representatives_1_count = vote_processor.representatives_1.size (); representatives_2_count = vote_processor.representatives_2.size (); representatives_3_count = vote_processor.representatives_3.size (); } auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "votes", votes_count, sizeof (decltype (vote_processor.votes)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_1", representatives_1_count, sizeof (decltype (vote_processor.representatives_1)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_2", representatives_2_count, sizeof (decltype (vote_processor.representatives_2)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "representatives_3", representatives_3_count, sizeof (decltype (vote_processor.representatives_3)::value_type) })); return composite; } } void nano::rep_crawler::add (nano::block_hash const & hash_a) { std::lock_guard<std::mutex> lock (mutex); active.insert (hash_a); } void nano::rep_crawler::remove (nano::block_hash const & hash_a) { std::lock_guard<std::mutex> lock (mutex); active.erase (hash_a); } bool nano::rep_crawler::exists (nano::block_hash const & hash_a) { std::lock_guard<std::mutex> lock (mutex); return active.count (hash_a) != 0; } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (rep_crawler & rep_crawler, const std::string & name) { size_t count = 0; { std::lock_guard<std::mutex> guard (rep_crawler.mutex); count = rep_crawler.active.size (); } auto sizeof_element = sizeof (decltype (rep_crawler.active)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "active", count, sizeof_element })); return composite; } } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_processor & block_processor, const std::string & name) { size_t state_blocks_count = 0; size_t blocks_count = 0; size_t blocks_hashes_count = 0; size_t forced_count = 0; size_t rolled_back_count = 0; { std::lock_guard<std::mutex> guard (block_processor.mutex); 
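// Container sizes are read below while holding block_processor.mutex so the reported counts
// form a consistent snapshot; only the plain size_t copies outlive the guard.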
state_blocks_count = block_processor.state_blocks.size (); blocks_count = block_processor.blocks.size (); blocks_hashes_count = block_processor.blocks_hashes.size (); forced_count = block_processor.forced.size (); rolled_back_count = block_processor.rolled_back.size (); } auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "state_blocks", state_blocks_count, sizeof (decltype (block_processor.state_blocks)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", blocks_count, sizeof (decltype (block_processor.blocks)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks_hashes", blocks_hashes_count, sizeof (decltype (block_processor.blocks_hashes)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "forced", forced_count, sizeof (decltype (block_processor.forced)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "rolled_back", rolled_back_count, sizeof (decltype (block_processor.rolled_back)::value_type) })); composite->add_component (collect_seq_con_info (block_processor.generator, "generator")); return composite; } } nano::node::node (nano::node_init & init_a, boost::asio::io_context & io_ctx_a, uint16_t peering_port_a, boost::filesystem::path const & application_path_a, nano::alarm & alarm_a, nano::logging const & logging_a, nano::work_pool & work_a) : node (init_a, io_ctx_a, application_path_a, alarm_a, nano::node_config (peering_port_a, logging_a), work_a) { } nano::node::node (nano::node_init & init_a, boost::asio::io_context & io_ctx_a, boost::filesystem::path const & application_path_a, nano::alarm & alarm_a, nano::node_config const & config_a, nano::work_pool & work_a, nano::node_flags flags_a) : io_ctx (io_ctx_a), config (config_a), flags (flags_a), alarm (alarm_a), work (work_a), store_impl (std::make_unique<nano::mdb_store> (init_a.block_store_init, config.logging, application_path_a / "data.ldb", config_a.lmdb_max_dbs, !flags.disable_unchecked_drop, flags.sideband_batch_size)), store (*store_impl), wallets_store_impl (std::make_unique<nano::mdb_wallets_store> (init_a.wallets_store_init, application_path_a / "wallets.ldb", config_a.lmdb_max_dbs)), wallets_store (*wallets_store_impl), gap_cache (*this), ledger (store, stats, config.epoch_block_link, config.epoch_block_signer), active (*this), network (*this, config.peering_port), bootstrap_initiator (*this), bootstrap (io_ctx_a, config.peering_port, *this), peers (network.endpoint ()), application_path (application_path_a), wallets (init_a.wallet_init, *this), port_mapping (*this), checker (config.signature_checker_threads), vote_processor (*this), warmed_up (0), block_processor (*this), block_processor_thread ([this]() { nano::thread_role::set (nano::thread_role::name::block_processing); this->block_processor.process_blocks (); }), online_reps (ledger, config.online_weight_minimum.number ()), stats (config.stat_config), vote_uniquer (block_uniquer), startup_time (std::chrono::steady_clock::now ()) { wallets.observer = [this](bool active) { observers.wallet.notify (active); }; peers.peer_observer = [this](nano::endpoint const & endpoint_a) { observers.endpoint.notify (endpoint_a); }; peers.disconnect_observer = [this]() { observers.disconnect.notify (); }; if (!config.callback_address.empty ()) { observers.blocks.add ([this](std::shared_ptr<nano::block> 
block_a, nano::account const & account_a, nano::amount const & amount_a, bool is_state_send_a) { if (this->block_arrival.recent (block_a->hash ())) { auto node_l (shared_from_this ()); background ([node_l, block_a, account_a, amount_a, is_state_send_a]() { boost::property_tree::ptree event; event.add ("account", account_a.to_account ()); event.add ("hash", block_a->hash ().to_string ()); std::string block_text; block_a->serialize_json (block_text); event.add ("block", block_text); event.add ("amount", amount_a.to_string_dec ()); if (is_state_send_a) { event.add ("is_send", is_state_send_a); } std::stringstream ostream; boost::property_tree::write_json (ostream, event); ostream.flush (); auto body (std::make_shared<std::string> (ostream.str ())); auto address (node_l->config.callback_address); auto port (node_l->config.callback_port); auto target (std::make_shared<std::string> (node_l->config.callback_target)); auto resolver (std::make_shared<boost::asio::ip::tcp::resolver> (node_l->io_ctx)); resolver->async_resolve (boost::asio::ip::tcp::resolver::query (address, std::to_string (port)), [node_l, address, port, target, body, resolver](boost::system::error_code const & ec, boost::asio::ip::tcp::resolver::iterator i_a) { if (!ec) { node_l->do_rpc_callback (i_a, address, port, target, body, resolver); } else { if (node_l->config.logging.callback_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Error resolving callback: %1%:%2%: %3%") % address % port % ec.message ()); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); } }); }); } }); } observers.endpoint.add ([this](nano::endpoint const & endpoint_a) { this->network.send_keepalive (endpoint_a); rep_query (*this, endpoint_a); }); observers.vote.add ([this](nano::transaction const & transaction, std::shared_ptr<nano::vote> vote_a, nano::endpoint const & endpoint_a) { assert (endpoint_a.address ().is_v6 ()); this->gap_cache.vote (vote_a); this->online_reps.observe (vote_a->account); nano::uint128_t rep_weight; nano::uint128_t min_rep_weight; { rep_weight = ledger.weight (transaction, vote_a->account); min_rep_weight = online_reps.online_stake () / 1000; } if (rep_weight > min_rep_weight) { bool rep_crawler_exists (false); for (auto hash : *vote_a) { if (this->rep_crawler.exists (hash)) { rep_crawler_exists = true; break; } } if (rep_crawler_exists) { // We see a valid non-replay vote for a block we requested, this node is probably a representative if (this->peers.rep_response (endpoint_a, vote_a->account, rep_weight)) { BOOST_LOG (log) << boost::str (boost::format ("Found a representative at %1%") % endpoint_a); // Rebroadcasting all active votes to new representative auto blocks (this->active.list_blocks (true)); for (auto i (blocks.begin ()), n (blocks.end ()); i != n; ++i) { if (*i != nullptr) { this->network.send_confirm_req (endpoint_a, *i); } } } } } }); if (NANO_VERSION_PATCH == 0) { BOOST_LOG (log) << "Node starting, version: " << NANO_MAJOR_MINOR_VERSION; } else { BOOST_LOG (log) << "Node starting, version: " << NANO_MAJOR_MINOR_RC_VERSION; } BOOST_LOG (log) << boost::str (boost::format ("Work pool running %1% threads") % work.threads.size ()); if (!init_a.error ()) { if (config.logging.node_lifetime_tracing ()) { BOOST_LOG (log) << "Constructing node"; } nano::genesis genesis; auto transaction (store.tx_begin_write ()); if (store.latest_begin (transaction) == store.latest_end ()) { // Store was empty meaning we just created it, add the genesis block 
store.initialize (transaction, genesis); } if (!store.block_exists (transaction, genesis.hash ())) { BOOST_LOG (log) << "Genesis block not found. Make sure the node network ID is correct."; std::exit (1); } node_id = nano::keypair (store.get_node_id (transaction)); BOOST_LOG (log) << "Node ID: " << node_id.pub.to_account (); } peers.online_weight_minimum = config.online_weight_minimum.number (); if (nano::is_live_network || nano::is_beta_network) { nano::bufferstream weight_stream ((const uint8_t *)nano_bootstrap_weights, nano_bootstrap_weights_size); nano::uint128_union block_height; if (!nano::try_read (weight_stream, block_height)) { auto max_blocks = (uint64_t)block_height.number (); auto transaction (store.tx_begin_read ()); if (ledger.store.block_count (transaction).sum () < max_blocks) { ledger.bootstrap_weight_max_blocks = max_blocks; while (true) { nano::account account; if (nano::try_read (weight_stream, account.bytes)) { break; } nano::amount weight; if (nano::try_read (weight_stream, weight.bytes)) { break; } BOOST_LOG (log) << "Using bootstrap rep weight: " << account.to_account () << " -> " << weight.format_balance (Mxrb_ratio, 0, true) << " XRB"; ledger.bootstrap_weights[account] = weight.number (); } } } } } nano::node::~node () { if (config.logging.node_lifetime_tracing ()) { BOOST_LOG (log) << "Destructing node"; } stop (); } void nano::node::do_rpc_callback (boost::asio::ip::tcp::resolver::iterator i_a, std::string const & address, uint16_t port, std::shared_ptr<std::string> target, std::shared_ptr<std::string> body, std::shared_ptr<boost::asio::ip::tcp::resolver> resolver) { if (i_a != boost::asio::ip::tcp::resolver::iterator{}) { auto node_l (shared_from_this ()); auto sock (std::make_shared<boost::asio::ip::tcp::socket> (node_l->io_ctx)); sock->async_connect (i_a->endpoint (), [node_l, target, body, sock, address, port, i_a, resolver](boost::system::error_code const & ec) mutable { if (!ec) { auto req (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ()); req->method (boost::beast::http::verb::post); req->target (*target); req->version (11); req->insert (boost::beast::http::field::host, address); req->insert (boost::beast::http::field::content_type, "application/json"); req->body () = *body; req->prepare_payload (); boost::beast::http::async_write (*sock, *req, [node_l, sock, address, port, req, i_a, target, body, resolver](boost::system::error_code const & ec, size_t bytes_transferred) mutable { if (!ec) { auto sb (std::make_shared<boost::beast::flat_buffer> ()); auto resp (std::make_shared<boost::beast::http::response<boost::beast::http::string_body>> ()); boost::beast::http::async_read (*sock, *sb, *resp, [node_l, sb, resp, sock, address, port, i_a, target, body, resolver](boost::system::error_code const & ec, size_t bytes_transferred) mutable { if (!ec) { if (resp->result () == boost::beast::http::status::ok) { node_l->stats.inc (nano::stat::type::http_callback, nano::stat::detail::initiate, nano::stat::dir::out); } else { if (node_l->config.logging.callback_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Callback to %1%:%2% failed with status: %3%") % address % port % resp->result ()); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); } } else { if (node_l->config.logging.callback_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Unable to complete callback: %1%:%2%: %3%") % address % port % ec.message ()); } node_l->stats.inc
(nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); }; }); } else { if (node_l->config.logging.callback_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Unable to send callback: %1%:%2%: %3%") % address % port % ec.message ()); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); } }); } else { if (node_l->config.logging.callback_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Unable to connect to callback address: %1%:%2%: %3%") % address % port % ec.message ()); } node_l->stats.inc (nano::stat::type::error, nano::stat::detail::http_callback, nano::stat::dir::out); ++i_a; node_l->do_rpc_callback (i_a, address, port, target, body, resolver); } }); } } bool nano::node::copy_with_compaction (boost::filesystem::path const & destination_file) { return !mdb_env_copy2 (boost::polymorphic_downcast<nano::mdb_store *> (store_impl.get ())->env.environment, destination_file.string ().c_str (), MDB_CP_COMPACT); } void nano::node::send_keepalive (nano::endpoint const & endpoint_a) { network.send_keepalive (nano::map_endpoint_to_v6 (endpoint_a)); } void nano::node::process_fork (nano::transaction const & transaction_a, std::shared_ptr<nano::block> block_a) { auto root (block_a->root ()); if (!store.block_exists (transaction_a, block_a->type (), block_a->hash ()) && store.root_exists (transaction_a, block_a->root ())) { std::shared_ptr<nano::block> ledger_block (ledger.forked_block (transaction_a, *block_a)); if (ledger_block) { std::weak_ptr<nano::node> this_w (shared_from_this ()); if (!active.start (ledger_block, [this_w, root](std::shared_ptr<nano::block>) { if (auto this_l = this_w.lock ()) { auto attempt (this_l->bootstrap_initiator.current_attempt ()); if (attempt && attempt->mode == nano::bootstrap_mode::legacy) { auto transaction (this_l->store.tx_begin_read ()); auto account (this_l->ledger.store.frontier_get (transaction, root)); if (!account.is_zero ()) { attempt->requeue_pull (nano::pull_info (account, root, root)); } else if (this_l->ledger.store.account_exists (transaction, root)) { attempt->requeue_pull (nano::pull_info (root, nano::block_hash (0), nano::block_hash (0))); } } } })) { BOOST_LOG (log) << boost::str (boost::format ("Resolving fork between our block: %1% and block %2% both with root %3%") % ledger_block->hash ().to_string () % block_a->hash ().to_string () % block_a->root ().to_string ()); network.broadcast_confirm_req (ledger_block); } } } } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (node & node, const std::string & name) { auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (collect_seq_con_info (node.alarm, "alarm")); composite->add_component (collect_seq_con_info (node.work, "work")); composite->add_component (collect_seq_con_info (node.gap_cache, "gap_cache")); composite->add_component (collect_seq_con_info (node.ledger, "ledger")); composite->add_component (collect_seq_con_info (node.active, "active")); composite->add_component (collect_seq_con_info (node.bootstrap_initiator, "bootstrap_initiator")); composite->add_component (collect_seq_con_info (node.bootstrap, "bootstrap")); composite->add_component (collect_seq_con_info (node.peers, "peers")); composite->add_component (collect_seq_con_info (node.observers, "observers")); composite->add_component (collect_seq_con_info (node.wallets, "wallets")); composite->add_component (collect_seq_con_info (node.vote_processor, 
"vote_processor")); composite->add_component (collect_seq_con_info (node.rep_crawler, "rep_crawler")); composite->add_component (collect_seq_con_info (node.block_processor, "block_processor")); composite->add_component (collect_seq_con_info (node.block_arrival, "block_arrival")); composite->add_component (collect_seq_con_info (node.online_reps, "online_reps")); composite->add_component (collect_seq_con_info (node.votes_cache, "votes_cache")); composite->add_component (collect_seq_con_info (node.block_uniquer, "block_uniquer")); composite->add_component (collect_seq_con_info (node.vote_uniquer, "vote_uniquer")); return composite; } } nano::gap_cache::gap_cache (nano::node & node_a) : node (node_a) { } void nano::gap_cache::add (nano::transaction const & transaction_a, nano::block_hash const & hash_a, std::chrono::steady_clock::time_point time_point_a) { std::lock_guard<std::mutex> lock (mutex); auto existing (blocks.get<1> ().find (hash_a)); if (existing != blocks.get<1> ().end ()) { blocks.get<1> ().modify (existing, [time_point_a](nano::gap_information & info) { info.arrival = time_point_a; }); } else { blocks.insert ({ time_point_a, hash_a, std::unordered_set<nano::account> () }); if (blocks.size () > max) { blocks.get<0> ().erase (blocks.get<0> ().begin ()); } } } void nano::gap_cache::vote (std::shared_ptr<nano::vote> vote_a) { std::lock_guard<std::mutex> lock (mutex); auto transaction (node.store.tx_begin_read ()); for (auto hash : *vote_a) { auto existing (blocks.get<1> ().find (hash)); if (existing != blocks.get<1> ().end ()) { auto is_new (false); blocks.get<1> ().modify (existing, [&](nano::gap_information & info) { is_new = info.voters.insert (vote_a->account).second; }); if (is_new) { uint128_t tally; for (auto & voter : existing->voters) { tally += node.ledger.weight (transaction, voter); } bool start_bootstrap (false); if (!node.flags.disable_lazy_bootstrap) { if (tally >= node.config.online_weight_minimum.number ()) { start_bootstrap = true; } } else if (!node.flags.disable_legacy_bootstrap && tally > bootstrap_threshold (transaction)) { start_bootstrap = true; } if (start_bootstrap) { auto node_l (node.shared ()); auto now (std::chrono::steady_clock::now ()); node.alarm.add (nano::is_test_network ? 
now + std::chrono::milliseconds (5) : now + std::chrono::seconds (5), [node_l, hash]() { auto transaction (node_l->store.tx_begin_read ()); if (!node_l->store.block_exists (transaction, hash)) { if (!node_l->bootstrap_initiator.in_progress ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Missing block %1% which has enough votes to warrant lazy bootstrapping it") % hash.to_string ()); } if (!node_l->flags.disable_lazy_bootstrap) { node_l->bootstrap_initiator.bootstrap_lazy (hash); } else if (!node_l->flags.disable_legacy_bootstrap) { node_l->bootstrap_initiator.bootstrap (); } } }); } } } } } nano::uint128_t nano::gap_cache::bootstrap_threshold (nano::transaction const & transaction_a) { auto result ((node.online_reps.online_stake () / 256) * node.config.bootstrap_fraction_numerator); return result; } size_t nano::gap_cache::size () { std::lock_guard<std::mutex> lock (mutex); return blocks.size (); } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (gap_cache & gap_cache, const std::string & name) { auto count = gap_cache.size (); auto sizeof_element = sizeof (decltype (gap_cache.blocks)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", count, sizeof_element })); return composite; } } void nano::network::confirm_send (nano::confirm_ack const & confirm_a, std::shared_ptr<std::vector<uint8_t>> bytes_a, nano::endpoint const & endpoint_a) { if (node.config.logging.network_publish_logging ()) { BOOST_LOG (node.log) << boost::str (boost::format ("Sending confirm_ack for block(s) %1%to %2% sequence %3%") % confirm_a.vote->hashes_string () % endpoint_a % std::to_string (confirm_a.vote->sequence)); } std::weak_ptr<nano::node> node_w (node.shared ()); node.network.send_buffer (bytes_a->data (), bytes_a->size (), endpoint_a, [bytes_a, node_w, endpoint_a](boost::system::error_code const & ec, size_t size_a) { if (auto node_l = node_w.lock ()) { if (ec && node_l->config.logging.network_logging ()) { BOOST_LOG (node_l->log) << boost::str (boost::format ("Error broadcasting confirm_ack to %1%: %2%") % endpoint_a % ec.message ()); } else { node_l->stats.inc (nano::stat::type::message, nano::stat::detail::confirm_ack, nano::stat::dir::out); } } }); } void nano::node::process_active (std::shared_ptr<nano::block> incoming) { block_arrival.add (incoming->hash ()); block_processor.add (incoming, nano::seconds_since_epoch ()); } nano::process_return nano::node::process (nano::block const & block_a) { auto transaction (store.tx_begin_write ()); auto result (ledger.process (transaction, block_a)); return result; } void nano::node::start () { network.start (); add_initial_peers (); ongoing_keepalive (); ongoing_syn_cookie_cleanup (); if (!flags.disable_legacy_bootstrap) { ongoing_bootstrap (); } else if (!flags.disable_unchecked_cleanup) { ongoing_unchecked_cleanup (); } ongoing_store_flush (); ongoing_rep_crawl (); ongoing_rep_calculation (); ongoing_peer_store (); ongoing_online_weight_calculation_queue (); if (!flags.disable_bootstrap_listener) { bootstrap.start (); } if (!flags.disable_backup) { backup_wallet (); } search_pending (); if (!flags.disable_wallet_bootstrap) { // Delay to start wallet lazy bootstrap auto this_l (shared ()); alarm.add (std::chrono::steady_clock::now () + std::chrono::minutes (1), [this_l]() { this_l->bootstrap_wallet (); }); } port_mapping.start (); } void nano::node::stop () { BOOST_LOG (log) << "Node stopping"; 
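// Rough shutdown order below: stop block processing and join its thread first, then vote
// processing, active elections, networking, bootstrap and the remaining services.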
block_processor.stop (); if (block_processor_thread.joinable ()) { block_processor_thread.join (); } vote_processor.stop (); active.stop (); network.stop (); bootstrap_initiator.stop (); bootstrap.stop (); port_mapping.stop (); checker.stop (); wallets.stop (); } void nano::node::keepalive_preconfigured (std::vector<std::string> const & peers_a) { for (auto i (peers_a.begin ()), n (peers_a.end ()); i != n; ++i) { keepalive (*i, nano::network::node_port, true); } } nano::block_hash nano::node::latest (nano::account const & account_a) { auto transaction (store.tx_begin_read ()); return ledger.latest (transaction, account_a); } nano::uint128_t nano::node::balance (nano::account const & account_a) { auto transaction (store.tx_begin_read ()); return ledger.account_balance (transaction, account_a); } std::shared_ptr<nano::block> nano::node::block (nano::block_hash const & hash_a) { auto transaction (store.tx_begin_read ()); return store.block_get (transaction, hash_a); } std::pair<nano::uint128_t, nano::uint128_t> nano::node::balance_pending (nano::account const & account_a) { std::pair<nano::uint128_t, nano::uint128_t> result; auto transaction (store.tx_begin_read ()); result.first = ledger.account_balance (transaction, account_a); result.second = ledger.account_pending (transaction, account_a); return result; } nano::uint128_t nano::node::weight (nano::account const & account_a) { auto transaction (store.tx_begin_read ()); return ledger.weight (transaction, account_a); } nano::account nano::node::representative (nano::account const & account_a) { auto transaction (store.tx_begin_read ()); nano::account_info info; nano::account result (0); if (!store.account_get (transaction, account_a, info)) { result = info.rep_block; } return result; } void nano::node::ongoing_keepalive () { keepalive_preconfigured (config.preconfigured_peers); auto peers_l (peers.purge_list (std::chrono::steady_clock::now () - cutoff)); for (auto i (peers_l.begin ()), j (peers_l.end ()); i != j && std::chrono::steady_clock::now () - i->last_attempt > period; ++i) { network.send_keepalive (i->endpoint); } std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + period, [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_keepalive (); } }); } void nano::node::ongoing_syn_cookie_cleanup () { peers.purge_syn_cookies (std::chrono::steady_clock::now () - syn_cookie_cutoff); std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + (syn_cookie_cutoff * 2), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_syn_cookie_cleanup (); } }); } void nano::node::ongoing_rep_crawl () { auto now (std::chrono::steady_clock::now ()); auto peers_l (peers.rep_crawl ()); rep_query (*this, peers_l); if (network.on) { std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (now + std::chrono::seconds (4), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_rep_crawl (); } }); } } void nano::node::ongoing_rep_calculation () { auto now (std::chrono::steady_clock::now ()); vote_processor.calculate_weights (); std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (now + std::chrono::minutes (10), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_rep_calculation (); } }); } void nano::node::ongoing_bootstrap () { auto next_wakeup (300); if (warmed_up < 3) { // Re-attempt bootstrapping more aggressively on startup next_wakeup = 5; if (!bootstrap_initiator.in_progress () && !peers.empty ()) 
{ ++warmed_up; } } bootstrap_initiator.bootstrap (); std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (next_wakeup), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_bootstrap (); } }); } void nano::node::ongoing_store_flush () { { auto transaction (store.tx_begin_write ()); store.flush (transaction); } std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + std::chrono::seconds (5), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_store_flush (); } }); } void nano::node::ongoing_peer_store () { auto endpoint_peers = peers.list (); if (!endpoint_peers.empty ()) { // Clear all peers then refresh with the current list of peers auto transaction (store.tx_begin_write ()); store.peer_clear (transaction); for (const auto & endpoint : endpoint_peers) { nano::endpoint_key endpoint_key (endpoint.address ().to_v6 ().to_bytes (), endpoint.port ()); store.peer_put (transaction, std::move (endpoint_key)); } } std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + peer_interval, [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_peer_store (); } }); } void nano::node::backup_wallet () { auto transaction (wallets.tx_begin_read ()); for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n; ++i) { boost::system::error_code error_chmod; auto backup_path (application_path / "backup"); boost::filesystem::create_directories (backup_path); nano::set_secure_perm_directory (backup_path, error_chmod); i->second->store.write_backup (transaction, backup_path / (i->first.to_string () + ".json")); } auto this_l (shared ()); alarm.add (std::chrono::steady_clock::now () + backup_interval, [this_l]() { this_l->backup_wallet (); }); } void nano::node::search_pending () { // Reload wallets from disk wallets.reload (); // Search pending wallets.search_pending_all (); auto this_l (shared ()); alarm.add (std::chrono::steady_clock::now () + search_pending_interval, [this_l]() { this_l->search_pending (); }); } void nano::node::bootstrap_wallet () { std::deque<nano::account> accounts; { std::lock_guard<std::mutex> lock (wallets.mutex); auto transaction (wallets.tx_begin_read ()); for (auto i (wallets.items.begin ()), n (wallets.items.end ()); i != n && accounts.size () < 128; ++i) { auto & wallet (*i->second); std::lock_guard<std::recursive_mutex> wallet_lock (wallet.store.mutex); for (auto j (wallet.store.begin (transaction)), m (wallet.store.end ()); j != m && accounts.size () < 128; ++j) { nano::account account (j->first); accounts.push_back (account); } } } bootstrap_initiator.bootstrap_wallet (accounts); } void nano::node::unchecked_cleanup () { std::deque<nano::unchecked_key> cleaning_list; // Collect old unchecked keys { auto now (nano::seconds_since_epoch ()); auto transaction (store.tx_begin_read ()); // Max 128k records to clean, max 2 minutes reading to prevent slow i/o systems start issues for (auto i (store.unchecked_begin (transaction)), n (store.unchecked_end ()); i != n && cleaning_list.size () < 128 * 1024 && nano::seconds_since_epoch () - now < 120; ++i) { nano::unchecked_key key (i->first); nano::unchecked_info info (i->second); if ((now - info.modified) > config.unchecked_cutoff_time.count ()) { cleaning_list.push_back (key); } } } // Delete old unchecked keys in batches while (!cleaning_list.empty ()) { size_t deleted_count (0); auto transaction (store.tx_begin_write ()); while 
(deleted_count++ < 2 * 1024 && !cleaning_list.empty ()) { auto key (cleaning_list.front ()); cleaning_list.pop_front (); store.unchecked_del (transaction, key); } } } void nano::node::ongoing_unchecked_cleanup () { if (!bootstrap_initiator.in_progress ()) { unchecked_cleanup (); } auto this_l (shared ()); alarm.add (std::chrono::steady_clock::now () + unchecked_cleanup_interval, [this_l]() { this_l->ongoing_unchecked_cleanup (); }); } int nano::node::price (nano::uint128_t const & balance_a, int amount_a) { assert (balance_a >= amount_a * nano::Gxrb_ratio); auto balance_l (balance_a); double result (0.0); for (auto i (0); i < amount_a; ++i) { balance_l -= nano::Gxrb_ratio; auto balance_scaled ((balance_l / nano::Mxrb_ratio).convert_to<double> ()); auto units (balance_scaled / 1000.0); auto unit_price (((free_cutoff - units) / free_cutoff) * price_max); result += std::min (std::max (0.0, unit_price), price_max); } return static_cast<int> (result * 100.0); } namespace { class work_request { public: work_request (boost::asio::io_context & io_ctx_a, boost::asio::ip::address address_a, uint16_t port_a) : address (address_a), port (port_a), socket (io_ctx_a) { } boost::asio::ip::address address; uint16_t port; boost::beast::flat_buffer buffer; boost::beast::http::response<boost::beast::http::string_body> response; boost::asio::ip::tcp::socket socket; }; class distributed_work : public std::enable_shared_from_this<distributed_work> { public: distributed_work (std::shared_ptr<nano::node> const & node_a, nano::block_hash const & root_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) : distributed_work (1, node_a, root_a, callback_a, difficulty_a) { assert (node_a != nullptr); } distributed_work (unsigned int backoff_a, std::shared_ptr<nano::node> const & node_a, nano::block_hash const & root_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) : callback (callback_a), backoff (backoff_a), node (node_a), root (root_a), need_resolve (node_a->config.work_peers), difficulty (difficulty_a) { assert (node_a != nullptr); completed.clear (); } void start () { if (need_resolve.empty ()) { start_work (); } else { auto current (need_resolve.back ()); need_resolve.pop_back (); auto this_l (shared_from_this ()); boost::system::error_code ec; auto parsed_address (boost::asio::ip::address_v6::from_string (current.first, ec)); if (!ec) { outstanding[parsed_address] = current.second; start (); } else { node->network.resolver.async_resolve (boost::asio::ip::udp::resolver::query (current.first, std::to_string (current.second)), [current, this_l](boost::system::error_code const & ec, boost::asio::ip::udp::resolver::iterator i_a) { if (!ec) { for (auto i (i_a), n (boost::asio::ip::udp::resolver::iterator{}); i != n; ++i) { auto endpoint (i->endpoint ()); this_l->outstanding[endpoint.address ()] = endpoint.port (); } } else { BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Error resolving work peer: %1%:%2%: %3%") % current.first % current.second % ec.message ()); } this_l->start (); }); } } } void start_work () { if (!outstanding.empty ()) { auto this_l (shared_from_this ()); std::lock_guard<std::mutex> lock (mutex); for (auto const & i : outstanding) { auto host (i.first); auto service (i.second); node->background ([this_l, host, service]() { auto connection (std::make_shared<work_request> (this_l->node->io_ctx, host, service)); connection->socket.async_connect (nano::tcp_endpoint (host, service), [this_l, connection](boost::system::error_code const & ec) { if (!ec) { 
std::string request_string; { boost::property_tree::ptree request; request.put ("action", "work_generate"); request.put ("hash", this_l->root.to_string ()); std::stringstream ostream; boost::property_tree::write_json (ostream, request); request_string = ostream.str (); } auto request (std::make_shared<boost::beast::http::request<boost::beast::http::string_body>> ()); request->method (boost::beast::http::verb::post); request->target ("/"); request->version (11); request->body () = request_string; request->prepare_payload (); boost::beast::http::async_write (connection->socket, *request, [this_l, connection, request](boost::system::error_code const & ec, size_t bytes_transferred) { if (!ec) { boost::beast::http::async_read (connection->socket, connection->buffer, connection->response, [this_l, connection](boost::system::error_code const & ec, size_t bytes_transferred) { if (!ec) { if (connection->response.result () == boost::beast::http::status::ok) { this_l->success (connection->response.body (), connection->address); } else { BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Work peer responded with an error %1% %2%: %3%") % connection->address % connection->port % connection->response.result ()); this_l->failure (connection->address); } } else { BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Unable to read from work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()); this_l->failure (connection->address); } }); } else { BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Unable to write to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()); this_l->failure (connection->address); } }); } else { BOOST_LOG (this_l->node->log) << boost::str (boost::format ("Unable to connect to work_peer %1% %2%: %3% (%4%)") % connection->address % connection->port % ec.message () % ec.value ()); this_l->failure (connection->address); } }); }); } } else { handle_failure (true); } } void stop () { auto this_l (shared_from_this ()); std::lock_guard<std::mutex> lock (mutex); for (auto const & i : outstanding) { auto host (i.first); node->background ([this_l, host]() { std::string request_string; { boost::property_tree::ptree request; request.put ("action", "work_cancel"); request.put ("hash", this_l->root.to_string ()); std::stringstream ostream; boost::property_tree::write_json (ostream, request); request_string = ostream.str (); } boost::beast::http::request<boost::beast::http::string_body> request; request.method (boost::beast::http::verb::post); request.target ("/"); request.version (11); request.body () = request_string; request.prepare_payload (); auto socket (std::make_shared<boost::asio::ip::tcp::socket> (this_l->node->io_ctx)); boost::beast::http::async_write (*socket, request, [socket](boost::system::error_code const & ec, size_t bytes_transferred) { }); }); } outstanding.clear (); } void success (std::string const & body_a, boost::asio::ip::address const & address) { auto last (remove (address)); std::stringstream istream (body_a); try { boost::property_tree::ptree result; boost::property_tree::read_json (istream, result); auto work_text (result.get<std::string> ("work")); uint64_t work; if (!nano::from_string_hex (work_text, work)) { if (!nano::work_validate (root, work)) { set_once (work); stop (); } else { BOOST_LOG (node->log) << boost::str (boost::format ("Incorrect work response from %1% for root %2%: %3%") % address % root.to_string () % work_text); handle_failure 
(last); } } else { BOOST_LOG (node->log) << boost::str (boost::format ("Work response from %1% wasn't a number: %2%") % address % work_text); handle_failure (last); } } catch (...) { BOOST_LOG (node->log) << boost::str (boost::format ("Work response from %1% wasn't parsable: %2%") % address % body_a); handle_failure (last); } } void set_once (uint64_t work_a) { if (!completed.test_and_set ()) { callback (work_a); } } void failure (boost::asio::ip::address const & address) { auto last (remove (address)); handle_failure (last); } void handle_failure (bool last) { if (last) { if (!completed.test_and_set ()) { if (node->config.work_threads != 0 || node->work.opencl) { auto callback_l (callback); // clang-format off node->work.generate (root, [callback_l](boost::optional<uint64_t> const & work_a) { callback_l (work_a.value ()); }, difficulty); // clang-format on } else { if (backoff == 1 && node->config.logging.work_generation_time ()) { BOOST_LOG (node->log) << "Work peer(s) failed to generate work for root " << root.to_string () << ", retrying..."; } auto now (std::chrono::steady_clock::now ()); auto root_l (root); auto callback_l (callback); std::weak_ptr<nano::node> node_w (node); auto next_backoff (std::min (backoff * 2, (unsigned int)60 * 5)); // clang-format off node->alarm.add (now + std::chrono::seconds (backoff), [ node_w, root_l, callback_l, next_backoff, difficulty = difficulty ] { if (auto node_l = node_w.lock ()) { auto work_generation (std::make_shared<distributed_work> (next_backoff, node_l, root_l, callback_l, difficulty)); work_generation->start (); } }); // clang-format on } } } } bool remove (boost::asio::ip::address const & address) { std::lock_guard<std::mutex> lock (mutex); outstanding.erase (address); return outstanding.empty (); } std::function<void(uint64_t)> callback; unsigned int backoff; // in seconds std::shared_ptr<nano::node> node; nano::block_hash root; std::mutex mutex; std::map<boost::asio::ip::address, uint16_t> outstanding; std::vector<std::pair<std::string, uint16_t>> need_resolve; std::atomic_flag completed; uint64_t difficulty; }; } void nano::node::work_generate_blocking (nano::block & block_a, uint64_t difficulty_a) { block_a.block_work_set (work_generate_blocking (block_a.root (), difficulty_a)); } void nano::node::work_generate (nano::uint256_union const & hash_a, std::function<void(uint64_t)> callback_a, uint64_t difficulty_a) { auto work_generation (std::make_shared<distributed_work> (shared (), hash_a, callback_a, difficulty_a)); work_generation->start (); } uint64_t nano::node::work_generate_blocking (nano::uint256_union const & hash_a, uint64_t difficulty_a) { std::promise<uint64_t> promise; std::future<uint64_t> future = promise.get_future (); // clang-format off work_generate (hash_a, [&promise](uint64_t work_a) { promise.set_value (work_a); }, difficulty_a); // clang-format on return future.get (); } void nano::node::add_initial_peers () { auto transaction (store.tx_begin_read ()); for (auto i (store.peers_begin (transaction)), n (store.peers_end ()); i != n; ++i) { nano::endpoint endpoint (boost::asio::ip::address_v6 (i->first.address_bytes ()), i->first.port ()); if (!peers.reachout (endpoint)) { send_keepalive (endpoint); } } } void nano::node::block_confirm (std::shared_ptr<nano::block> block_a) { active.start (block_a); network.broadcast_confirm_req (block_a); // Calculate votes for local representatives if (config.enable_voting && active.active (*block_a)) { block_processor.generator.add (block_a->hash ()); } } nano::uint128_t 
nano::node::delta () { auto result ((online_reps.online_stake () / 100) * config.online_weight_quorum); return result; } void nano::node::ongoing_online_weight_calculation_queue () { std::weak_ptr<nano::node> node_w (shared_from_this ()); alarm.add (std::chrono::steady_clock::now () + (std::chrono::seconds (nano::online_reps::weight_period)), [node_w]() { if (auto node_l = node_w.lock ()) { node_l->ongoing_online_weight_calculation (); } }); } void nano::node::ongoing_online_weight_calculation () { online_reps.sample (); ongoing_online_weight_calculation_queue (); } namespace { class confirmed_visitor : public nano::block_visitor { public: confirmed_visitor (nano::transaction const & transaction_a, nano::node & node_a, std::shared_ptr<nano::block> block_a, nano::block_hash const & hash_a) : transaction (transaction_a), node (node_a), block (block_a), hash (hash_a) { } virtual ~confirmed_visitor () = default; void scan_receivable (nano::account const & account_a) { for (auto i (node.wallets.items.begin ()), n (node.wallets.items.end ()); i != n; ++i) { auto wallet (i->second); auto transaction_l (node.wallets.tx_begin_read ()); if (wallet->store.exists (transaction_l, account_a)) { nano::account representative; nano::pending_info pending; representative = wallet->store.representative (transaction_l); auto error (node.store.pending_get (transaction, nano::pending_key (account_a, hash), pending)); if (!error) { auto node_l (node.shared ()); auto amount (pending.amount.number ()); wallet->receive_async (block, representative, amount, [](std::shared_ptr<nano::block>) {}); } else { if (!node.store.block_exists (transaction, hash)) { BOOST_LOG (node.log) << boost::str (boost::format ("Confirmed block is missing: %1%") % hash.to_string ()); assert (false && "Confirmed block is missing"); } else { BOOST_LOG (node.log) << boost::str (boost::format ("Block %1% has already been received") % hash.to_string ()); } } } } } void state_block (nano::state_block const & block_a) override { scan_receivable (block_a.hashables.link); } void send_block (nano::send_block const & block_a) override { scan_receivable (block_a.hashables.destination); } void receive_block (nano::receive_block const &) override { } void open_block (nano::open_block const &) override { } void change_block (nano::change_block const &) override { } nano::transaction const & transaction; nano::node & node; std::shared_ptr<nano::block> block; nano::block_hash const & hash; }; } void nano::node::process_confirmed (std::shared_ptr<nano::block> block_a, uint8_t iteration) { auto hash (block_a->hash ()); if (ledger.block_exists (block_a->type (), hash)) { auto transaction (store.tx_begin_read ()); confirmed_visitor visitor (transaction, *this, block_a, hash); block_a->visit (visitor); auto account (ledger.account (transaction, hash)); auto amount (ledger.amount (transaction, hash)); bool is_state_send (false); nano::account pending_account (0); if (auto state = dynamic_cast<nano::state_block *> (block_a.get ())) { is_state_send = ledger.is_send (transaction, *state); pending_account = state->hashables.link; } if (auto send = dynamic_cast<nano::send_block *> (block_a.get ())) { pending_account = send->hashables.destination; } observers.blocks.notify (block_a, account, amount, is_state_send); if (amount > 0) { observers.account_balance.notify (account, false); if (!pending_account.is_zero ()) { observers.account_balance.notify (pending_account, true); } } } // Limit to 0.5 * 20 = 10 seconds (more than max block_processor::process_batch finish 
time) else if (iteration < 20) { iteration++; std::weak_ptr<nano::node> node_w (shared ()); alarm.add (std::chrono::steady_clock::now () + process_confirmed_interval, [node_w, block_a, iteration]() { if (auto node_l = node_w.lock ()) { node_l->process_confirmed (block_a, iteration); } }); } } void nano::node::process_message (nano::message & message_a, nano::endpoint const & sender_a) { network_message_visitor visitor (*this, sender_a); message_a.visit (visitor); } nano::endpoint nano::network::endpoint () { boost::system::error_code ec; std::unique_lock<std::mutex> lock (socket_mutex); auto port (socket.local_endpoint (ec).port ()); if (ec) { BOOST_LOG (node.log) << "Unable to retrieve port: " << ec.message (); } return nano::endpoint (boost::asio::ip::address_v6::loopback (), port); } bool nano::block_arrival::add (nano::block_hash const & hash_a) { std::lock_guard<std::mutex> lock (mutex); auto now (std::chrono::steady_clock::now ()); auto inserted (arrival.insert (nano::block_arrival_info{ now, hash_a })); auto result (!inserted.second); return result; } bool nano::block_arrival::recent (nano::block_hash const & hash_a) { std::lock_guard<std::mutex> lock (mutex); auto now (std::chrono::steady_clock::now ()); while (arrival.size () > arrival_size_min && arrival.begin ()->arrival + arrival_time_min < now) { arrival.erase (arrival.begin ()); } return arrival.get<1> ().find (hash_a) != arrival.get<1> ().end (); } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (block_arrival & block_arrival, const std::string & name) { size_t count = 0; { std::lock_guard<std::mutex> guard (block_arrival.mutex); count = block_arrival.arrival.size (); } auto sizeof_element = sizeof (decltype (block_arrival.arrival)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "arrival", count, sizeof_element })); return composite; } } nano::online_reps::online_reps (nano::ledger & ledger_a, nano::uint128_t minimum_a) : ledger (ledger_a), minimum (minimum_a) { auto transaction (ledger_a.store.tx_begin_read ()); online = trend (transaction); } void nano::online_reps::observe (nano::account const & rep_a) { auto transaction (ledger.store.tx_begin_read ()); if (ledger.weight (transaction, rep_a) > nano::Gxrb_ratio) { std::lock_guard<std::mutex> lock (mutex); reps.insert (rep_a); } } void nano::online_reps::sample () { auto transaction (ledger.store.tx_begin_write ()); // Discard oldest entries while (ledger.store.online_weight_count (transaction) >= weight_samples) { auto oldest (ledger.store.online_weight_begin (transaction)); assert (oldest != ledger.store.online_weight_end ()); ledger.store.online_weight_del (transaction, oldest->first); } // Calculate current active rep weight nano::uint128_t current; std::unordered_set<nano::account> reps_copy; { std::lock_guard<std::mutex> lock (mutex); reps_copy.swap (reps); } for (auto & i : reps_copy) { current += ledger.weight (transaction, i); } ledger.store.online_weight_put (transaction, std::chrono::system_clock::now ().time_since_epoch ().count (), current); auto trend_l (trend (transaction)); std::lock_guard<std::mutex> lock (mutex); online = trend_l; } nano::uint128_t nano::online_reps::trend (nano::transaction & transaction_a) { std::vector<nano::uint128_t> items; items.reserve (weight_samples + 1); items.push_back (minimum); for (auto i (ledger.store.online_weight_begin (transaction_a)), n (ledger.store.online_weight_end ()); i != n; 
++i) { items.push_back (i->second.number ()); } // Pick median value for our target vote weight auto median_idx = items.size () / 2; nth_element (items.begin (), items.begin () + median_idx, items.end ()); return nano::uint128_t{ items[median_idx] }; } nano::uint128_t nano::online_reps::online_stake () { std::lock_guard<std::mutex> lock (mutex); return std::max (online, minimum); } std::vector<nano::account> nano::online_reps::list () { std::vector<nano::account> result; std::lock_guard<std::mutex> lock (mutex); for (auto & i : reps) { result.push_back (i); } return result; } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (online_reps & online_reps, const std::string & name) { size_t count = 0; { std::lock_guard<std::mutex> guard (online_reps.mutex); count = online_reps.reps.size (); } auto sizeof_element = sizeof (decltype (online_reps.reps)::value_type); auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "arrival", count, sizeof_element })); return composite; } } namespace { boost::asio::ip::address_v6 mapped_from_v4_bytes (unsigned long address_a) { return boost::asio::ip::address_v6::v4_mapped (boost::asio::ip::address_v4 (address_a)); } } bool nano::reserved_address (nano::endpoint const & endpoint_a, bool blacklist_loopback) { assert (endpoint_a.address ().is_v6 ()); auto bytes (endpoint_a.address ().to_v6 ()); auto result (false); static auto const rfc1700_min (mapped_from_v4_bytes (0x00000000ul)); static auto const rfc1700_max (mapped_from_v4_bytes (0x00fffffful)); static auto const ipv4_loopback_min (mapped_from_v4_bytes (0x7f000000ul)); static auto const ipv4_loopback_max (mapped_from_v4_bytes (0x7ffffffful)); static auto const rfc1918_1_min (mapped_from_v4_bytes (0x0a000000ul)); static auto const rfc1918_1_max (mapped_from_v4_bytes (0x0afffffful)); static auto const rfc1918_2_min (mapped_from_v4_bytes (0xac100000ul)); static auto const rfc1918_2_max (mapped_from_v4_bytes (0xac1ffffful)); static auto const rfc1918_3_min (mapped_from_v4_bytes (0xc0a80000ul)); static auto const rfc1918_3_max (mapped_from_v4_bytes (0xc0a8fffful)); static auto const rfc6598_min (mapped_from_v4_bytes (0x64400000ul)); static auto const rfc6598_max (mapped_from_v4_bytes (0x647ffffful)); static auto const rfc5737_1_min (mapped_from_v4_bytes (0xc0000200ul)); static auto const rfc5737_1_max (mapped_from_v4_bytes (0xc00002fful)); static auto const rfc5737_2_min (mapped_from_v4_bytes (0xc6336400ul)); static auto const rfc5737_2_max (mapped_from_v4_bytes (0xc63364fful)); static auto const rfc5737_3_min (mapped_from_v4_bytes (0xcb007100ul)); static auto const rfc5737_3_max (mapped_from_v4_bytes (0xcb0071fful)); static auto const ipv4_multicast_min (mapped_from_v4_bytes (0xe0000000ul)); static auto const ipv4_multicast_max (mapped_from_v4_bytes (0xeffffffful)); static auto const rfc6890_min (mapped_from_v4_bytes (0xf0000000ul)); static auto const rfc6890_max (mapped_from_v4_bytes (0xfffffffful)); static auto const rfc6666_min (boost::asio::ip::address_v6::from_string ("100::")); static auto const rfc6666_max (boost::asio::ip::address_v6::from_string ("100::ffff:ffff:ffff:ffff")); static auto const rfc3849_min (boost::asio::ip::address_v6::from_string ("2001:db8::")); static auto const rfc3849_max (boost::asio::ip::address_v6::from_string ("2001:db8:ffff:ffff:ffff:ffff:ffff:ffff")); static auto const rfc4193_min (boost::asio::ip::address_v6::from_string ("fc00::")); static auto const 
rfc4193_max (boost::asio::ip::address_v6::from_string ("fd00:ffff:ffff:ffff:ffff:ffff:ffff:ffff")); static auto const ipv6_multicast_min (boost::asio::ip::address_v6::from_string ("ff00::")); static auto const ipv6_multicast_max (boost::asio::ip::address_v6::from_string ("ff00:ffff:ffff:ffff:ffff:ffff:ffff:ffff")); if (bytes >= rfc1700_min && bytes <= rfc1700_max) { result = true; } else if (bytes >= rfc5737_1_min && bytes <= rfc5737_1_max) { result = true; } else if (bytes >= rfc5737_2_min && bytes <= rfc5737_2_max) { result = true; } else if (bytes >= rfc5737_3_min && bytes <= rfc5737_3_max) { result = true; } else if (bytes >= ipv4_multicast_min && bytes <= ipv4_multicast_max) { result = true; } else if (bytes >= rfc6890_min && bytes <= rfc6890_max) { result = true; } else if (bytes >= rfc6666_min && bytes <= rfc6666_max) { result = true; } else if (bytes >= rfc3849_min && bytes <= rfc3849_max) { result = true; } else if (bytes >= ipv6_multicast_min && bytes <= ipv6_multicast_max) { result = true; } else if (blacklist_loopback && bytes.is_loopback ()) { result = true; } else if (blacklist_loopback && bytes >= ipv4_loopback_min && bytes <= ipv4_loopback_max) { result = true; } else if (nano::is_live_network) { if (bytes >= rfc1918_1_min && bytes <= rfc1918_1_max) { result = true; } else if (bytes >= rfc1918_2_min && bytes <= rfc1918_2_max) { result = true; } else if (bytes >= rfc1918_3_min && bytes <= rfc1918_3_max) { result = true; } else if (bytes >= rfc6598_min && bytes <= rfc6598_max) { result = true; } else if (bytes >= rfc4193_min && bytes <= rfc4193_max) { result = true; } } return result; } void nano::network::send_buffer (uint8_t const * data_a, size_t size_a, nano::endpoint const & endpoint_a, std::function<void(boost::system::error_code const &, size_t)> callback_a) { std::unique_lock<std::mutex> lock (socket_mutex); if (node.config.logging.network_packet_logging ()) { BOOST_LOG (node.log) << "Sending packet"; } if (on.load ()) { socket.async_send_to (boost::asio::buffer (data_a, size_a), endpoint_a, [this, callback_a](boost::system::error_code const & ec, size_t size_a) { callback_a (ec, size_a); this->node.stats.add (nano::stat::type::traffic, nano::stat::dir::out, size_a); if (ec == boost::system::errc::host_unreachable) { this->node.stats.inc (nano::stat::type::error, nano::stat::detail::unreachable_host, nano::stat::dir::out); } if (this->node.config.logging.network_packet_logging ()) { BOOST_LOG (this->node.log) << "Packet send complete"; } }); } } std::shared_ptr<nano::node> nano::node::shared () { return shared_from_this (); } nano::election_vote_result::election_vote_result () : replay (false), processed (false) { } nano::election_vote_result::election_vote_result (bool replay_a, bool processed_a) { replay = replay_a; processed = processed_a; } nano::election::election (nano::node & node_a, std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) : confirmation_action (confirmation_action_a), node (node_a), election_start (std::chrono::steady_clock::now ()), status ({ block_a, 0 }), confirmed (false), stopped (false), announcements (0) { last_votes.insert (std::make_pair (nano::not_an_account (), nano::vote_info{ std::chrono::steady_clock::now (), 0, block_a->hash () })); blocks.insert (std::make_pair (block_a->hash (), block_a)); } void nano::election::compute_rep_votes (nano::transaction const & transaction_a) { if (node.config.enable_voting) { node.wallets.foreach_representative (transaction_a, [this, 
&transaction_a](nano::public_key const & pub_a, nano::raw_key const & prv_a) { auto vote (this->node.store.vote_generate (transaction_a, pub_a, prv_a, status.winner)); this->node.vote_processor.vote (vote, this->node.network.endpoint ()); }); } } void nano::election::confirm_once (nano::transaction const & transaction_a, bool confirmed_back) { if (!confirmed.exchange (true)) { status.election_end = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::system_clock::now ().time_since_epoch ()); status.election_duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now () - election_start); auto winner_l (status.winner); auto node_l (node.shared ()); auto confirmation_action_l (confirmation_action); node.background ([node_l, winner_l, confirmation_action_l]() { node_l->process_confirmed (winner_l); confirmation_action_l (winner_l); }); if (!confirmed_back) { confirm_back (transaction_a); } } } void nano::election::confirm_back (nano::transaction const & transaction_a) { std::deque<nano::block_hash> hashes = { status.winner->previous (), status.winner->source (), status.winner->link () }; while (!hashes.empty ()) { auto hash (hashes.front ()); hashes.pop_front (); if (!hash.is_zero () && !node.ledger.is_epoch_link (hash)) { auto existing (node.active.blocks.find (hash)); if (existing != node.active.blocks.end () && !existing->second->confirmed && !existing->second->stopped && existing->second->blocks.size () == 1) { release_assert (existing->second->status.winner->hash () == hash); existing->second->confirm_once (transaction_a, true); // Avoid recursive actions hashes.push_back (existing->second->status.winner->previous ()); hashes.push_back (existing->second->status.winner->source ()); hashes.push_back (existing->second->status.winner->link ()); } } } } void nano::election::stop () { stopped = true; } bool nano::election::have_quorum (nano::tally_t const & tally_a, nano::uint128_t tally_sum) { bool result = false; if (tally_sum >= node.config.online_weight_minimum.number ()) { auto i (tally_a.begin ()); auto first (i->first); ++i; auto second (i != tally_a.end () ? 
i->first : 0); auto delta_l (node.delta ()); result = tally_a.begin ()->first > (second + delta_l); } return result; } nano::tally_t nano::election::tally (nano::transaction const & transaction_a) { std::unordered_map<nano::block_hash, nano::uint128_t> block_weights; for (auto vote_info : last_votes) { block_weights[vote_info.second.hash] += node.ledger.weight (transaction_a, vote_info.first); } last_tally = block_weights; nano::tally_t result; for (auto item : block_weights) { auto block (blocks.find (item.first)); if (block != blocks.end ()) { result.insert (std::make_pair (item.second, block->second)); } } return result; } void nano::election::confirm_if_quorum (nano::transaction const & transaction_a) { auto tally_l (tally (transaction_a)); assert (tally_l.size () > 0); auto winner (tally_l.begin ()); auto block_l (winner->second); status.tally = winner->first; nano::uint128_t sum (0); for (auto & i : tally_l) { sum += i.first; } if (sum >= node.config.online_weight_minimum.number () && block_l->hash () != status.winner->hash ()) { auto node_l (node.shared ()); node_l->block_processor.force (block_l); status.winner = block_l; } if (have_quorum (tally_l, sum)) { if (node.config.logging.vote_logging () || blocks.size () > 1) { log_votes (tally_l); } confirm_once (transaction_a); } } void nano::election::log_votes (nano::tally_t const & tally_a) { std::stringstream tally; tally << boost::str (boost::format ("\nVote tally for root %1%") % status.winner->root ().to_string ()); for (auto i (tally_a.begin ()), n (tally_a.end ()); i != n; ++i) { tally << boost::str (boost::format ("\nBlock %1% weight %2%") % i->second->hash ().to_string () % i->first.convert_to<std::string> ()); } for (auto i (last_votes.begin ()), n (last_votes.end ()); i != n; ++i) { tally << boost::str (boost::format ("\n%1% %2%") % i->first.to_account () % i->second.hash.to_string ()); } BOOST_LOG (node.log) << tally.str (); } nano::election_vote_result nano::election::vote (nano::account rep, uint64_t sequence, nano::block_hash block_hash) { // see republish_vote documentation for an explanation of these rules auto transaction (node.store.tx_begin_read ()); auto replay (false); auto supply (node.online_reps.online_stake ()); auto weight (node.ledger.weight (transaction, rep)); auto should_process (false); if (nano::is_test_network || weight > supply / 1000) // 0.1% or above { unsigned int cooldown; if (weight < supply / 100) // 0.1% to 1% { cooldown = 15; } else if (weight < supply / 20) // 1% to 5% { cooldown = 5; } else // 5% or above { cooldown = 1; } auto last_vote_it (last_votes.find (rep)); if (last_vote_it == last_votes.end ()) { should_process = true; } else { auto last_vote (last_vote_it->second); if (last_vote.sequence < sequence || (last_vote.sequence == sequence && last_vote.hash < block_hash)) { if (last_vote.time <= std::chrono::steady_clock::now () - std::chrono::seconds (cooldown)) { should_process = true; } } else { replay = true; } } if (should_process) { last_votes[rep] = { std::chrono::steady_clock::now (), sequence, block_hash }; if (!confirmed) { confirm_if_quorum (transaction); } } } return nano::election_vote_result (replay, should_process); } bool nano::node::validate_block_by_previous (nano::transaction const & transaction, std::shared_ptr<nano::block> block_a) { bool result (false); nano::account account; if (!block_a->previous ().is_zero ()) { if (store.block_exists (transaction, block_a->previous ())) { account = ledger.account (transaction, block_a->previous ()); } else { result = true; } } 
else { account = block_a->root (); } if (!result && block_a->type () == nano::block_type::state) { std::shared_ptr<nano::state_block> block_l (std::static_pointer_cast<nano::state_block> (block_a)); nano::amount prev_balance (0); if (!block_l->hashables.previous.is_zero ()) { if (store.block_exists (transaction, block_l->hashables.previous)) { prev_balance = ledger.balance (transaction, block_l->hashables.previous); } else { result = true; } } if (!result) { if (block_l->hashables.balance == prev_balance && !ledger.epoch_link.is_zero () && ledger.is_epoch_link (block_l->hashables.link)) { account = ledger.epoch_signer; } } } if (!result && (account.is_zero () || nano::validate_message (account, block_a->hash (), block_a->block_signature ()))) { result = true; } return result; } bool nano::election::publish (std::shared_ptr<nano::block> block_a) { auto result (false); if (blocks.size () >= 10) { if (last_tally[block_a->hash ()] < node.online_reps.online_stake () / 10) { result = true; } } if (!result) { auto transaction (node.store.tx_begin_read ()); result = node.validate_block_by_previous (transaction, block_a); if (!result) { if (blocks.find (block_a->hash ()) == blocks.end ()) { blocks.insert (std::make_pair (block_a->hash (), block_a)); confirm_if_quorum (transaction); node.network.republish_block (block_a); } } } return result; } size_t nano::election::last_votes_size () { std::lock_guard<std::mutex> lock (node.active.mutex); return last_votes.size (); } void nano::active_transactions::request_confirm (std::unique_lock<std::mutex> & lock_a) { std::unordered_set<nano::uint512_union> inactive; auto transaction (node.store.tx_begin_read ()); unsigned unconfirmed_count (0); unsigned unconfirmed_announcements (0); std::unordered_map<nano::endpoint, std::vector<std::pair<nano::block_hash, nano::block_hash>>> requests_bundle; std::deque<std::shared_ptr<nano::block>> rebroadcast_bundle; std::deque<std::pair<std::shared_ptr<nano::block>, std::shared_ptr<std::vector<nano::peer_information>>>> confirm_req_bundle; auto roots_size (roots.size ()); for (auto i (roots.get<1> ().begin ()), n (roots.get<1> ().end ()); i != n; ++i) { auto root (i->root); auto election_l (i->election); if ((election_l->confirmed || election_l->stopped) && election_l->announcements >= announcement_min - 1) { if (election_l->confirmed) { confirmed.push_back (election_l->status); if (confirmed.size () > election_history_size) { confirmed.pop_front (); } } inactive.insert (root); } else { if (election_l->announcements > announcement_long) { ++unconfirmed_count; unconfirmed_announcements += election_l->announcements; // Log votes for very long unconfirmed elections if (election_l->announcements % 50 == 1) { auto tally_l (election_l->tally (transaction)); election_l->log_votes (tally_l); } /* Escalation for long unconfirmed elections Start new elections for previous block & source if there are less than 100 active elections */ if (election_l->announcements % announcement_long == 1 && roots_size < 100 && !nano::is_test_network) { std::shared_ptr<nano::block> previous; auto previous_hash (election_l->status.winner->previous ()); if (!previous_hash.is_zero ()) { previous = node.store.block_get (transaction, previous_hash); if (previous != nullptr) { add (std::move (previous)); } } /* If previous block not existing/not commited yet, block_source can cause segfault for state blocks So source check can be done only if previous != nullptr or previous is 0 (open account) */ if (previous_hash.is_zero () || previous != nullptr) { auto 
source_hash (node.ledger.block_source (transaction, *election_l->status.winner)); if (!source_hash.is_zero ()) { auto source (node.store.block_get (transaction, source_hash)); if (source != nullptr) { add (std::move (source)); } } } } } if (election_l->announcements < announcement_long || election_l->announcements % announcement_long == 1) { if (node.ledger.could_fit (transaction, *election_l->status.winner)) { // Broadcast winner if (rebroadcast_bundle.size () < max_broadcast_queue) { rebroadcast_bundle.push_back (election_l->status.winner); } } else { if (election_l->announcements != 0) { election_l->stop (); } } } if (election_l->announcements % 4 == 1) { auto reps (std::make_shared<std::vector<nano::peer_information>> (node.peers.representatives (std::numeric_limits<size_t>::max ()))); std::unordered_set<nano::account> probable_reps; nano::uint128_t total_weight (0); for (auto j (reps->begin ()), m (reps->end ()); j != m;) { auto & rep_votes (election_l->last_votes); auto rep_acct (j->probable_rep_account); // Calculate if representative isn't recorded for several IP addresses if (probable_reps.find (rep_acct) == probable_reps.end ()) { total_weight = total_weight + j->rep_weight.number (); probable_reps.insert (rep_acct); } if (rep_votes.find (rep_acct) != rep_votes.end ()) { if (j + 1 == reps->end ()) { reps->pop_back (); break; } std::swap (*j, reps->back ()); reps->pop_back (); m = reps->end (); } else { ++j; if (node.config.logging.vote_logging ()) { BOOST_LOG (node.log) << "Representative did not respond to confirm_req, retrying: " << rep_acct.to_account (); } } } if ((!reps->empty () && total_weight > node.config.online_weight_minimum.number ()) || roots_size > 5) { // broadcast_confirm_req_base modifies reps, so we clone it once to avoid aliasing if (!nano::is_test_network) { if (confirm_req_bundle.size () < max_broadcast_queue) { confirm_req_bundle.push_back (std::make_pair (election_l->status.winner, reps)); } } else { for (auto & rep : *reps) { auto rep_request (requests_bundle.find (rep.endpoint)); auto block (election_l->status.winner); auto root_hash (std::make_pair (block->hash (), block->root ())); if (rep_request == requests_bundle.end ()) { if (requests_bundle.size () < max_broadcast_queue) { std::vector<std::pair<nano::block_hash, nano::block_hash>> insert_vector = { root_hash }; requests_bundle.insert (std::make_pair (rep.endpoint, insert_vector)); } } else if (rep_request->second.size () < max_broadcast_queue * nano::network::confirm_req_hashes_max) { rep_request->second.push_back (root_hash); } } } } else { if (!nano::is_test_network) { confirm_req_bundle.push_back (std::make_pair (election_l->status.winner, std::make_shared<std::vector<nano::peer_information>> (node.peers.list_vector (100)))); } else { for (auto & rep : *reps) { auto rep_request (requests_bundle.find (rep.endpoint)); auto block (election_l->status.winner); auto root_hash (std::make_pair (block->hash (), block->root ())); if (rep_request == requests_bundle.end ()) { std::vector<std::pair<nano::block_hash, nano::block_hash>> insert_vector = { root_hash }; requests_bundle.insert (std::make_pair (rep.endpoint, insert_vector)); } else { rep_request->second.push_back (root_hash); } } } } } } ++election_l->announcements; } lock_a.unlock (); // Rebroadcast unconfirmed blocks if (!rebroadcast_bundle.empty ()) { node.network.republish_block_batch (rebroadcast_bundle); } // Batch confirmation request if (!nano::is_live_network && !requests_bundle.empty ()) { node.network.broadcast_confirm_req_batch 
(requests_bundle, 50); } //confirm_req broadcast if (!confirm_req_bundle.empty ()) { node.network.broadcast_confirm_req_batch (confirm_req_bundle); } lock_a.lock (); for (auto i (inactive.begin ()), n (inactive.end ()); i != n; ++i) { auto root_it (roots.find (*i)); assert (root_it != roots.end ()); for (auto & block : root_it->election->blocks) { auto erased (blocks.erase (block.first)); (void)erased; assert (erased == 1); } roots.erase (*i); } if (unconfirmed_count > 0) { BOOST_LOG (node.log) << boost::str (boost::format ("%1% blocks have been unconfirmed averaging %2% announcements") % unconfirmed_count % (unconfirmed_announcements / unconfirmed_count)); } } void nano::active_transactions::request_loop () { std::unique_lock<std::mutex> lock (mutex); started = true; lock.unlock (); condition.notify_all (); lock.lock (); while (!stopped) { request_confirm (lock); const auto extra_delay (std::min (roots.size (), max_broadcast_queue) * node.network.broadcast_interval_ms * 2); condition.wait_for (lock, std::chrono::milliseconds (request_interval_ms + extra_delay)); } } void nano::active_transactions::stop () { std::unique_lock<std::mutex> lock (mutex); while (!started) { condition.wait (lock); } stopped = true; lock.unlock (); condition.notify_all (); if (thread.joinable ()) { thread.join (); } lock.lock (); roots.clear (); } bool nano::active_transactions::start (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) { std::lock_guard<std::mutex> lock (mutex); return add (block_a, confirmation_action_a); } bool nano::active_transactions::add (std::shared_ptr<nano::block> block_a, std::function<void(std::shared_ptr<nano::block>)> const & confirmation_action_a) { auto error (true); if (!stopped) { auto root (nano::uint512_union (block_a->previous (), block_a->root ())); auto existing (roots.find (root)); if (existing == roots.end ()) { auto election (std::make_shared<nano::election> (node, block_a, confirmation_action_a)); uint64_t difficulty (0); auto error (nano::work_validate (*block_a, &difficulty)); release_assert (!error); roots.insert (nano::conflict_info{ root, difficulty, election }); blocks.insert (std::make_pair (block_a->hash (), election)); } error = existing != roots.end (); } return error; } // Validate a vote and apply it to the current election if one exists bool nano::active_transactions::vote (std::shared_ptr<nano::vote> vote_a, bool single_lock) { std::shared_ptr<nano::election> election; bool replay (false); bool processed (false); { std::unique_lock<std::mutex> lock; if (!single_lock) { lock = std::unique_lock<std::mutex> (mutex); } for (auto vote_block : vote_a->blocks) { nano::election_vote_result result; if (vote_block.which ()) { auto block_hash (boost::get<nano::block_hash> (vote_block)); auto existing (blocks.find (block_hash)); if (existing != blocks.end ()) { result = existing->second->vote (vote_a->account, vote_a->sequence, block_hash); } } else { auto block (boost::get<std::shared_ptr<nano::block>> (vote_block)); auto existing (roots.find (nano::uint512_union (block->previous (), block->root ()))); if (existing != roots.end ()) { result = existing->election->vote (vote_a->account, vote_a->sequence, block->hash ()); } } replay = replay || result.replay; processed = processed || result.processed; } } if (processed) { node.network.republish_vote (vote_a); } return replay; } bool nano::active_transactions::active (nano::block const & block_a) { std::lock_guard<std::mutex> lock (mutex); return 
roots.find (nano::uint512_union (block_a.previous (), block_a.root ())) != roots.end (); } void nano::active_transactions::update_difficulty (nano::block const & block_a) { std::lock_guard<std::mutex> lock (mutex); auto existing (roots.find (nano::uint512_union (block_a.previous (), block_a.root ()))); if (existing != roots.end ()) { uint64_t difficulty; auto error (nano::work_validate (block_a, &difficulty)); assert (!error); roots.modify (existing, [difficulty](nano::conflict_info & info_a) { info_a.difficulty = difficulty; }); } } // List of active blocks in elections std::deque<std::shared_ptr<nano::block>> nano::active_transactions::list_blocks (bool single_lock) { std::deque<std::shared_ptr<nano::block>> result; std::unique_lock<std::mutex> lock; if (!single_lock) { lock = std::unique_lock<std::mutex> (mutex); } for (auto i (roots.begin ()), n (roots.end ()); i != n; ++i) { result.push_back (i->election->status.winner); } return result; } std::deque<nano::election_status> nano::active_transactions::list_confirmed () { std::lock_guard<std::mutex> lock (mutex); return confirmed; } void nano::active_transactions::erase (nano::block const & block_a) { std::lock_guard<std::mutex> lock (mutex); if (roots.find (nano::uint512_union (block_a.previous (), block_a.root ())) != roots.end ()) { roots.erase (nano::uint512_union (block_a.previous (), block_a.root ())); BOOST_LOG (node.log) << boost::str (boost::format ("Election erased for block block %1% root %2%") % block_a.hash ().to_string () % block_a.root ().to_string ()); } } bool nano::active_transactions::empty () { std::lock_guard<std::mutex> lock (mutex); return roots.empty (); } size_t nano::active_transactions::size () { std::lock_guard<std::mutex> lock (mutex); return roots.size (); } nano::active_transactions::active_transactions (nano::node & node_a) : node (node_a), started (false), stopped (false), thread ([this]() { nano::thread_role::set (nano::thread_role::name::request_loop); request_loop (); }) { std::unique_lock<std::mutex> lock (mutex); while (!started) { condition.wait (lock); } } nano::active_transactions::~active_transactions () { stop (); } bool nano::active_transactions::publish (std::shared_ptr<nano::block> block_a) { std::lock_guard<std::mutex> lock (mutex); auto existing (roots.find (nano::uint512_union (block_a->previous (), block_a->root ()))); auto result (true); if (existing != roots.end ()) { result = existing->election->publish (block_a); if (!result) { blocks.insert (std::make_pair (block_a->hash (), existing->election)); } } return result; } namespace nano { std::unique_ptr<seq_con_info_component> collect_seq_con_info (active_transactions & active_transactions, const std::string & name) { size_t roots_count = 0; size_t blocks_count = 0; size_t confirmed_count = 0; { std::lock_guard<std::mutex> guard (active_transactions.mutex); roots_count = active_transactions.roots.size (); blocks_count = active_transactions.blocks.size (); confirmed_count = active_transactions.confirmed.size (); } auto composite = std::make_unique<seq_con_info_composite> (name); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "roots", roots_count, sizeof (decltype (active_transactions.roots)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "blocks", blocks_count, sizeof (decltype (active_transactions.blocks)::value_type) })); composite->add_component (std::make_unique<seq_con_info_leaf> (seq_con_info{ "confirmed", confirmed_count, sizeof (decltype 
(active_transactions.confirmed)::value_type) })); return composite; } } int nano::node::store_version () { auto transaction (store.tx_begin_read ()); return store.version_get (transaction); } nano::thread_runner::thread_runner (boost::asio::io_context & io_ctx_a, unsigned service_threads_a) { boost::thread::attributes attrs; nano::thread_attributes::set (attrs); for (auto i (0u); i < service_threads_a; ++i) { threads.push_back (boost::thread (attrs, [&io_ctx_a]() { nano::thread_role::set (nano::thread_role::name::io); try { io_ctx_a.run (); } catch (...) { #ifndef NDEBUG /* * In a release build, catch and swallow the * io_context exception, in debug mode pass it * on */ throw; #endif } })); } } nano::thread_runner::~thread_runner () { join (); } void nano::thread_runner::join () { for (auto & i : threads) { if (i.joinable ()) { i.join (); } } } nano::inactive_node::inactive_node (boost::filesystem::path const & path, uint16_t peering_port_a) : path (path), io_context (std::make_shared<boost::asio::io_context> ()), alarm (*io_context), work (1, nullptr), peering_port (peering_port_a) { boost::system::error_code error_chmod; /* * @warning May throw a filesystem exception */ boost::filesystem::create_directories (path); nano::set_secure_perm_directory (path, error_chmod); logging.max_size = std::numeric_limits<std::uintmax_t>::max (); logging.init (path); node = std::make_shared<nano::node> (init, *io_context, peering_port, path, alarm, logging, work); } nano::inactive_node::~inactive_node () { node->stop (); } nano::udp_buffer::udp_buffer (nano::stat & stats, size_t size, size_t count) : stats (stats), free (count), full (count), slab (size * count), entries (count), stopped (false) { assert (count > 0); assert (size > 0); auto slab_data (slab.data ()); auto entry_data (entries.data ()); for (auto i (0); i < count; ++i, ++entry_data) { *entry_data = { slab_data + i * size, 0, nano::endpoint () }; free.push_back (entry_data); } } nano::udp_data * nano::udp_buffer::allocate () { std::unique_lock<std::mutex> lock (mutex); while (!stopped && free.empty () && full.empty ()) { stats.inc (nano::stat::type::udp, nano::stat::detail::blocking, nano::stat::dir::in); condition.wait (lock); } nano::udp_data * result (nullptr); if (!free.empty ()) { result = free.front (); free.pop_front (); } if (result == nullptr && !full.empty ()) { result = full.front (); full.pop_front (); stats.inc (nano::stat::type::udp, nano::stat::detail::overflow, nano::stat::dir::in); } release_assert (result || stopped); return result; } void nano::udp_buffer::enqueue (nano::udp_data * data_a) { assert (data_a != nullptr); { std::lock_guard<std::mutex> lock (mutex); full.push_back (data_a); } condition.notify_all (); } nano::udp_data * nano::udp_buffer::dequeue () { std::unique_lock<std::mutex> lock (mutex); while (!stopped && full.empty ()) { condition.wait (lock); } nano::udp_data * result (nullptr); if (!full.empty ()) { result = full.front (); full.pop_front (); } return result; } void nano::udp_buffer::release (nano::udp_data * data_a) { assert (data_a != nullptr); { std::lock_guard<std::mutex> lock (mutex); free.push_back (data_a); } condition.notify_all (); } void nano::udp_buffer::stop () { { std::lock_guard<std::mutex> lock (mutex); stopped = true; } condition.notify_all (); }
1
15,002
Did you mean to negate the not_a_peer check?
nanocurrency-nano-node
cpp
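(Aside on the nano-node review comment above, "Did you mean to negate the not_a_peer check?": the patch for that row is not reproduced here, so the following is only a hypothetical, self-contained C++ sketch of why the polarity of an exclusion predicate matters when deciding whether to contact a peer. The name not_a_peer is taken from the review comment only; its body, the sample address, and the calling code are invented for illustration and are not nano-node's actual API.)

#include <iostream>
#include <string>

// Hypothetical predicate, named in the negative like not_a_peer: it returns
// true when the endpoint must be rejected (reserved range, loopback, etc.).
bool not_a_peer (std::string const & address, bool blacklist_loopback)
{
	return address == "::1" && blacklist_loopback;
}

int main ()
{
	std::string address ("2600::1");
	// Correct polarity: contact the endpoint only when it is NOT excluded.
	if (!not_a_peer (address, true))
	{
		std::cout << "send keepalive to " << address << std::endl;
	}
	// Dropping the '!' inverts the filter and keeps exactly the endpoints
	// that should have been skipped -- the likely point of the review question.
	return 0;
}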
@@ -1,6 +1,6 @@
 /*
  * DBeaver - Universal Database Manager
- * Copyright (C) 2010-2021 DBeaver Corp and others
+ * Copyright (C) 2010-2017 Serge Rider ([email protected])
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
1
/*
 * DBeaver - Universal Database Manager
 * Copyright (C) 2010-2021 DBeaver Corp and others
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jkiss.dbeaver.ui.controls;

import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Text;
import org.jkiss.code.Nullable;
import org.jkiss.dbeaver.DBException;

/**
 * CustomTimeEditor
 */
public class CustomTimeEditor {
    private Text timeEditor;
//    private Button timePickerButton;

    public CustomTimeEditor(Composite parent, int style) {
        this.timeEditor = new Text(parent, style);
/*
        Composite ph = UIUtils.createPlaceholder(parent, 2);
        this.timeEditor = new Text(ph, style);
        this.timePickerButton = new Button(ph, SWT.FLAT | SWT.ARROW | SWT.DOWN);
        this.timePickerButton.addSelectionListener(new SelectionAdapter() {
            @Override
            public void widgetSelected(SelectionEvent e) {
                super.widgetSelected(e);
                UIUtils.showMessageBox(timePickerButton.getShell(), "asdf", "sdf", SWT.ICON_INFORMATION);
            }
        });
*/
    }

    public void setValue(@Nullable String value) {
        if (value == null) {
            timeEditor.setText("");
        } else {
            timeEditor.setText(value);
        }
    }

    public String getValue() throws DBException {
        final String timeText = timeEditor.getText();
        if (timeText.isEmpty()) {
            return null;
        }
        return timeText;
    }

    public void setEditable(boolean editable) {
        timeEditor.setEditable(editable);
    }

    public Text getControl() {
        return timeEditor;
    }

    public void selectAll() {
        timeEditor.selectAll();
    }
}
1
11,662
Copyright (C) 2010-2021 DBeaver Corp and others
dbeaver-dbeaver
java
@@ -22,9 +22,8 @@
 We use this to be able to highlight parts of the text.
 """
 
-import re
 import html
-
+import re
 from PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate
 from PyQt5.QtCore import QRectF, QSize, Qt
 from PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption,
1
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: # Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]> # # This file is part of qutebrowser. # # qutebrowser is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # qutebrowser is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. """Completion item delegate for CompletionView. We use this to be able to highlight parts of the text. """ import re import html from PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate from PyQt5.QtCore import QRectF, QSize, Qt from PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption, QAbstractTextDocumentLayout) from qutebrowser.config import config, configexc, style from qutebrowser.utils import qtutils class CompletionItemDelegate(QStyledItemDelegate): """Delegate used by CompletionView to draw individual items. Mainly a cleaned up port of Qt's way to draw a TreeView item, except it uses a QTextDocument to draw the text and add marking. Original implementation: qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153 Attributes: _opt: The QStyleOptionViewItem which is used. _style: The style to be used. _painter: The QPainter to be used. _doc: The QTextDocument to be used. """ # FIXME this is horribly slow when resizing. # We should probably cache something in _get_textdoc or so, but as soon as # we implement eliding that cache probably isn't worth much anymore... # https://github.com/The-Compiler/qutebrowser/issues/121 def __init__(self, parent=None): self._painter = None self._opt = None self._doc = None self._style = None super().__init__(parent) def _draw_background(self): """Draw the background of an ItemViewItem.""" self._style.drawPrimitive(self._style.PE_PanelItemViewItem, self._opt, self._painter, self._opt.widget) def _draw_icon(self): """Draw the icon of an ItemViewItem.""" icon_rect = self._style.subElementRect( self._style.SE_ItemViewItemDecoration, self._opt, self._opt.widget) if not icon_rect.isValid(): # The rect seems to be wrong in all kind of ways if no icon should # be displayed. return mode = QIcon.Normal if not self._opt.state & QStyle.State_Enabled: mode = QIcon.Disabled elif self._opt.state & QStyle.State_Selected: mode = QIcon.Selected state = QIcon.On if self._opt.state & QStyle.State_Open else QIcon.Off self._opt.icon.paint(self._painter, icon_rect, self._opt.decorationAlignment, mode, state) def _draw_text(self, index): """Draw the text of an ItemViewItem. This is the main part where we differ from the original implementation in Qt: We use a QTextDocument to draw text. Args: index: The QModelIndex of the item to draw. 
""" if not self._opt.text: return text_rect_ = self._style.subElementRect( self._style.SE_ItemViewItemText, self._opt, self._opt.widget) qtutils.ensure_valid(text_rect_) margin = self._style.pixelMetric(QStyle.PM_FocusFrameHMargin, self._opt, self._opt.widget) + 1 # remove width padding text_rect = text_rect_.adjusted(margin, 0, -margin, 0) qtutils.ensure_valid(text_rect) # move text upwards a bit if index.parent().isValid(): text_rect.adjust(0, -1, 0, -1) else: text_rect.adjust(0, -2, 0, -2) self._painter.save() state = self._opt.state if state & QStyle.State_Enabled and state & QStyle.State_Active: cg = QPalette.Normal elif state & QStyle.State_Enabled: cg = QPalette.Inactive else: cg = QPalette.Disabled if state & QStyle.State_Selected: self._painter.setPen(self._opt.palette.color( cg, QPalette.HighlightedText)) # This is a dirty fix for the text jumping by one pixel for # whatever reason. text_rect.adjust(0, -1, 0, 0) else: self._painter.setPen(self._opt.palette.color(cg, QPalette.Text)) if state & QStyle.State_Editing: self._painter.setPen(self._opt.palette.color(cg, QPalette.Text)) self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1)) self._painter.translate(text_rect.left(), text_rect.top()) self._get_textdoc(index) self._draw_textdoc(text_rect) self._painter.restore() def _draw_textdoc(self, rect): """Draw the QTextDocument of an item. Args: rect: The QRect to clip the drawing to. """ # We can't use drawContents because then the color would be ignored. clip = QRectF(0, 0, rect.width(), rect.height()) self._painter.save() if self._opt.state & QStyle.State_Selected: option = 'completion.item.selected.fg' elif not self._opt.state & QStyle.State_Enabled: option = 'completion.category.fg' else: option = 'completion.fg' try: self._painter.setPen(config.get('colors', option)) except configexc.NoOptionError: self._painter.setPen(config.get('colors', 'completion.fg')) ctx = QAbstractTextDocumentLayout.PaintContext() ctx.palette.setColor(QPalette.Text, self._painter.pen().color()) if clip.isValid(): self._painter.setClipRect(clip) ctx.clip = clip self._doc.documentLayout().draw(self._painter, ctx) self._painter.restore() def _get_textdoc(self, index): """Create the QTextDocument of an item. Args: index: The QModelIndex of the item to draw. """ # FIXME we probably should do eliding here. 
See # qcommonstyle.cpp:viewItemDrawText # https://github.com/The-Compiler/qutebrowser/issues/118 text_option = QTextOption() if self._opt.features & QStyleOptionViewItem.WrapText: text_option.setWrapMode(QTextOption.WordWrap) else: text_option.setWrapMode(QTextOption.ManualWrap) text_option.setTextDirection(self._opt.direction) text_option.setAlignment(QStyle.visualAlignment( self._opt.direction, self._opt.displayAlignment)) self._doc = QTextDocument(self) self._doc.setDefaultFont(self._opt.font) self._doc.setDefaultTextOption(text_option) self._doc.setDefaultStyleSheet(style.get_stylesheet(""" .highlight { {{ color['completion.match.fg'] }} } """)) self._doc.setDocumentMargin(2) if index.parent().isValid(): pattern = index.model().pattern columns_to_filter = index.model().srcmodel.columns_to_filter if index.column() in columns_to_filter and pattern: repl = r'<span class="highlight">\g<0></span>' text = re.sub(re.escape(pattern), repl, self._opt.text, flags=re.IGNORECASE) self._doc.setHtml(text) else: self._doc.setPlainText(self._opt.text) else: self._doc.setHtml('<b>{}</b>'.format(html.escape(self._opt.text))) def _draw_focus_rect(self): """Draw the focus rectangle of an ItemViewItem.""" state = self._opt.state if not state & QStyle.State_HasFocus: return o = self._opt o.rect = self._style.subElementRect( self._style.SE_ItemViewItemFocusRect, self._opt, self._opt.widget) o.state |= QStyle.State_KeyboardFocusChange | QStyle.State_Item qtutils.ensure_valid(o.rect) if state & QStyle.State_Enabled: cg = QPalette.Normal else: cg = QPalette.Disabled if state & QStyle.State_Selected: role = QPalette.Highlight else: role = QPalette.Window o.backgroundColor = self._opt.palette.color(cg, role) self._style.drawPrimitive(QStyle.PE_FrameFocusRect, o, self._painter, self._opt.widget) def sizeHint(self, option, index): """Override sizeHint of QStyledItemDelegate. Return the cell size based on the QTextDocument size, but might not work correctly yet. Args: option: const QStyleOptionViewItem & option index: const QModelIndex & index Return: A QSize with the recommended size. """ value = index.data(Qt.SizeHintRole) if value is not None: return value self._opt = QStyleOptionViewItem(option) self.initStyleOption(self._opt, index) self._style = self._opt.widget.style() self._get_textdoc(index) docsize = self._doc.size().toSize() size = self._style.sizeFromContents(QStyle.CT_ItemViewItem, self._opt, docsize, self._opt.widget) qtutils.ensure_valid(size) return size + QSize(10, 3) def paint(self, painter, option, index): """Override the QStyledItemDelegate paint function. Args: painter: QPainter * painter option: const QStyleOptionViewItem & option index: const QModelIndex & index """ self._painter = painter self._painter.save() self._opt = QStyleOptionViewItem(option) self.initStyleOption(self._opt, index) self._style = self._opt.widget.style() self._draw_background() self._draw_icon() self._draw_text(index) self._draw_focus_rect() self._painter.restore()
1
13,478
This seems like an unrelated change
qutebrowser-qutebrowser
py
@@ -48,6 +48,10 @@ func (l *linuxStandardInit) Init() error { runtime.LockOSThread() defer runtime.UnlockOSThread() if !l.config.Config.NoNewKeyring { + if err := label.SetKeyLabel(l.config.ProcessLabel); err != nil { + return err + } + defer label.SetKeyLabel("") ringname, keepperms, newperms := l.getSessionRingParams() // Do not inherit the parent's session keyring.
1
// +build linux package libcontainer import ( "fmt" "os" "os/exec" "runtime" "syscall" //only for Exec "github.com/opencontainers/runc/libcontainer/apparmor" "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/keys" "github.com/opencontainers/runc/libcontainer/seccomp" "github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/selinux/go-selinux/label" "github.com/pkg/errors" "golang.org/x/sys/unix" ) type linuxStandardInit struct { pipe *os.File consoleSocket *os.File parentPid int fifoFd int config *initConfig } func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) { var newperms uint32 if l.config.Config.Namespaces.Contains(configs.NEWUSER) { // With user ns we need 'other' search permissions. newperms = 0x8 } else { // Without user ns we need 'UID' search permissions. newperms = 0x80000 } // Create a unique per session container name that we can join in setns; // However, other containers can also join it. return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms } func (l *linuxStandardInit) Init() error { runtime.LockOSThread() defer runtime.UnlockOSThread() if !l.config.Config.NoNewKeyring { ringname, keepperms, newperms := l.getSessionRingParams() // Do not inherit the parent's session keyring. if sessKeyId, err := keys.JoinSessionKeyring(ringname); err != nil { // If keyrings aren't supported then it is likely we are on an // older kernel (or inside an LXC container). While we could bail, // the security feature we are using here is best-effort (it only // really provides marginal protection since VFS credentials are // the only significant protection of keyrings). // // TODO(cyphar): Log this so people know what's going on, once we // have proper logging in 'runc init'. if errors.Cause(err) != unix.ENOSYS { return errors.Wrap(err, "join session keyring") } } else { // Make session keyring searcheable. If we've gotten this far we // bail on any error -- we don't want to have a keyring with bad // permissions. if err := keys.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil { return errors.Wrap(err, "mod keyring permissions") } } } if err := setupNetwork(l.config); err != nil { return err } if err := setupRoute(l.config.Config); err != nil { return err } label.Init() if err := prepareRootfs(l.pipe, l.config); err != nil { return err } // Set up the console. This has to be done *before* we finalize the rootfs, // but *after* we've given the user the chance to set up all of the mounts // they wanted. if l.config.CreateConsole { if err := setupConsole(l.consoleSocket, l.config, true); err != nil { return err } if err := system.Setctty(); err != nil { return errors.Wrap(err, "setctty") } } // Finish the rootfs setup. 
if l.config.Config.Namespaces.Contains(configs.NEWNS) { if err := finalizeRootfs(l.config.Config); err != nil { return err } } if hostname := l.config.Config.Hostname; hostname != "" { if err := unix.Sethostname([]byte(hostname)); err != nil { return errors.Wrap(err, "sethostname") } } if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil { return errors.Wrap(err, "apply apparmor profile") } for key, value := range l.config.Config.Sysctl { if err := writeSystemProperty(key, value); err != nil { return errors.Wrapf(err, "write sysctl key %s", key) } } for _, path := range l.config.Config.ReadonlyPaths { if err := readonlyPath(path); err != nil { return errors.Wrapf(err, "readonly path %s", path) } } for _, path := range l.config.Config.MaskPaths { if err := maskPath(path, l.config.Config.MountLabel); err != nil { return errors.Wrapf(err, "mask path %s", path) } } pdeath, err := system.GetParentDeathSignal() if err != nil { return errors.Wrap(err, "get pdeath signal") } if l.config.NoNewPrivileges { if err := unix.Prctl(unix.PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil { return errors.Wrap(err, "set nonewprivileges") } } // Tell our parent that we're ready to Execv. This must be done before the // Seccomp rules have been applied, because we need to be able to read and // write to a socket. if err := syncParentReady(l.pipe); err != nil { return errors.Wrap(err, "sync ready") } if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil { return errors.Wrap(err, "set process label") } defer label.SetProcessLabel("") // Without NoNewPrivileges seccomp is a privileged operation, so we need to // do this before dropping capabilities; otherwise do it as late as possible // just before execve so as few syscalls take place after it as possible. if l.config.Config.Seccomp != nil && !l.config.NoNewPrivileges { if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { return err } } if err := finalizeNamespace(l.config); err != nil { return err } // finalizeNamespace can change user/group which clears the parent death // signal, so we restore it here. if err := pdeath.Restore(); err != nil { return errors.Wrap(err, "restore pdeath signal") } // Compare the parent from the initial start of the init process and make // sure that it did not change. if the parent changes that means it died // and we were reparented to something else so we should just kill ourself // and not cause problems for someone else. if unix.Getppid() != l.parentPid { return unix.Kill(unix.Getpid(), unix.SIGKILL) } // Check for the arg before waiting to make sure it exists and it is // returned as a create time error. name, err := exec.LookPath(l.config.Args[0]) if err != nil { return err } // Close the pipe to signal that we have completed our init. l.pipe.Close() // Wait for the FIFO to be opened on the other side before exec-ing the // user process. We open it through /proc/self/fd/$fd, because the fd that // was given to us was an O_PATH fd to the fifo itself. Linux allows us to // re-open an O_PATH fd through /proc. fd, err := unix.Open(fmt.Sprintf("/proc/self/fd/%d", l.fifoFd), unix.O_WRONLY|unix.O_CLOEXEC, 0) if err != nil { return newSystemErrorWithCause(err, "open exec fifo") } if _, err := unix.Write(fd, []byte("0")); err != nil { return newSystemErrorWithCause(err, "write 0 exec fifo") } // Close the O_PATH fifofd fd before exec because the kernel resets // dumpable in the wrong order. 
This has been fixed in newer kernels, but // we keep this to ensure CVE-2016-9962 doesn't re-emerge on older kernels. // N.B. the core issue itself (passing dirfds to the host filesystem) has // since been resolved. // https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318 unix.Close(l.fifoFd) // Set seccomp as close to execve as possible, so as few syscalls take // place afterward (reducing the amount of syscalls that users need to // enable in their seccomp profiles). if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges { if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil { return newSystemErrorWithCause(err, "init seccomp") } } if err := syscall.Exec(name, l.config.Args[0:], os.Environ()); err != nil { return newSystemErrorWithCause(err, "exec user process") } return nil }
1
17,450
Just to double-check -- are you sure this needs to be done *before* we create a new session? (Is `SetKeyLabel` setting what the label will be for all future keys or the label for the current key?)
opencontainers-runc
go
@@ -47,6 +47,19 @@ using LocatorSelectorEntry = fastrtps::rtps::LocatorSelectorEntry; using LocatorSelector = fastrtps::rtps::LocatorSelector; using PortParameters = fastrtps::rtps::PortParameters; +namespace eprosima { +namespace fastdds { +namespace rtps { + +static constexpr SharedMemTransportDescriptor::OverflowPolicy shm_default_overflow_policy = + SharedMemTransportDescriptor::OverflowPolicy::DISCARD; + + +} // namespace rtps +} // namespace fastdds +} // namespace eprosima + + TransportInterface* SharedMemTransportDescriptor::create_transport() const { return new SharedMemTransport(*this);
1
// Copyright 2019 Proyectos y Sistemas de Mantenimiento SL (eProsima). // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <utility> #include <cstring> #include <algorithm> #include <fastdds/rtps/transport/TransportInterface.h> #include <fastrtps/rtps/messages/CDRMessage.h> #include <fastrtps/log/Log.h> #include <fastdds/rtps/network/ReceiverResource.h> #include <fastdds/rtps/network/SenderResource.h> #include <fastrtps/rtps/messages/MessageReceiver.h> #include <rtps/transport/shared_mem/SHMLocator.hpp> #include <rtps/transport/shared_mem/SharedMemTransport.h> #include <rtps/transport/shared_mem/SharedMemSenderResource.hpp> #include <rtps/transport/shared_mem/SharedMemChannelResource.hpp> #include <rtps/transport/shared_mem/SharedMemManager.hpp> #define SHM_MANAGER_DOMAIN ("fastrtps") using namespace std; using namespace eprosima; using namespace eprosima::fastdds; using namespace eprosima::fastdds::rtps; using Locator_t = fastrtps::rtps::Locator_t; using LocatorList_t = fastrtps::rtps::LocatorList_t; using Log = dds::Log; using octet = fastrtps::rtps::octet; using SenderResource = fastrtps::rtps::SenderResource; using LocatorSelectorEntry = fastrtps::rtps::LocatorSelectorEntry; using LocatorSelector = fastrtps::rtps::LocatorSelector; using PortParameters = fastrtps::rtps::PortParameters; TransportInterface* SharedMemTransportDescriptor::create_transport() const { return new SharedMemTransport(*this); } //********************************************************* // SharedMemTransport //********************************************************* SharedMemTransport::SharedMemTransport( const SharedMemTransportDescriptor& descriptor) : TransportInterface(LOCATOR_KIND_SHM) , configuration_(descriptor) { } SharedMemTransport::SharedMemTransport() : TransportInterface(LOCATOR_KIND_SHM) { } SharedMemTransport::~SharedMemTransport() { clean(); } bool SharedMemTransport::getDefaultMetatrafficMulticastLocators( LocatorList_t& locators, uint32_t metatraffic_multicast_port) const { locators.push_back(SHMLocator::create_locator(metatraffic_multicast_port, SHMLocator::Type::MULTICAST)); return true; } bool SharedMemTransport::getDefaultMetatrafficUnicastLocators( LocatorList_t& locators, uint32_t metatraffic_unicast_port) const { locators.push_back(SHMLocator::create_locator(metatraffic_unicast_port, SHMLocator::Type::UNICAST)); return true; } bool SharedMemTransport::getDefaultUnicastLocators( LocatorList_t& locators, uint32_t unicast_port) const { auto locator = SHMLocator::create_locator(unicast_port, SHMLocator::Type::UNICAST); fillUnicastLocator(locator, unicast_port); locators.push_back(locator); return true; } void SharedMemTransport::AddDefaultOutputLocator( LocatorList_t& defaultList) { (void)defaultList; } const SharedMemTransportDescriptor* SharedMemTransport::configuration() const { return &configuration_; } bool SharedMemTransport::OpenInputChannel( const Locator_t& locator, TransportReceiverInterface* receiver, uint32_t maxMsgSize) { std::unique_lock<std::recursive_mutex> 
scopedLock(input_channels_mutex_); if (!IsLocatorSupported(locator)) { return false; } if (!IsInputChannelOpen(locator)) { try { auto channel_resource = CreateInputChannelResource(locator, maxMsgSize, receiver); input_channels_.push_back(channel_resource); } catch (std::exception& e) { (void)e; logInfo(RTPS_MSG_OUT, std::string("CreateInputChannelResource failed for port ") << locator.port << " msg: " << e.what()); return false; } } return true; } bool SharedMemTransport::is_locator_allowed( const Locator_t& locator) const { return IsLocatorSupported(locator); } LocatorList_t SharedMemTransport::NormalizeLocator( const Locator_t& locator) { LocatorList_t list; list.push_back(locator); return list; } bool SharedMemTransport::is_local_locator( const Locator_t& locator) const { assert(locator.kind == LOCATOR_KIND_SHM); (void)locator; return true; } void SharedMemTransport::clean() { assert(input_channels_.size() == 0); } bool SharedMemTransport::CloseInputChannel( const Locator_t& locator) { std::lock_guard<std::recursive_mutex> lock(input_channels_mutex_); for (auto it = input_channels_.begin(); it != input_channels_.end(); it++) { if ( (*it)->locator() == locator) { (*it)->disable(); (*it)->release(); (*it)->clear(); delete (*it); input_channels_.erase(it); return true; } } return false; } bool SharedMemTransport::DoInputLocatorsMatch( const Locator_t& left, const Locator_t& right) const { return left.kind == right.kind && left.port == right.port; } bool SharedMemTransport::init() { try { switch (configuration_.port_overflow_policy()) { case SharedMemTransportDescriptor::OverflowPolicy::DISCARD: push_lambda_ = &SharedMemTransport::push_discard; break; case SharedMemTransportDescriptor::OverflowPolicy::FAIL: push_lambda_ = &SharedMemTransport::push_fail; break; default: throw std::runtime_error("unknown port_overflow_policy"); } switch (configuration_.segment_overflow_policy()) { case SharedMemTransportDescriptor::OverflowPolicy::DISCARD: case SharedMemTransportDescriptor::OverflowPolicy::FAIL: break; default: throw std::runtime_error("unknown port_overflow_policy"); } shared_mem_manager_ = std::make_shared<SharedMemManager>(SHM_MANAGER_DOMAIN); shared_mem_segment_ = shared_mem_manager_->create_segment(configuration_.segment_size(), configuration_.port_queue_capacity()); // Memset the whole segment to zero in order to force physical map of the buffer auto buffer = shared_mem_segment_->alloc_buffer(configuration_.segment_size(), (std::chrono::steady_clock::now()+std::chrono::milliseconds(100))); memset(buffer->data(), 0, configuration_.segment_size()); buffer.reset(); if (!configuration_.rtps_dump_file().empty()) { auto packets_file_consumer = std::unique_ptr<SHMPacketFileConsumer>( new SHMPacketFileConsumer(configuration_.rtps_dump_file())); packet_logger_ = std::make_shared<PacketsLog<SHMPacketFileConsumer> >(); packet_logger_->RegisterConsumer(std::move(packets_file_consumer)); } } catch (std::exception& e) { logError(RTPS_MSG_OUT, e.what()); return false; } return true; } bool SharedMemTransport::IsInputChannelOpen( const Locator_t& locator) const { std::lock_guard<std::recursive_mutex> lock(input_channels_mutex_); return IsLocatorSupported(locator) && (std::find_if( input_channels_.begin(), input_channels_.end(), [&](const SharedMemChannelResource* resource) { return locator == resource->locator(); }) != input_channels_.end()); } bool SharedMemTransport::IsLocatorSupported( const Locator_t& locator) const { return locator.kind == transport_kind_; } SharedMemChannelResource* 
SharedMemTransport::CreateInputChannelResource( const Locator_t& locator, uint32_t maxMsgSize, TransportReceiverInterface* receiver) { (void) maxMsgSize; // Multicast locators implies ReadShared (Multiple readers) ports. auto open_mode = locator.address[0] == 'M' ? SharedMemGlobal::Port::OpenMode::ReadShared : SharedMemGlobal::Port::OpenMode::ReadExclusive; return new SharedMemChannelResource( shared_mem_manager_->open_port( locator.port, configuration_.port_queue_capacity(), configuration_.healthy_check_timeout_ms(), open_mode)->create_listener(), locator, receiver, configuration_.rtps_dump_file()); } bool SharedMemTransport::OpenOutputChannel( SendResourceList& sender_resource_list, const Locator_t& locator) { if (!IsLocatorSupported(locator)) { return false; } // We try to find a SenderResource that can be reuse to this locator. // Note: This is done in this level because if we do in NetworkFactory level, we have to mantain what transport // already reuses a SenderResource. for (auto& sender_resource : sender_resource_list) { SharedMemSenderResource* sm_sender_resource = SharedMemSenderResource::cast(*this, sender_resource.get()); if (sm_sender_resource) { return true; } } try { sender_resource_list.emplace_back( static_cast<SenderResource*>(new SharedMemSenderResource(*this))); } catch (std::exception& e) { logError(RTPS_MSG_OUT, "SharedMemTransport error opening port " << std::to_string(locator.port) << " with msg: " << e.what()); return false; } return true; } Locator_t SharedMemTransport::RemoteToMainLocal( const Locator_t& remote) const { if (!IsLocatorSupported(remote)) { return false; } Locator_t mainLocal(remote); mainLocal.set_Invalid_Address(); return mainLocal; } bool SharedMemTransport::transform_remote_locator( const Locator_t& remote_locator, Locator_t& result_locator) const { if (IsLocatorSupported(remote_locator)) { result_locator = remote_locator; return true; } return false; } std::shared_ptr<SharedMemManager::Buffer> SharedMemTransport::copy_to_shared_buffer( const octet* send_buffer, uint32_t send_buffer_size, const std::chrono::steady_clock::time_point& max_blocking_time_point) { assert(shared_mem_segment_); std::shared_ptr<SharedMemManager::Buffer> shared_buffer = shared_mem_segment_->alloc_buffer(send_buffer_size, max_blocking_time_point); memcpy(shared_buffer->data(), send_buffer, send_buffer_size); return shared_buffer; } bool SharedMemTransport::send( const octet* send_buffer, uint32_t send_buffer_size, fastrtps::rtps::LocatorsIterator* destination_locators_begin, fastrtps::rtps::LocatorsIterator* destination_locators_end, const std::chrono::steady_clock::time_point& max_blocking_time_point) { fastrtps::rtps::LocatorsIterator& it = *destination_locators_begin; bool ret = true; std::shared_ptr<SharedMemManager::Buffer> shared_buffer; try { while (it != *destination_locators_end) { if (IsLocatorSupported(*it)) { // Only copy the first time if (shared_buffer == nullptr) { shared_buffer = copy_to_shared_buffer(send_buffer, send_buffer_size, max_blocking_time_point); } ret &= send(shared_buffer, *it); if (packet_logger_ && ret) { packet_logger_->QueueLog({packet_logger_->now(), Locator_t(), * it, shared_buffer}); } } ++it; } } catch (const std::exception& e) { logWarning(RTPS_MSG_OUT, e.what()); // Segment overflow with discard policy doesn't return error. 
if (!shared_buffer && configuration_.segment_overflow_policy() == SharedMemTransportDescriptor::OverflowPolicy::DISCARD) { ret = true; } else { ret = false; } } return ret; } std::shared_ptr<SharedMemManager::Port> SharedMemTransport::find_port( uint32_t port_id) { try { return opened_ports_.at(port_id); } catch (const std::out_of_range&) { // The port is not opened std::shared_ptr<SharedMemManager::Port> port = shared_mem_manager_-> open_port(port_id, configuration_.port_queue_capacity(), configuration_.healthy_check_timeout_ms(), SharedMemGlobal::Port::OpenMode::Write); opened_ports_[port_id] = port; return port; } } bool SharedMemTransport::push_discard( const std::shared_ptr<SharedMemManager::Buffer>& buffer, const Locator_t& remote_locator) { try { if (!find_port(remote_locator.port)->try_push(buffer)) { logWarning(RTPS_MSG_OUT, "Port " << remote_locator.port << " full. Buffer dropped"); } } catch (const std::exception& error) { logWarning(RTPS_MSG_OUT, error.what()); return false; } return true; } bool SharedMemTransport::push_fail( const std::shared_ptr<SharedMemManager::Buffer>& buffer, const Locator_t& remote_locator) { try { return find_port(remote_locator.port)->try_push(buffer); } catch (const std::exception& error) { logWarning(RTPS_MSG_OUT, error.what()); return false; } } bool SharedMemTransport::send( const std::shared_ptr<SharedMemManager::Buffer>& buffer, const Locator_t& remote_locator) { if (!push_lambda_(*this, buffer, remote_locator)) { return false; } logInfo(RTPS_MSG_OUT, "(ID:" << std::this_thread::get_id() <<") " << "SharedMemTransport: " << buffer->size() << " bytes to port " << remote_locator.port); return true; } /** * Invalidate all selector entries containing certain multicast locator. * * This function will process all entries from 'index' onwards and, if any * of them has 'locator' on its multicast list, will invalidate them * (i.e. their 'transport_should_process' flag will be changed to false). * * If this function returns true, the locator received should be selected. * * @param entries Selector entries collection to process * @param index Starting index to process * @param locator Locator to be searched * * @return true when at least one entry was invalidated, false otherwise */ void SharedMemTransport::select_locators( LocatorSelector& selector) const { fastrtps::ResourceLimitedVector<LocatorSelectorEntry*>& entries = selector.transport_starts(); for (size_t i = 0; i < entries.size(); ++i) { LocatorSelectorEntry* entry = entries[i]; if (entry->transport_should_process) { bool selected = false; // With shared-memory transport using multicast vs unicast is not an advantage // because no copies are saved. 
So no multicast locators are selected for (size_t j = 0; j < entry->unicast.size(); ++j) { if (IsLocatorSupported(entry->unicast[j]) && !selector.is_selected(entry->unicast[j])) { entry->state.unicast.push_back(j); selected = true; } } // Select this entry if necessary if (selected) { selector.select(i); } } } } bool SharedMemTransport::fillMetatrafficMulticastLocator( Locator_t& locator, uint32_t metatraffic_multicast_port) const { if (locator.port == 0) { locator.port = metatraffic_multicast_port; } return true; } bool SharedMemTransport::fillMetatrafficUnicastLocator( Locator_t& locator, uint32_t metatraffic_unicast_port) const { if (locator.port == 0) { locator.port = metatraffic_unicast_port; } return true; } bool SharedMemTransport::configureInitialPeerLocator( Locator_t& locator, const PortParameters& port_params, uint32_t domainId, LocatorList_t& list) const { if (locator.port == 0) { for (uint32_t i = 0; i < configuration()->maxInitialPeersRange; ++i) { Locator_t auxloc(locator); auxloc.port = port_params.getUnicastPort(domainId, i); list.push_back(auxloc); } } else { list.push_back(locator); } return true; } bool SharedMemTransport::fillUnicastLocator( Locator_t& locator, uint32_t well_known_port) const { if (locator.port == 0) { locator.port = well_known_port; } return true; }
1
18,071
I thought we were removing the FAIL policy altogether...
eProsima-Fast-DDS
cpp
@@ -14,7 +14,7 @@ def txt2tags_actionFunc(target,source,env): import txt2tags - txt2tags.exec_command_line([str(source[0])]) + txt2tags.exec_command_line(["--outfile", str(source[0])[:-3] + "html", str(source[0])]) def exists(env): try:
1
### #This file is a part of the NVDA project. #URL: http://www.nvda-project.org/ #Copyright 2010 James Teh <[email protected]>. #This program is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License version 2.0, as published by #the Free Software Foundation. #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. #This license can be found at: #http://www.gnu.org/licenses/old-licenses/gpl-2.0.html ### def txt2tags_actionFunc(target,source,env): import txt2tags txt2tags.exec_command_line([str(source[0])]) def exists(env): try: import txt2tags return True except ImportError: return False def generate(env): env['BUILDERS']['txt2tags']=env.Builder( action=env.Action(txt2tags_actionFunc,lambda t,s,e: 'Converting %s to html'%s[0].path), suffix='.html', src_suffix='.t2t' )
1
22,407
Could you elaborate on why you made this change?
nvaccess-nvda
py
@@ -374,6 +374,9 @@ TEST(Scanner, Basic) { CHECK_SEMANTIC_VALUE("\"\\\\\\\110 \"", TokenType::STRING, "\\H "), CHECK_SEMANTIC_VALUE("\"\\\\\\\\110 \"", TokenType::STRING, "\\\\110 "), CHECK_SEMANTIC_VALUE("\"\\\\\\\\\110 \"", TokenType::STRING, "\\\\H "), + + + CHECK_SEMANTIC_VALUE("\"己所不欲,勿施于人\"", TokenType::STRING, "己所不欲,勿施于人"), }; #undef CHECK_SEMANTIC_TYPE #undef CHECK_SEMANTIC_VALUE
1
/* Copyright (c) 2018 vesoft inc. All rights reserved. * * This source code is licensed under Apache 2.0 License, * attached with Common Clause Condition 1.0, found in the LICENSES directory. */ #include "base/Base.h" #include <gtest/gtest.h> #include <sstream> #include <vector> #include <utility> #include "parser/GraphParser.hpp" #include "parser/GraphScanner.h" using testing::AssertionFailure; using testing::AssertionSuccess; using testing::AssertionResult; namespace nebula { using semantic_type = nebula::GraphParser::semantic_type; static auto checkSemanticValue(const char *expected, semantic_type *sv) { auto actual = *sv->strval; delete sv->strval; if (expected != actual) { return AssertionFailure() << "Semantic value not match, " << "expected: " << expected << ", actual: " << actual; } return AssertionSuccess(); } static auto checkSemanticValue(bool expected, semantic_type *sv) { auto actual = sv->boolval; if (expected != actual) { return AssertionFailure() << "Semantic value not match, " << "expected: " << expected << ", actual: " << actual; } return AssertionSuccess(); } template <typename T> static std::enable_if_t<std::is_integral<T>::value, AssertionResult> checkSemanticValue(T expected, semantic_type *sv) { auto actual = static_cast<T>(sv->intval); if (expected != actual) { return AssertionFailure() << "Semantic value not match, " << "expected: " << expected << ", actual: " << actual; } return AssertionSuccess(); } template <typename T> static std::enable_if_t<std::is_floating_point<T>::value, AssertionResult> checkSemanticValue(T expected, semantic_type *sv) { auto actual = static_cast<T>(sv->doubleval); if (expected != actual) { return AssertionFailure() << "Semantic value not match, " << "expected: " << expected << ", actual: " << actual; } return AssertionSuccess(); } TEST(Scanner, Basic) { using TokenType = nebula::GraphParser::token_type; using Validator = std::function<::testing::AssertionResult()>; nebula::GraphParser::semantic_type yylval; nebula::GraphParser::location_type yyloc; GraphScanner scanner; std::string stream; #define CHECK_SEMANTIC_TYPE(STR, TYPE) \ (stream += " ", stream += STR, [&] () { \ auto actual = scanner.yylex(&yylval, &yyloc); \ if (actual != TYPE) { \ return AssertionFailure() << "Token type not match for `" \ << STR << "', expected: " \ << static_cast<int>(TYPE) \ << ", actual: " \ << static_cast<int>(actual); \ } else { \ return AssertionSuccess(); \ } \ }) #define CHECK_SEMANTIC_VALUE(STR, TYPE, value) \ (stream += " ", stream += STR, [&] () { \ auto actual = scanner.yylex(&yylval, &yyloc); \ if (actual != TYPE) { \ return AssertionFailure() << "Token type not match for `" \ << STR << "', expected: " \ << static_cast<int>(TYPE) \ << ", actual: " \ << static_cast<int>(actual); \ } else { \ return checkSemanticValue(value, &yylval); \ } \ }) std::vector<Validator> validators = { CHECK_SEMANTIC_TYPE(".", TokenType::DOT), CHECK_SEMANTIC_TYPE(",", TokenType::COMMA), CHECK_SEMANTIC_TYPE(":", TokenType::COLON), CHECK_SEMANTIC_TYPE(";", TokenType::SEMICOLON), CHECK_SEMANTIC_TYPE("+", TokenType::PLUS), CHECK_SEMANTIC_TYPE("-", TokenType::MINUS), CHECK_SEMANTIC_TYPE("*", TokenType::MUL), CHECK_SEMANTIC_TYPE("/", TokenType::DIV), CHECK_SEMANTIC_TYPE("%", TokenType::MOD), CHECK_SEMANTIC_TYPE("!", TokenType::NOT), CHECK_SEMANTIC_TYPE("@", TokenType::AT), CHECK_SEMANTIC_TYPE("<", TokenType::LT), CHECK_SEMANTIC_TYPE("<=", TokenType::LE), CHECK_SEMANTIC_TYPE(">", TokenType::GT), CHECK_SEMANTIC_TYPE(">=", TokenType::GE), CHECK_SEMANTIC_TYPE("==", 
TokenType::EQ), CHECK_SEMANTIC_TYPE("!=", TokenType::NE), CHECK_SEMANTIC_TYPE("||", TokenType::OR), CHECK_SEMANTIC_TYPE("&&", TokenType::AND), CHECK_SEMANTIC_TYPE("|", TokenType::PIPE), CHECK_SEMANTIC_TYPE("=", TokenType::ASSIGN), CHECK_SEMANTIC_TYPE("(", TokenType::L_PAREN), CHECK_SEMANTIC_TYPE(")", TokenType::R_PAREN), CHECK_SEMANTIC_TYPE("[", TokenType::L_BRACKET), CHECK_SEMANTIC_TYPE("]", TokenType::R_BRACKET), CHECK_SEMANTIC_TYPE("{", TokenType::L_BRACE), CHECK_SEMANTIC_TYPE("}", TokenType::R_BRACE), CHECK_SEMANTIC_TYPE("<-", TokenType::L_ARROW), CHECK_SEMANTIC_TYPE("->", TokenType::R_ARROW), CHECK_SEMANTIC_TYPE("$-", TokenType::INPUT_REF), CHECK_SEMANTIC_TYPE("$^", TokenType::SRC_REF), CHECK_SEMANTIC_TYPE("$$", TokenType::DST_REF), CHECK_SEMANTIC_TYPE("GO", TokenType::KW_GO), CHECK_SEMANTIC_TYPE("go", TokenType::KW_GO), CHECK_SEMANTIC_TYPE("AS", TokenType::KW_AS), CHECK_SEMANTIC_TYPE("as", TokenType::KW_AS), CHECK_SEMANTIC_TYPE("TO", TokenType::KW_TO), CHECK_SEMANTIC_TYPE("to", TokenType::KW_TO), CHECK_SEMANTIC_TYPE("USE", TokenType::KW_USE), CHECK_SEMANTIC_TYPE("use", TokenType::KW_USE), CHECK_SEMANTIC_TYPE("SET", TokenType::KW_SET), CHECK_SEMANTIC_TYPE("set", TokenType::KW_SET), CHECK_SEMANTIC_TYPE("FROM", TokenType::KW_FROM), CHECK_SEMANTIC_TYPE("from", TokenType::KW_FROM), CHECK_SEMANTIC_TYPE("WHERE", TokenType::KW_WHERE), CHECK_SEMANTIC_TYPE("where", TokenType::KW_WHERE), CHECK_SEMANTIC_TYPE("MATCH", TokenType::KW_MATCH), CHECK_SEMANTIC_TYPE("match", TokenType::KW_MATCH), CHECK_SEMANTIC_TYPE("INSERT", TokenType::KW_INSERT), CHECK_SEMANTIC_TYPE("insert", TokenType::KW_INSERT), CHECK_SEMANTIC_TYPE("VALUES", TokenType::KW_VALUES), CHECK_SEMANTIC_TYPE("values", TokenType::KW_VALUES), CHECK_SEMANTIC_TYPE("YIELD", TokenType::KW_YIELD), CHECK_SEMANTIC_TYPE("yield", TokenType::KW_YIELD), CHECK_SEMANTIC_TYPE("RETURN", TokenType::KW_RETURN), CHECK_SEMANTIC_TYPE("return", TokenType::KW_RETURN), CHECK_SEMANTIC_TYPE("VERTEX", TokenType::KW_VERTEX), CHECK_SEMANTIC_TYPE("vertex", TokenType::KW_VERTEX), CHECK_SEMANTIC_TYPE("EDGE", TokenType::KW_EDGE), CHECK_SEMANTIC_TYPE("edge", TokenType::KW_EDGE), CHECK_SEMANTIC_TYPE("EDGES", TokenType::KW_EDGES), CHECK_SEMANTIC_TYPE("edges", TokenType::KW_EDGES), CHECK_SEMANTIC_TYPE("UPDATE", TokenType::KW_UPDATE), CHECK_SEMANTIC_TYPE("update", TokenType::KW_UPDATE), CHECK_SEMANTIC_TYPE("ALTER", TokenType::KW_ALTER), CHECK_SEMANTIC_TYPE("alter", TokenType::KW_ALTER), CHECK_SEMANTIC_TYPE("STEPS", TokenType::KW_STEPS), CHECK_SEMANTIC_TYPE("steps", TokenType::KW_STEPS), CHECK_SEMANTIC_TYPE("OVER", TokenType::KW_OVER), CHECK_SEMANTIC_TYPE("over", TokenType::KW_OVER), CHECK_SEMANTIC_TYPE("UPTO", TokenType::KW_UPTO), CHECK_SEMANTIC_TYPE("upto", TokenType::KW_UPTO), CHECK_SEMANTIC_TYPE("REVERSELY", TokenType::KW_REVERSELY), CHECK_SEMANTIC_TYPE("reversely", TokenType::KW_REVERSELY), CHECK_SEMANTIC_TYPE("SPACE", TokenType::KW_SPACE), CHECK_SEMANTIC_TYPE("space", TokenType::KW_SPACE), CHECK_SEMANTIC_TYPE("SPACES", TokenType::KW_SPACES), CHECK_SEMANTIC_TYPE("spaces", TokenType::KW_SPACES), CHECK_SEMANTIC_TYPE("BIGINT", TokenType::KW_BIGINT), CHECK_SEMANTIC_TYPE("bigint", TokenType::KW_BIGINT), CHECK_SEMANTIC_TYPE("DOUBLE", TokenType::KW_DOUBLE), CHECK_SEMANTIC_TYPE("double", TokenType::KW_DOUBLE), CHECK_SEMANTIC_TYPE("STRING", TokenType::KW_STRING), CHECK_SEMANTIC_TYPE("string", TokenType::KW_STRING), CHECK_SEMANTIC_TYPE("BOOL", TokenType::KW_BOOL), CHECK_SEMANTIC_TYPE("bool", TokenType::KW_BOOL), CHECK_SEMANTIC_TYPE("TAG", TokenType::KW_TAG), 
CHECK_SEMANTIC_TYPE("tag", TokenType::KW_TAG), CHECK_SEMANTIC_TYPE("TAGS", TokenType::KW_TAGS), CHECK_SEMANTIC_TYPE("tags", TokenType::KW_TAGS), CHECK_SEMANTIC_TYPE("UNION", TokenType::KW_UNION), CHECK_SEMANTIC_TYPE("union", TokenType::KW_UNION), CHECK_SEMANTIC_TYPE("INTERSECT", TokenType::KW_INTERSECT), CHECK_SEMANTIC_TYPE("intersect", TokenType::KW_INTERSECT), CHECK_SEMANTIC_TYPE("MINUS", TokenType::KW_MINUS), CHECK_SEMANTIC_TYPE("minus", TokenType::KW_MINUS), CHECK_SEMANTIC_TYPE("SHOW", TokenType::KW_SHOW), CHECK_SEMANTIC_TYPE("show", TokenType::KW_SHOW), CHECK_SEMANTIC_TYPE("Show", TokenType::KW_SHOW), CHECK_SEMANTIC_TYPE("ADD", TokenType::KW_ADD), CHECK_SEMANTIC_TYPE("add", TokenType::KW_ADD), CHECK_SEMANTIC_TYPE("Add", TokenType::KW_ADD), CHECK_SEMANTIC_TYPE("HOSTS", TokenType::KW_HOSTS), CHECK_SEMANTIC_TYPE("hosts", TokenType::KW_HOSTS), CHECK_SEMANTIC_TYPE("Hosts", TokenType::KW_HOSTS), CHECK_SEMANTIC_TYPE("TIMESTAMP", TokenType::KW_TIMESTAMP), CHECK_SEMANTIC_TYPE("timestamp", TokenType::KW_TIMESTAMP), CHECK_SEMANTIC_TYPE("Timestamp", TokenType::KW_TIMESTAMP), CHECK_SEMANTIC_TYPE("DELETE", TokenType::KW_DELETE), CHECK_SEMANTIC_TYPE("delete", TokenType::KW_DELETE), CHECK_SEMANTIC_TYPE("Delete", TokenType::KW_DELETE), CHECK_SEMANTIC_TYPE("FIND", TokenType::KW_FIND), CHECK_SEMANTIC_TYPE("find", TokenType::KW_FIND), CHECK_SEMANTIC_TYPE("Find", TokenType::KW_FIND), CHECK_SEMANTIC_TYPE("CREATE", TokenType::KW_CREATE), CHECK_SEMANTIC_TYPE("create", TokenType::KW_CREATE), CHECK_SEMANTIC_TYPE("Create", TokenType::KW_CREATE), CHECK_SEMANTIC_TYPE("PARTITION_NUM", TokenType::KW_PARTITION_NUM), CHECK_SEMANTIC_TYPE("partition_num", TokenType::KW_PARTITION_NUM), CHECK_SEMANTIC_TYPE("Partition_num", TokenType::KW_PARTITION_NUM), CHECK_SEMANTIC_TYPE("REPLICA_FACTOR", TokenType::KW_REPLICA_FACTOR), CHECK_SEMANTIC_TYPE("replica_factor", TokenType::KW_REPLICA_FACTOR), CHECK_SEMANTIC_TYPE("Replica_factor", TokenType::KW_REPLICA_FACTOR), CHECK_SEMANTIC_TYPE("DROP", TokenType::KW_DROP), CHECK_SEMANTIC_TYPE("drop", TokenType::KW_DROP), CHECK_SEMANTIC_TYPE("Drop", TokenType::KW_DROP), CHECK_SEMANTIC_TYPE("DESC", TokenType::KW_DESC), CHECK_SEMANTIC_TYPE("desc", TokenType::KW_DESC), CHECK_SEMANTIC_TYPE("Desc", TokenType::KW_DESC), CHECK_SEMANTIC_TYPE("DESCRIBE", TokenType::KW_DESCRIBE), CHECK_SEMANTIC_TYPE("describe", TokenType::KW_DESCRIBE), CHECK_SEMANTIC_TYPE("Describe", TokenType::KW_DESCRIBE), CHECK_SEMANTIC_TYPE("REMOVE", TokenType::KW_REMOVE), CHECK_SEMANTIC_TYPE("remove", TokenType::KW_REMOVE), CHECK_SEMANTIC_TYPE("Remove", TokenType::KW_REMOVE), CHECK_SEMANTIC_TYPE("IF", TokenType::KW_IF), CHECK_SEMANTIC_TYPE("If", TokenType::KW_IF), CHECK_SEMANTIC_TYPE("if", TokenType::KW_IF), CHECK_SEMANTIC_TYPE("NOT", TokenType::KW_NOT), CHECK_SEMANTIC_TYPE("Not", TokenType::KW_NOT), CHECK_SEMANTIC_TYPE("not", TokenType::KW_NOT), CHECK_SEMANTIC_TYPE("EXISTS", TokenType::KW_EXISTS), CHECK_SEMANTIC_TYPE("Exists", TokenType::KW_EXISTS), CHECK_SEMANTIC_TYPE("exists", TokenType::KW_EXISTS), CHECK_SEMANTIC_TYPE("WITH", TokenType::KW_WITH), CHECK_SEMANTIC_TYPE("With", TokenType::KW_WITH), CHECK_SEMANTIC_TYPE("with", TokenType::KW_WITH), CHECK_SEMANTIC_TYPE("FIRSTNAME", TokenType::KW_FIRSTNAME), CHECK_SEMANTIC_TYPE("Firstname", TokenType::KW_FIRSTNAME), CHECK_SEMANTIC_TYPE("FirstName", TokenType::KW_FIRSTNAME), CHECK_SEMANTIC_TYPE("firstname", TokenType::KW_FIRSTNAME), CHECK_SEMANTIC_TYPE("LASTNAME", TokenType::KW_LASTNAME), CHECK_SEMANTIC_TYPE("Lastname", TokenType::KW_LASTNAME), CHECK_SEMANTIC_TYPE("LastName", 
TokenType::KW_LASTNAME), CHECK_SEMANTIC_TYPE("lastname", TokenType::KW_LASTNAME), CHECK_SEMANTIC_TYPE("EMAIL", TokenType::KW_EMAIL), CHECK_SEMANTIC_TYPE("Email", TokenType::KW_EMAIL), CHECK_SEMANTIC_TYPE("email", TokenType::KW_EMAIL), CHECK_SEMANTIC_TYPE("PHONE", TokenType::KW_PHONE), CHECK_SEMANTIC_TYPE("Phone", TokenType::KW_PHONE), CHECK_SEMANTIC_TYPE("phone", TokenType::KW_PHONE), CHECK_SEMANTIC_TYPE("USER", TokenType::KW_USER), CHECK_SEMANTIC_TYPE("User", TokenType::KW_USER), CHECK_SEMANTIC_TYPE("user", TokenType::KW_USER), CHECK_SEMANTIC_TYPE("USERS", TokenType::KW_USERS), CHECK_SEMANTIC_TYPE("Users", TokenType::KW_USERS), CHECK_SEMANTIC_TYPE("users", TokenType::KW_USERS), CHECK_SEMANTIC_TYPE("PASSWORD", TokenType::KW_PASSWORD), CHECK_SEMANTIC_TYPE("Password", TokenType::KW_PASSWORD), CHECK_SEMANTIC_TYPE("password", TokenType::KW_PASSWORD), CHECK_SEMANTIC_TYPE("CHANGE", TokenType::KW_CHANGE), CHECK_SEMANTIC_TYPE("Change", TokenType::KW_CHANGE), CHECK_SEMANTIC_TYPE("change", TokenType::KW_CHANGE), CHECK_SEMANTIC_TYPE("ROLE", TokenType::KW_ROLE), CHECK_SEMANTIC_TYPE("Role", TokenType::KW_ROLE), CHECK_SEMANTIC_TYPE("role", TokenType::KW_ROLE), CHECK_SEMANTIC_TYPE("GOD", TokenType::KW_GOD), CHECK_SEMANTIC_TYPE("God", TokenType::KW_GOD), CHECK_SEMANTIC_TYPE("god", TokenType::KW_GOD), CHECK_SEMANTIC_TYPE("ADMIN", TokenType::KW_ADMIN), CHECK_SEMANTIC_TYPE("Admin", TokenType::KW_ADMIN), CHECK_SEMANTIC_TYPE("admin", TokenType::KW_ADMIN), CHECK_SEMANTIC_TYPE("GUEST", TokenType::KW_GUEST), CHECK_SEMANTIC_TYPE("Guest", TokenType::KW_GUEST), CHECK_SEMANTIC_TYPE("guest", TokenType::KW_GUEST), CHECK_SEMANTIC_TYPE("GRANT", TokenType::KW_GRANT), CHECK_SEMANTIC_TYPE("Grant", TokenType::KW_GRANT), CHECK_SEMANTIC_TYPE("grant", TokenType::KW_GRANT), CHECK_SEMANTIC_TYPE("REVOKE", TokenType::KW_REVOKE), CHECK_SEMANTIC_TYPE("Revoke", TokenType::KW_REVOKE), CHECK_SEMANTIC_TYPE("revoke", TokenType::KW_REVOKE), CHECK_SEMANTIC_TYPE("ON", TokenType::KW_ON), CHECK_SEMANTIC_TYPE("On", TokenType::KW_ON), CHECK_SEMANTIC_TYPE("on", TokenType::KW_ON), CHECK_SEMANTIC_TYPE("ROLES", TokenType::KW_ROLES), CHECK_SEMANTIC_TYPE("Roles", TokenType::KW_ROLES), CHECK_SEMANTIC_TYPE("BY", TokenType::KW_BY), CHECK_SEMANTIC_TYPE("By", TokenType::KW_BY), CHECK_SEMANTIC_TYPE("by", TokenType::KW_BY), CHECK_SEMANTIC_TYPE("IN", TokenType::KW_IN), CHECK_SEMANTIC_TYPE("In", TokenType::KW_IN), CHECK_SEMANTIC_TYPE("TTL_DURATION", TokenType::KW_TTL_DURATION), CHECK_SEMANTIC_TYPE("ttl_duration", TokenType::KW_TTL_DURATION), CHECK_SEMANTIC_TYPE("Ttl_duration", TokenType::KW_TTL_DURATION), CHECK_SEMANTIC_TYPE("TTL_COL", TokenType::KW_TTL_COL), CHECK_SEMANTIC_TYPE("ttl_col", TokenType::KW_TTL_COL), CHECK_SEMANTIC_TYPE("Ttl_col", TokenType::KW_TTL_COL), CHECK_SEMANTIC_TYPE("ORDER", TokenType::KW_ORDER), CHECK_SEMANTIC_TYPE("Order", TokenType::KW_ORDER), CHECK_SEMANTIC_TYPE("order", TokenType::KW_ORDER), CHECK_SEMANTIC_TYPE("ASC", TokenType::KW_ASC), CHECK_SEMANTIC_TYPE("Asc", TokenType::KW_ASC), CHECK_SEMANTIC_TYPE("asc", TokenType::KW_ASC), CHECK_SEMANTIC_TYPE("_type", TokenType::TYPE_PROP), CHECK_SEMANTIC_TYPE("_id", TokenType::ID_PROP), CHECK_SEMANTIC_TYPE("_src", TokenType::SRC_ID_PROP), CHECK_SEMANTIC_TYPE("_dst", TokenType::DST_ID_PROP), CHECK_SEMANTIC_TYPE("_rank", TokenType::RANK_PROP), CHECK_SEMANTIC_VALUE("TRUE", TokenType::BOOL, true), CHECK_SEMANTIC_VALUE("true", TokenType::BOOL, true), CHECK_SEMANTIC_VALUE("FALSE", TokenType::BOOL, false), CHECK_SEMANTIC_VALUE("false", TokenType::BOOL, false), CHECK_SEMANTIC_VALUE("$var", 
TokenType::VARIABLE, "var"), CHECK_SEMANTIC_VALUE("$var123", TokenType::VARIABLE, "var123"), CHECK_SEMANTIC_VALUE("label", TokenType::LABEL, "label"), CHECK_SEMANTIC_VALUE("label123", TokenType::LABEL, "label123"), CHECK_SEMANTIC_VALUE("123", TokenType::INTEGER, 123), CHECK_SEMANTIC_VALUE("0x123", TokenType::INTEGER, 0x123), CHECK_SEMANTIC_VALUE("0xdeadbeef", TokenType::INTEGER, 0xdeadbeef), CHECK_SEMANTIC_VALUE("0123", TokenType::INTEGER, 0123), CHECK_SEMANTIC_VALUE("123.", TokenType::DOUBLE, 123.), CHECK_SEMANTIC_VALUE(".123", TokenType::DOUBLE, 0.123), CHECK_SEMANTIC_VALUE("123.456", TokenType::DOUBLE, 123.456), CHECK_SEMANTIC_VALUE("127.0.0.1", TokenType::IPV4, 0x7F000001), CHECK_SEMANTIC_VALUE("\"Hello\"", TokenType::STRING, "Hello"), CHECK_SEMANTIC_VALUE("\"Hello\\\\\"", TokenType::STRING, "Hello\\"), CHECK_SEMANTIC_VALUE("\"He\\nllo\"", TokenType::STRING, "He\nllo"), CHECK_SEMANTIC_VALUE("\"He\\\nllo\"", TokenType::STRING, "He\nllo"), CHECK_SEMANTIC_VALUE("\"\\\"Hello\\\"\"", TokenType::STRING, "\"Hello\""), // escape Normal character CHECK_SEMANTIC_VALUE("\"Hell\\o\"", TokenType::STRING, "Hello"), CHECK_SEMANTIC_VALUE("\"Hell\\\\o\"", TokenType::STRING, "Hell\\o"), CHECK_SEMANTIC_VALUE("\"Hell\\\\\\o\"", TokenType::STRING, "Hell\\o"), CHECK_SEMANTIC_VALUE("\"\\110ello\"", TokenType::STRING, "Hello"), CHECK_SEMANTIC_VALUE("\"\110ello\"", TokenType::STRING, "Hello"), CHECK_SEMANTIC_VALUE("\"\110 \"", TokenType::STRING, "H "), CHECK_SEMANTIC_VALUE("\"\\110 \"", TokenType::STRING, "H "), CHECK_SEMANTIC_VALUE("\"\\\110 \"", TokenType::STRING, "H "), CHECK_SEMANTIC_VALUE("\"\\\\110 \"", TokenType::STRING, "\\110 "), CHECK_SEMANTIC_VALUE("\"\\\\\110 \"", TokenType::STRING, "\\H "), CHECK_SEMANTIC_VALUE("\"\\\\\\110 \"", TokenType::STRING, "\\H "), CHECK_SEMANTIC_VALUE("\"\\\\\\\110 \"", TokenType::STRING, "\\H "), CHECK_SEMANTIC_VALUE("\"\\\\\\\\110 \"", TokenType::STRING, "\\\\110 "), CHECK_SEMANTIC_VALUE("\"\\\\\\\\\110 \"", TokenType::STRING, "\\\\H "), }; #undef CHECK_SEMANTIC_TYPE #undef CHECK_SEMANTIC_VALUE auto input = [&] (char *buf, int maxSize) { static int copied = 0; int left = stream.size() - copied; if (left == 0) { return 0; } int n = left < maxSize ? left : maxSize; ::memcpy(buf, &stream[copied], n); copied += n; return n; }; scanner.setReadBuffer(input); for (auto &item : validators) { ASSERT_TRUE(item()); } } } // namespace nebula
1
19,456
Before the fix, does this sentence make the service crash? My point is: can you reproduce the problem? I'm not sure the bug is due to non-ASCII code.
vesoft-inc-nebula
cpp
@@ -86,7 +86,7 @@ func repoAssignment() macaron.Handler { // Contexter middleware already checks token for user sign in process. func reqToken() macaron.Handler { return func(c *context.Context) { - if !c.IsLogged { + if true != c.Data["IsApiToken"] { c.Error(401) return }
1
// Copyright 2015 The Gogs Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. package v1 import ( "strings" "github.com/go-macaron/binding" "gopkg.in/macaron.v1" api "github.com/gogs/go-gogs-client" "github.com/gogs/gogs/models" "github.com/gogs/gogs/models/errors" "github.com/gogs/gogs/pkg/context" "github.com/gogs/gogs/pkg/form" "github.com/gogs/gogs/routes/api/v1/admin" "github.com/gogs/gogs/routes/api/v1/misc" "github.com/gogs/gogs/routes/api/v1/org" "github.com/gogs/gogs/routes/api/v1/repo" "github.com/gogs/gogs/routes/api/v1/user" ) func repoAssignment() macaron.Handler { return func(c *context.APIContext) { userName := c.Params(":username") repoName := c.Params(":reponame") var ( owner *models.User err error ) // Check if the user is the same as the repository owner. if c.IsLogged && c.User.LowerName == strings.ToLower(userName) { owner = c.User } else { owner, err = models.GetUserByName(userName) if err != nil { if errors.IsUserNotExist(err) { c.Status(404) } else { c.Error(500, "GetUserByName", err) } return } } c.Repo.Owner = owner // Get repository. repo, err := models.GetRepositoryByName(owner.ID, repoName) if err != nil { if errors.IsRepoNotExist(err) { c.Status(404) } else { c.Error(500, "GetRepositoryByName", err) } return } else if err = repo.GetOwner(); err != nil { c.Error(500, "GetOwner", err) return } if c.IsLogged && c.User.IsAdmin { c.Repo.AccessMode = models.ACCESS_MODE_OWNER } else { mode, err := models.AccessLevel(c.User.ID, repo) if err != nil { c.Error(500, "AccessLevel", err) return } c.Repo.AccessMode = mode } if !c.Repo.HasAccess() { c.Status(404) return } c.Repo.Repository = repo } } // Contexter middleware already checks token for user sign in process. func reqToken() macaron.Handler { return func(c *context.Context) { if !c.IsLogged { c.Error(401) return } } } func reqBasicAuth() macaron.Handler { return func(c *context.Context) { if !c.IsBasicAuth { c.Error(401) return } } } func reqAdmin() macaron.Handler { return func(c *context.Context) { if !c.IsLogged || !c.User.IsAdmin { c.Error(403) return } } } func reqRepoWriter() macaron.Handler { return func(c *context.Context) { if !c.Repo.IsWriter() { c.Error(403) return } } } func orgAssignment(args ...bool) macaron.Handler { var ( assignOrg bool assignTeam bool ) if len(args) > 0 { assignOrg = args[0] } if len(args) > 1 { assignTeam = args[1] } return func(c *context.APIContext) { c.Org = new(context.APIOrganization) var err error if assignOrg { c.Org.Organization, err = models.GetUserByName(c.Params(":orgname")) if err != nil { if errors.IsUserNotExist(err) { c.Status(404) } else { c.Error(500, "GetUserByName", err) } return } } if assignTeam { c.Org.Team, err = models.GetTeamByID(c.ParamsInt64(":teamid")) if err != nil { if errors.IsUserNotExist(err) { c.Status(404) } else { c.Error(500, "GetTeamById", err) } return } } } } func mustEnableIssues(c *context.APIContext) { if !c.Repo.Repository.EnableIssues || c.Repo.Repository.EnableExternalTracker { c.Status(404) return } } // RegisterRoutes registers all v1 APIs routes to web application. 
// FIXME: custom form error response func RegisterRoutes(m *macaron.Macaron) { bind := binding.Bind m.Group("/v1", func() { // Handle preflight OPTIONS request m.Options("/*", func() {}) // Miscellaneous m.Post("/markdown", bind(api.MarkdownOption{}), misc.Markdown) m.Post("/markdown/raw", misc.MarkdownRaw) // Users m.Group("/users", func() { m.Get("/search", user.Search) m.Group("/:username", func() { m.Get("", user.GetInfo) m.Group("/tokens", func() { m.Combo("").Get(user.ListAccessTokens). Post(bind(api.CreateAccessTokenOption{}), user.CreateAccessToken) }, reqBasicAuth()) }) }) m.Group("/users", func() { m.Group("/:username", func() { m.Get("/keys", user.ListPublicKeys) m.Get("/followers", user.ListFollowers) m.Group("/following", func() { m.Get("", user.ListFollowing) m.Get("/:target", user.CheckFollowing) }) }) }, reqToken()) m.Group("/user", func() { m.Get("", user.GetAuthenticatedUser) m.Combo("/emails").Get(user.ListEmails). Post(bind(api.CreateEmailOption{}), user.AddEmail). Delete(bind(api.CreateEmailOption{}), user.DeleteEmail) m.Get("/followers", user.ListMyFollowers) m.Group("/following", func() { m.Get("", user.ListMyFollowing) m.Combo("/:username").Get(user.CheckMyFollowing).Put(user.Follow).Delete(user.Unfollow) }) m.Group("/keys", func() { m.Combo("").Get(user.ListMyPublicKeys). Post(bind(api.CreateKeyOption{}), user.CreatePublicKey) m.Combo("/:id").Get(user.GetPublicKey). Delete(user.DeletePublicKey) }) m.Combo("/issues").Get(repo.ListUserIssues) }, reqToken()) // Repositories m.Get("/users/:username/repos", reqToken(), repo.ListUserRepositories) m.Get("/orgs/:org/repos", reqToken(), repo.ListOrgRepositories) m.Combo("/user/repos", reqToken()).Get(repo.ListMyRepos). Post(bind(api.CreateRepoOption{}), repo.Create) m.Post("/org/:org/repos", reqToken(), bind(api.CreateRepoOption{}), repo.CreateOrgRepo) m.Group("/repos", func() { m.Get("/search", repo.Search) }) m.Group("/repos", func() { m.Post("/migrate", bind(form.MigrateRepo{}), repo.Migrate) m.Combo("/:username/:reponame", repoAssignment()).Get(repo.Get). Delete(repo.Delete) m.Group("/:username/:reponame", func() { m.Group("/hooks", func() { m.Combo("").Get(repo.ListHooks). Post(bind(api.CreateHookOption{}), repo.CreateHook) m.Combo("/:id").Patch(bind(api.EditHookOption{}), repo.EditHook). Delete(repo.DeleteHook) }) m.Group("/collaborators", func() { m.Get("", repo.ListCollaborators) m.Combo("/:collaborator").Get(repo.IsCollaborator).Put(bind(api.AddCollaboratorOption{}), repo.AddCollaborator). Delete(repo.DeleteCollaborator) }) m.Get("/raw/*", context.RepoRef(), repo.GetRawFile) m.Get("/archive/*", repo.GetArchive) m.Get("/forks", repo.ListForks) m.Group("/branches", func() { m.Get("", repo.ListBranches) m.Get("/*", repo.GetBranch) }) m.Group("/keys", func() { m.Combo("").Get(repo.ListDeployKeys). Post(bind(api.CreateKeyOption{}), repo.CreateDeployKey) m.Combo("/:id").Get(repo.GetDeployKey). Delete(repo.DeleteDeploykey) }) m.Group("/issues", func() { m.Combo("").Get(repo.ListIssues).Post(bind(api.CreateIssueOption{}), repo.CreateIssue) m.Group("/comments", func() { m.Get("", repo.ListRepoIssueComments) m.Combo("/:id").Patch(bind(api.EditIssueCommentOption{}), repo.EditIssueComment) }) m.Group("/:index", func() { m.Combo("").Get(repo.GetIssue).Patch(bind(api.EditIssueOption{}), repo.EditIssue) m.Group("/comments", func() { m.Combo("").Get(repo.ListIssueComments).Post(bind(api.CreateIssueCommentOption{}), repo.CreateIssueComment) m.Combo("/:id").Patch(bind(api.EditIssueCommentOption{}), repo.EditIssueComment). 
Delete(repo.DeleteIssueComment) }) m.Group("/labels", func() { m.Combo("").Get(repo.ListIssueLabels). Post(bind(api.IssueLabelsOption{}), repo.AddIssueLabels). Put(bind(api.IssueLabelsOption{}), repo.ReplaceIssueLabels). Delete(repo.ClearIssueLabels) m.Delete("/:id", repo.DeleteIssueLabel) }) }) }, mustEnableIssues) m.Group("/labels", func() { m.Combo("").Get(repo.ListLabels). Post(bind(api.CreateLabelOption{}), repo.CreateLabel) m.Combo("/:id").Get(repo.GetLabel).Patch(bind(api.EditLabelOption{}), repo.EditLabel). Delete(repo.DeleteLabel) }) m.Group("/milestones", func() { m.Combo("").Get(repo.ListMilestones). Post(reqRepoWriter(), bind(api.CreateMilestoneOption{}), repo.CreateMilestone) m.Combo("/:id").Get(repo.GetMilestone). Patch(reqRepoWriter(), bind(api.EditMilestoneOption{}), repo.EditMilestone). Delete(reqRepoWriter(), repo.DeleteMilestone) }) m.Post("/mirror-sync", repo.MirrorSync) m.Get("/editorconfig/:filename", context.RepoRef(), repo.GetEditorconfig) }, repoAssignment()) }, reqToken()) m.Get("/issues", reqToken(), repo.ListUserIssues) // Organizations m.Combo("/user/orgs", reqToken()).Get(org.ListMyOrgs).Post(bind(api.CreateOrgOption{}), org.CreateMyOrg) m.Get("/users/:username/orgs", org.ListUserOrgs) m.Group("/orgs/:orgname", func() { m.Combo("").Get(org.Get).Patch(bind(api.EditOrgOption{}), org.Edit) m.Combo("/teams").Get(org.ListTeams) }, orgAssignment(true)) m.Any("/*", func(c *context.Context) { c.Error(404) }) m.Group("/admin", func() { m.Group("/users", func() { m.Post("", bind(api.CreateUserOption{}), admin.CreateUser) m.Group("/:username", func() { m.Combo("").Patch(bind(api.EditUserOption{}), admin.EditUser). Delete(admin.DeleteUser) m.Post("/keys", bind(api.CreateKeyOption{}), admin.CreatePublicKey) m.Post("/orgs", bind(api.CreateOrgOption{}), admin.CreateOrg) m.Post("/repos", bind(api.CreateRepoOption{}), admin.CreateRepo) }) }) m.Group("/orgs/:orgname", func() { m.Group("/teams", func() { m.Post("", orgAssignment(true), bind(api.CreateTeamOption{}), admin.CreateTeam) }) }) m.Group("/teams", func() { m.Group("/:teamid", func() { m.Combo("/members/:username").Put(admin.AddTeamMember).Delete(admin.RemoveTeamMember) m.Combo("/repos/:reponame").Put(admin.AddTeamRepository).Delete(admin.RemoveTeamRepository) }, orgAssignment(false, true)) }) }, reqAdmin()) }, context.APIContexter()) }
1
13,007
This comparison looks a bit strange; we should first check the existence of the key "IsApiToken" and then check whether it is equal to true (see the sketch after this row). ~~Besides, s/IsApiToken/IsAuthedByToken/~~
gogs-gogs
go
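A minimal sketch of the check suggested in the review comment above, reusing the `macaron.Handler` / `context.Context` shapes already shown in this row's gogs file; this is an assumed illustration of the existence-then-value check, not the actual merged gogs fix:

```go
// Sketch only: look the "IsApiToken" key up and assert its type first,
// then test the value, instead of comparing `true != c.Data["IsApiToken"]`.
func reqToken() macaron.Handler {
	return func(c *context.Context) {
		isAPIToken, ok := c.Data["IsApiToken"].(bool)
		if !ok || !isAPIToken {
			c.Error(401)
			return
		}
	}
}
```

The comma-ok type assertion covers both the missing-key case and a non-bool value, which gives the existence-first ordering the comment asks for.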
@@ -568,10 +568,12 @@ func (r *ReconcileClusterSync) applySyncSets( ObservedGeneration: syncSet.AsMetaObject().GetGeneration(), Result: hiveintv1alpha1.SuccessSyncSetResult, } - if syncSet.GetSpec().ResourceApplyMode == hivev1.SyncResourceApplyMode { + applyMode := syncSet.GetSpec().ResourceApplyMode + if applyMode == hivev1.SyncResourceApplyMode { newSyncStatus.ResourcesToDelete = resourcesApplied } - if syncSet.GetSpec().ResourceApplyMode == hivev1.UpsertResourceApplyMode && len(oldSyncStatus.ResourcesToDelete) > 0 { + // applyMode defaults to UpsertResourceApplyMode + if (applyMode == hivev1.UpsertResourceApplyMode || applyMode == "") && len(oldSyncStatus.ResourcesToDelete) > 0 { logger.Infof("resource apply mode is %v but there are resources to delete in clustersync status", hivev1.UpsertResourceApplyMode) oldSyncStatus.ResourcesToDelete = nil }
1
package clustersync import ( "context" "fmt" "math/big" "math/rand" "os" "reflect" "sort" "strconv" "strings" "time" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/json" "k8s.io/client-go/rest" "k8s.io/client-go/util/flowcontrol" "k8s.io/client-go/util/workqueue" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/metrics" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" "sigs.k8s.io/yaml" hivev1 "github.com/openshift/hive/apis/hive/v1" hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1" "github.com/openshift/hive/pkg/constants" hivemetrics "github.com/openshift/hive/pkg/controller/metrics" controllerutils "github.com/openshift/hive/pkg/controller/utils" "github.com/openshift/hive/pkg/remoteclient" "github.com/openshift/hive/pkg/resource" ) const ( ControllerName = hivev1.ClustersyncControllerName defaultReapplyInterval = 2 * time.Hour reapplyIntervalEnvKey = "SYNCSET_REAPPLY_INTERVAL" reapplyIntervalJitter = 0.1 secretAPIVersion = "v1" secretKind = "Secret" labelApply = "apply" labelCreateOrUpdate = "createOrUpdate" labelCreateOnly = "createOnly" metricResultSuccess = "success" metricResultError = "error" stsName = "hive-clustersync" ) var ( metricTimeToApplySyncSet = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "hive_syncset_apply_duration_seconds", Help: "Time to first successfully apply syncset to a cluster", Buckets: []float64{5, 10, 30, 60, 300, 600, 1800, 3600}, }, []string{"group"}, ) metricTimeToApplySelectorSyncSet = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "hive_selectorsyncset_apply_duration_seconds", Help: "Time to first successfully apply each SelectorSyncSet to a new cluster. 
Does not include results when cluster labels change post-install and new syncsets match.", Buckets: []float64{5, 10, 30, 60, 300, 600, 1800, 3600}, }, []string{"name"}, ) metricResourcesApplied = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "hive_syncsetinstance_resources_applied_total", Help: "Counter incremented each time we sync a resource to a remote cluster, labeled by type of apply and result.", }, []string{"type", "result"}, ) metricTimeToApplySyncSetResource = prometheus.NewHistogramVec( prometheus.HistogramOpts{ Name: "hive_syncsetinstance_apply_duration_seconds", Help: "Time to apply individual resources in a syncset, labeled by type of apply and result.", Buckets: []float64{0.5, 1, 3, 5, 10, 20, 30}, }, []string{"type", "result"}, ) metricTimeToApplySyncSets = prometheus.NewHistogram( prometheus.HistogramOpts{ Name: "hive_clustersync_first_success_duration_seconds", Help: "Time between cluster install complete and all syncsets applied.", Buckets: []float64{60, 300, 600, 1200, 1800, 2400, 3000, 3600}, }, ) ) func init() { metrics.Registry.MustRegister(metricTimeToApplySyncSet) metrics.Registry.MustRegister(metricTimeToApplySelectorSyncSet) metrics.Registry.MustRegister(metricResourcesApplied) metrics.Registry.MustRegister(metricTimeToApplySyncSetResource) metrics.Registry.MustRegister(metricTimeToApplySyncSets) } // Add creates a new clustersync Controller and adds it to the Manager with default RBAC. The Manager will set fields on the Controller // and Start it when the Manager is Started. func Add(mgr manager.Manager) error { logger := log.WithField("controller", ControllerName) concurrentReconciles, clientRateLimiter, queueRateLimiter, err := controllerutils.GetControllerConfig(mgr.GetClient(), ControllerName) if err != nil { logger.WithError(err).Error("could not get controller configurations") return err } r, err := NewReconciler(mgr, clientRateLimiter) if err != nil { return err } logger.Debug("Getting HIVE_CLUSTERSYNC_POD_NAME") podname, found := os.LookupEnv("HIVE_CLUSTERSYNC_POD_NAME") if !found { return errors.New("environment variable HIVE_CLUSTERSYNC_POD_NAME not set") } logger.Debug("Setting ordinalID") parts := strings.Split(podname, "-") ordinalIDStr := parts[len(parts)-1] ordinalID32, err := strconv.Atoi(ordinalIDStr) if err != nil { return errors.Wrap(err, "error converting ordinalID to int") } r.ordinalID = int64(ordinalID32) logger.WithField("ordinalID", r.ordinalID).Debug("ordinalID set") return AddToManager(mgr, r, concurrentReconciles, queueRateLimiter) } // NewReconciler returns a new ReconcileClusterSync func NewReconciler(mgr manager.Manager, rateLimiter flowcontrol.RateLimiter) (*ReconcileClusterSync, error) { logger := log.WithField("controller", ControllerName) reapplyInterval := defaultReapplyInterval if envReapplyInterval := os.Getenv(reapplyIntervalEnvKey); len(envReapplyInterval) > 0 { var err error reapplyInterval, err = time.ParseDuration(envReapplyInterval) if err != nil { log.WithError(err).WithField("reapplyInterval", envReapplyInterval).Errorf("unable to parse %s", reapplyIntervalEnvKey) return nil, err } } log.WithField("reapplyInterval", reapplyInterval).Info("Reapply interval set") c := controllerutils.NewClientWithMetricsOrDie(mgr, ControllerName, &rateLimiter) return &ReconcileClusterSync{ Client: c, logger: logger, reapplyInterval: reapplyInterval, resourceHelperBuilder: resourceHelperBuilderFunc, remoteClusterAPIClientBuilder: func(cd *hivev1.ClusterDeployment) remoteclient.Builder { return remoteclient.NewBuilder(c, cd, 
ControllerName) }, }, nil } func resourceHelperBuilderFunc(restConfig *rest.Config, fakeCluster bool, logger log.FieldLogger) (resource.Helper, error) { if fakeCluster { return resource.NewFakeHelper(logger), nil } return resource.NewHelperFromRESTConfig(restConfig, logger) } // AddToManager adds a new Controller to mgr with r as the reconcile.Reconciler func AddToManager(mgr manager.Manager, r *ReconcileClusterSync, concurrentReconciles int, rateLimiter workqueue.RateLimiter) error { // Create a new controller c, err := controller.New("clusterSync-controller", mgr, controller.Options{ Reconciler: r, MaxConcurrentReconciles: concurrentReconciles, RateLimiter: rateLimiter, }) if err != nil { return err } // Watch for changes to ClusterDeployment if err := c.Watch(&source.Kind{Type: &hivev1.ClusterDeployment{}}, &handler.EnqueueRequestForObject{}); err != nil { return err } // Watch for changes to SyncSets if err := c.Watch( &source.Kind{Type: &hivev1.SyncSet{}}, handler.EnqueueRequestsFromMapFunc(requestsForSyncSet)); err != nil { return err } // Watch for changes to SelectorSyncSets if err := c.Watch( &source.Kind{Type: &hivev1.SelectorSyncSet{}}, handler.EnqueueRequestsFromMapFunc(requestsForSelectorSyncSet(r.Client, r.logger))); err != nil { return err } return nil } func requestsForSyncSet(o client.Object) []reconcile.Request { ss, ok := o.(*hivev1.SyncSet) if !ok { return nil } requests := make([]reconcile.Request, len(ss.Spec.ClusterDeploymentRefs)) for i, cdRef := range ss.Spec.ClusterDeploymentRefs { requests[i].Namespace = ss.Namespace requests[i].Name = cdRef.Name } return requests } func requestsForSelectorSyncSet(c client.Client, logger log.FieldLogger) handler.MapFunc { return func(o client.Object) []reconcile.Request { sss, ok := o.(*hivev1.SelectorSyncSet) if !ok { return nil } logger := logger.WithField("selectorSyncSet", sss.Name) labelSelector, err := metav1.LabelSelectorAsSelector(&sss.Spec.ClusterDeploymentSelector) if err != nil { logger.WithError(err).Warn("cannot parse ClusterDeployment selector") return nil } cds := &hivev1.ClusterDeploymentList{} if err := c.List(context.Background(), cds, client.MatchingLabelsSelector{Selector: labelSelector}); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not list ClusterDeployments matching SelectorSyncSet") return nil } requests := make([]reconcile.Request, len(cds.Items)) for i, cd := range cds.Items { requests[i].Namespace = cd.Namespace requests[i].Name = cd.Name } return requests } } var _ reconcile.Reconciler = &ReconcileClusterSync{} // ReconcileClusterSync reconciles a ClusterDeployment object to apply its SyncSets and SelectorSyncSets type ReconcileClusterSync struct { client.Client logger log.FieldLogger reapplyInterval time.Duration resourceHelperBuilder func(*rest.Config, bool, log.FieldLogger) (resource.Helper, error) // remoteClusterAPIClientBuilder is a function pointer to the function that gets a builder for building a client // for the remote cluster's API server remoteClusterAPIClientBuilder func(cd *hivev1.ClusterDeployment) remoteclient.Builder ordinalID int64 } func (r *ReconcileClusterSync) getAndCheckClusterSyncStatefulSet(logger log.FieldLogger) (*appsv1.StatefulSet, error) { hiveNS := controllerutils.GetHiveNamespace() logger.Debug("Getting statefulset") sts := &appsv1.StatefulSet{} err := r.Get(context.Background(), types.NamespacedName{Namespace: hiveNS, Name: stsName}, sts) if err != nil { logger.WithError(err).WithField("hiveNS", hiveNS).Error("error getting 
statefulset.") return nil, err } logger.Debug("Ensuring replicas is set") if sts.Spec.Replicas == nil { return nil, errors.New("sts.Spec.Replicas not set") } if sts.Status.CurrentReplicas != *sts.Spec.Replicas { // This ensures that we don't have partial syncing which may make it seem like things are working. // TODO: Remove this once https://issues.redhat.com/browse/CO-1268 is completed as this should no longer be needed. return nil, fmt.Errorf("statefulset replica count is off. current: %v expected: %v", sts.Status.CurrentReplicas, *sts.Spec.Replicas) } // All checks passed return sts, nil } // isSyncAssignedToMe determines if this instance of the controller is assigned to the resource being sync'd func (r *ReconcileClusterSync) isSyncAssignedToMe(sts *appsv1.StatefulSet, cd *hivev1.ClusterDeployment, logger log.FieldLogger) (bool, error) { logger.Debug("Getting uid for hashing") var uidAsBigInt big.Int // There are a couple of assumptions here: // * the clusterdeployment uid has 4 sections separated by hyphens // * the 4 sections are hex numbers // These assumptions are based on the fact that Kubernetes says UIDs are actually // ISO/IEC 9834-8 UUIDs. If this changes, this code may fail and may need to be updated. // https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids hexUID := strings.Replace(string(cd.UID), "-", "", 4) logger.Debugf("hexUID: %+v", hexUID) uidAsBigInt.SetString(hexUID, 16) logger.Debug("calculating replicas") replicas := int64(*sts.Spec.Replicas) logger.Debug("determining who is assigned to sync this cluster") ordinalIDOfAssignee := uidAsBigInt.Mod(&uidAsBigInt, big.NewInt(replicas)).Int64() assignedToMe := ordinalIDOfAssignee == r.ordinalID logger.WithFields(log.Fields{ "replicas": replicas, "ordinalIDOfAssignee": ordinalIDOfAssignee, "ordinalID": r.ordinalID, "assignedToMe": assignedToMe, }).Debug("computed values") return assignedToMe, nil } // Reconcile reads the state of the ClusterDeployment and applies any SyncSets or SelectorSyncSets that need to be // applied or re-applied. 
func (r *ReconcileClusterSync) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { logger := controllerutils.BuildControllerLogger(ControllerName, "clusterDeployment", request.NamespacedName) logger.Infof("reconciling ClusterDeployment") recobsrv := hivemetrics.NewReconcileObserver(ControllerName, logger) defer recobsrv.ObserveControllerReconcileTime() recobsrv.SetOutcome(hivemetrics.ReconcileOutcomeNoOp) // Fetch the ClusterDeployment instance cd := &hivev1.ClusterDeployment{} err := r.Get(context.TODO(), request.NamespacedName, cd) if err != nil { if apierrors.IsNotFound(err) { logger.Info("ClusterDeployment not found") return reconcile.Result{}, nil } log.WithError(err).Error("failed to get ClusterDeployment") return reconcile.Result{}, err } sts, err := r.getAndCheckClusterSyncStatefulSet(logger) if err != nil { log.WithError(err).Error("failed getting clustersync statefulset") return reconcile.Result{}, err } if me, err := r.isSyncAssignedToMe(sts, cd, logger); !me || err != nil { if err != nil { logger.WithError(err).Error("failed determining which instance is assigned to sync this cluster") return reconcile.Result{}, err } logger.Debug("not syncing because isSyncAssignedToMe returned false") recobsrv.SetOutcome(hivemetrics.ReconcileOutcomeSkippedSync) return reconcile.Result{}, nil } if controllerutils.IsClusterPausedOrRelocating(cd, logger) { return reconcile.Result{}, nil } if cd.DeletionTimestamp != nil { logger.Debug("cluster is being deleted") return reconcile.Result{}, nil } if unreachable, _ := remoteclient.Unreachable(cd); unreachable { logger.Debug("cluster is unreachable") return reconcile.Result{}, nil } restConfig, err := r.remoteClusterAPIClientBuilder(cd).RESTConfig() if err != nil { logger.WithError(err).Error("unable to get REST config") return reconcile.Result{}, err } // If this cluster carries the fake annotation we will fake out all helper communication with it. 
fakeCluster := controllerutils.IsFakeCluster(cd) resourceHelper, err := r.resourceHelperBuilder(restConfig, fakeCluster, logger) if err != nil { log.WithError(err).Error("cannot create helper") return reconcile.Result{}, err } needToCreateClusterSync := false clusterSync := &hiveintv1alpha1.ClusterSync{} switch err := r.Get(context.Background(), request.NamespacedName, clusterSync); { case apierrors.IsNotFound(err): logger.Info("creating ClusterSync as it does not exist") clusterSync.Namespace = cd.Namespace clusterSync.Name = cd.Name ownerRef := metav1.NewControllerRef(cd, cd.GroupVersionKind()) ownerRef.Controller = nil clusterSync.OwnerReferences = []metav1.OwnerReference{*ownerRef} if err := r.Create(context.Background(), clusterSync); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not create ClusterSync") return reconcile.Result{}, err } recobsrv.SetOutcome(hivemetrics.ReconcileOutcomeClusterSyncCreated) // requeue immediately so that we reconcile soon after the ClusterSync is created return reconcile.Result{Requeue: true}, nil case err != nil: logger.WithError(err).Log(controllerutils.LogLevel(err), "could not get ClusterSync") return reconcile.Result{}, err } needToCreateLease := false lease := &hiveintv1alpha1.ClusterSyncLease{} switch err := r.Get(context.Background(), request.NamespacedName, lease); { case apierrors.IsNotFound(err): logger.Info("Lease for ClusterSync does not exist; will need to create") needToCreateLease = true case err != nil: logger.WithError(err).Log(controllerutils.LogLevel(err), "could not get lease for ClusterSync") return reconcile.Result{}, err } origStatus := clusterSync.Status.DeepCopy() syncSets, err := r.getSyncSetsForClusterDeployment(cd, logger) if err != nil { return reconcile.Result{}, err } selectorSyncSets, err := r.getSelectorSyncSetsForClusterDeployment(cd, logger) if err != nil { return reconcile.Result{}, err } needToDoFullReapply := needToCreateClusterSync || r.timeUntilFullReapply(lease) <= 0 if needToDoFullReapply { logger.Info("need to reapply all syncsets") } recobsrv.SetOutcome(hivemetrics.ReconcileOutcomeFullSync) // Apply SyncSets syncStatusesForSyncSets, syncSetsNeedRequeue := r.applySyncSets( cd, "SyncSet", syncSets, clusterSync.Status.SyncSets, needToDoFullReapply, false, // no need to report SelectorSyncSet metrics if we're reconciling non-selector SyncSets resourceHelper, logger, ) clusterSync.Status.SyncSets = syncStatusesForSyncSets // Apply SelectorSyncSets syncStatusesForSelectorSyncSets, selectorSyncSetsNeedRequeue := r.applySyncSets( cd, "SelectorSyncSet", selectorSyncSets, clusterSync.Status.SelectorSyncSets, needToDoFullReapply, clusterSync.Status.FirstSuccessTime == nil, // only report SelectorSyncSet metrics if we haven't reached first success resourceHelper, logger, ) clusterSync.Status.SelectorSyncSets = syncStatusesForSelectorSyncSets setFailedCondition(clusterSync) // Set clusterSync.Status.FirstSyncSetsSuccessTime syncStatuses := append(syncStatusesForSyncSets, syncStatusesForSelectorSyncSets...) 
if clusterSync.Status.FirstSuccessTime == nil { r.setFirstSuccessTime(syncStatuses, cd, clusterSync, logger) } // Update the ClusterSync if !reflect.DeepEqual(origStatus, &clusterSync.Status) { logger.Info("updating ClusterSync") if err := r.Status().Update(context.Background(), clusterSync); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update ClusterSync") return reconcile.Result{}, err } } if needToDoFullReapply { logger.Info("setting last full apply time") lease.Spec.RenewTime = metav1.NowMicro() if needToCreateLease { logger.Info("creating lease for ClusterSync") lease.Namespace = cd.Namespace lease.Name = cd.Name ownerRef := metav1.NewControllerRef(clusterSync, clusterSync.GroupVersionKind()) ownerRef.Controller = nil lease.OwnerReferences = []metav1.OwnerReference{*ownerRef} if err := r.Create(context.Background(), lease); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not create lease for ClusterSync") return reconcile.Result{}, err } } else { logger.Info("updating lease for ClusterSync") if err := r.Update(context.Background(), lease); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not update lease for ClusterSync") return reconcile.Result{}, err } } } result := reconcile.Result{Requeue: true, RequeueAfter: r.timeUntilFullReapply(lease)} if syncSetsNeedRequeue || selectorSyncSetsNeedRequeue { result.RequeueAfter = 0 } return result, nil } func (r *ReconcileClusterSync) applySyncSets( cd *hivev1.ClusterDeployment, syncSetType string, syncSets []CommonSyncSet, syncStatuses []hiveintv1alpha1.SyncStatus, needToDoFullReapply bool, reportSelectorSyncSetMetrics bool, resourceHelper resource.Helper, logger log.FieldLogger, ) (newSyncStatuses []hiveintv1alpha1.SyncStatus, requeue bool) { // Sort the syncsets to a consistent ordering. This prevents thrashing in the ClusterSync status due to the order // of the syncset status changing from one reconcile to the next. sort.Slice(syncSets, func(i, j int) bool { return syncSets[i].AsMetaObject().GetName() < syncSets[j].AsMetaObject().GetName() }) for _, syncSet := range syncSets { logger := logger.WithField(syncSetType, syncSet.AsMetaObject().GetName()) oldSyncStatus, indexOfOldStatus := getOldSyncStatus(syncSet, syncStatuses) // Remove the matching old sync status from the slice of sync statuses so that the slice only contains sync // statuses that have not been matched to a syncset. 
if indexOfOldStatus >= 0 { last := len(syncStatuses) - 1 syncStatuses[indexOfOldStatus] = syncStatuses[last] syncStatuses = syncStatuses[:last] } // Determine if the syncset needs to be applied switch { case needToDoFullReapply: logger.Debug("applying syncset because it is time to do a full re-apply") case indexOfOldStatus < 0: logger.Debug("applying syncset because the syncset is new") case oldSyncStatus.Result != hiveintv1alpha1.SuccessSyncSetResult: logger.Debug("applying syncset because the last attempt to apply failed") case oldSyncStatus.ObservedGeneration != syncSet.AsMetaObject().GetGeneration(): logger.Debug("applying syncset because the syncset generation has changed") default: logger.Debug("skipping apply of syncset since it is up-to-date and it is not time to do a full re-apply") newSyncStatuses = append(newSyncStatuses, oldSyncStatus) continue } // Apply the syncset resourcesApplied, resourcesInSyncSet, syncSetNeedsRequeue, err := r.applySyncSet(syncSet, resourceHelper, logger) newSyncStatus := hiveintv1alpha1.SyncStatus{ Name: syncSet.AsMetaObject().GetName(), ObservedGeneration: syncSet.AsMetaObject().GetGeneration(), Result: hiveintv1alpha1.SuccessSyncSetResult, } if syncSet.GetSpec().ResourceApplyMode == hivev1.SyncResourceApplyMode { newSyncStatus.ResourcesToDelete = resourcesApplied } if syncSet.GetSpec().ResourceApplyMode == hivev1.UpsertResourceApplyMode && len(oldSyncStatus.ResourcesToDelete) > 0 { logger.Infof("resource apply mode is %v but there are resources to delete in clustersync status", hivev1.UpsertResourceApplyMode) oldSyncStatus.ResourcesToDelete = nil } if err != nil { newSyncStatus.Result = hiveintv1alpha1.FailureSyncSetResult newSyncStatus.FailureMessage = err.Error() } if syncSetNeedsRequeue { requeue = true } if indexOfOldStatus >= 0 { // Delete any resources that were included in the syncset previously but are no longer included now. remainingResources, err := deleteFromTargetCluster( oldSyncStatus.ResourcesToDelete, func(r hiveintv1alpha1.SyncResourceReference) bool { return !containsResource(resourcesInSyncSet, r) }, resourceHelper, logger, ) if err != nil { requeue = true newSyncStatus.Result = hiveintv1alpha1.FailureSyncSetResult if newSyncStatus.FailureMessage != "" { newSyncStatus.FailureMessage += "\n" } newSyncStatus.FailureMessage += err.Error() } newSyncStatus.ResourcesToDelete = mergeResources(newSyncStatus.ResourcesToDelete, remainingResources) newSyncStatus.LastTransitionTime = oldSyncStatus.LastTransitionTime newSyncStatus.FirstSuccessTime = oldSyncStatus.FirstSuccessTime } // Update the last transition time if there were any changes to the sync status. if !reflect.DeepEqual(oldSyncStatus, newSyncStatus) { newSyncStatus.LastTransitionTime = metav1.Now() } // Set the FirstSuccessTime if this is the first success. Also, observe the apply-duration metric. 
if newSyncStatus.Result == hiveintv1alpha1.SuccessSyncSetResult && oldSyncStatus.FirstSuccessTime == nil { now := metav1.Now() newSyncStatus.FirstSuccessTime = &now startTime := syncSet.AsMetaObject().GetCreationTimestamp().Time if cd.Status.InstalledTimestamp != nil && startTime.Before(cd.Status.InstalledTimestamp.Time) { startTime = cd.Status.InstalledTimestamp.Time } if cond := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.UnreachableCondition); cond != nil && startTime.Before(cond.LastTransitionTime.Time) { startTime = cond.LastTransitionTime.Time } applyTime := now.Sub(startTime).Seconds() if syncSet.AsMetaObject().GetNamespace() == "" { if reportSelectorSyncSetMetrics { // Report SelectorSyncSet metric *only if* we have not yet reached our "everything has applied successfully for the first time" // state. This is to handle situations where a clusters labels change long after it was installed, resulting // in a new SelectorSyncSet matching, and a metric showing days/weeks/months time to first apply. In this scenario // we have no idea when the label was added, and thus it is not currently possible to report the time from // label added to SyncSet successfully applied. logger.WithField("applyTime", applyTime).Debug("observed first successful apply of SelectorSyncSet for cluster") metricTimeToApplySelectorSyncSet.WithLabelValues(syncSet.AsMetaObject().GetName()).Observe(applyTime) } else { logger.Info("skipped observing first successful apply of SelectorSyncSet metric because ClusterSync has a FirstSuccessTime") } } else { // For non-selector SyncSets we have a more accurate startTime, either ClusterDeployment installedTimestamp // or SyncSet creationTimestamp. logger.WithField("applyTime", applyTime).Debug("observed first successful apply of SyncSet for cluster") if syncSetGroup, ok := syncSet.AsMetaObject().GetAnnotations()[constants.SyncSetMetricsGroupAnnotation]; ok && syncSetGroup != "" { metricTimeToApplySyncSet.WithLabelValues(syncSetGroup).Observe(applyTime) } else { metricTimeToApplySyncSet.WithLabelValues("none").Observe(applyTime) } } } // Sort ResourcesToDelete to prevent update thrashing. sort.Slice(newSyncStatus.ResourcesToDelete, func(i, j int) bool { return orderResources(newSyncStatus.ResourcesToDelete[i], newSyncStatus.ResourcesToDelete[j]) }) newSyncStatuses = append(newSyncStatuses, newSyncStatus) } // The remaining sync statuses in syncStatuses do not match any syncsets. Any resources to delete in the sync status // need to be deleted. 
for _, oldSyncStatus := range syncStatuses { remainingResources, err := deleteFromTargetCluster(oldSyncStatus.ResourcesToDelete, nil, resourceHelper, logger) if err != nil { requeue = true newSyncStatus := hiveintv1alpha1.SyncStatus{ Name: oldSyncStatus.Name, ResourcesToDelete: remainingResources, Result: hiveintv1alpha1.FailureSyncSetResult, FailureMessage: err.Error(), LastTransitionTime: oldSyncStatus.LastTransitionTime, FirstSuccessTime: oldSyncStatus.FirstSuccessTime, } if !reflect.DeepEqual(oldSyncStatus, newSyncStatus) { newSyncStatus.LastTransitionTime = metav1.Now() } newSyncStatuses = append(newSyncStatuses, newSyncStatus) } } return } func getOldSyncStatus(syncSet CommonSyncSet, syncSetStatuses []hiveintv1alpha1.SyncStatus) (hiveintv1alpha1.SyncStatus, int) { for i, status := range syncSetStatuses { if status.Name == syncSet.AsMetaObject().GetName() { return syncSetStatuses[i], i } } return hiveintv1alpha1.SyncStatus{}, -1 } func (r *ReconcileClusterSync) applySyncSet( syncSet CommonSyncSet, resourceHelper resource.Helper, logger log.FieldLogger, ) ( resourcesApplied []hiveintv1alpha1.SyncResourceReference, resourcesInSyncSet []hiveintv1alpha1.SyncResourceReference, requeue bool, returnErr error, ) { resources, referencesToResources, decodeErr := decodeResources(syncSet, logger) referencesToSecrets := referencesToSecrets(syncSet) resourcesInSyncSet = append(referencesToResources, referencesToSecrets...) if decodeErr != nil { returnErr = decodeErr return } applyFn := resourceHelper.Apply applyFnMetricsLabel := labelApply switch syncSet.GetSpec().ApplyBehavior { case hivev1.CreateOrUpdateSyncSetApplyBehavior: applyFn = resourceHelper.CreateOrUpdate applyFnMetricsLabel = labelCreateOrUpdate case hivev1.CreateOnlySyncSetApplyBehavior: applyFn = resourceHelper.Create applyFnMetricsLabel = labelCreateOnly } // Apply Resources for i, resource := range resources { returnErr, requeue = r.applyResource(i, resource, referencesToResources[i], applyFn, applyFnMetricsLabel, logger) if returnErr != nil { resourcesApplied = referencesToResources[:i] return } } resourcesApplied = referencesToResources // Apply Secrets for i, secretMapping := range syncSet.GetSpec().Secrets { returnErr, requeue = r.applySecret(syncSet, i, secretMapping, referencesToSecrets[i], applyFn, applyFnMetricsLabel, logger) if returnErr != nil { resourcesApplied = append(resourcesApplied, referencesToSecrets[:i]...) return } } resourcesApplied = append(resourcesApplied, referencesToSecrets...) 
// Apply Patches for i, patch := range syncSet.GetSpec().Patches { returnErr, requeue = r.applyPatch(i, patch, resourceHelper, logger) if returnErr != nil { return } } logger.Info("syncset applied") return } func decodeResources(syncSet CommonSyncSet, logger log.FieldLogger) ( resources []*unstructured.Unstructured, references []hiveintv1alpha1.SyncResourceReference, returnErr error, ) { var decodeErrors []error for i, resource := range syncSet.GetSpec().Resources { u := &unstructured.Unstructured{} if err := yaml.Unmarshal(resource.Raw, u); err != nil { logger.WithField("resourceIndex", i).WithError(err).Warn("error decoding unstructured object") decodeErrors = append(decodeErrors, errors.Wrapf(err, "failed to decode resource %d", i)) continue } resources = append(resources, u) references = append(references, hiveintv1alpha1.SyncResourceReference{ APIVersion: u.GetAPIVersion(), Kind: u.GetKind(), Namespace: u.GetNamespace(), Name: u.GetName(), }) } returnErr = utilerrors.NewAggregate(decodeErrors) return } func referencesToSecrets(syncSet CommonSyncSet) []hiveintv1alpha1.SyncResourceReference { var references []hiveintv1alpha1.SyncResourceReference for _, secretMapping := range syncSet.GetSpec().Secrets { references = append(references, hiveintv1alpha1.SyncResourceReference{ APIVersion: secretAPIVersion, Kind: secretKind, Namespace: secretMapping.TargetRef.Namespace, Name: secretMapping.TargetRef.Name, }) } return references } func (r *ReconcileClusterSync) applyResource( resourceIndex int, resource *unstructured.Unstructured, reference hiveintv1alpha1.SyncResourceReference, applyFn func(obj []byte) (resource.ApplyResult, error), applyFnMetricsLabel string, logger log.FieldLogger, ) (returnErr error, requeue bool) { logger = logger.WithField("resourceIndex", resourceIndex). WithField("resourceNamespace", reference.Namespace). WithField("resourceName", reference.Name). WithField("resourceAPIVersion", reference.APIVersion). WithField("resourceKind", reference.Kind) logger.Debug("applying resource") if err := applyToTargetCluster(resource, applyFnMetricsLabel, applyFn, logger); err != nil { return errors.Wrapf(err, "failed to apply resource %d", resourceIndex), true } return nil, false } func (r *ReconcileClusterSync) applySecret( syncSet CommonSyncSet, secretIndex int, secretMapping hivev1.SecretMapping, reference hiveintv1alpha1.SyncResourceReference, applyFn func(obj []byte) (resource.ApplyResult, error), applyFnMetricsLabel string, logger log.FieldLogger, ) (returnErr error, requeue bool) { logger = logger.WithField("secretIndex", secretIndex). WithField("secretNamespace", reference.Namespace). WithField("secretName", reference.Name) syncSetNamespace := syncSet.AsMetaObject().GetNamespace() srcNamespace := secretMapping.SourceRef.Namespace if srcNamespace == "" { // The namespace of the source secret is required for SelectorSyncSets. if syncSetNamespace == "" { logger.Warn("namespace must be specified for source secret") return fmt.Errorf("source namespace missing for secret %d", secretIndex), false } // Use the namespace of the SyncSet if the namespace of the source secret is omitted. srcNamespace = syncSetNamespace } else { // If the namespace of the source secret is specified, then it must match the namespace of the SyncSet. 
if syncSetNamespace != "" && syncSetNamespace != srcNamespace { logger.Warn("source secret must be in same namespace as SyncSet") return fmt.Errorf("source in wrong namespace for secret %d", secretIndex), false } } secret := &corev1.Secret{} if err := r.Get(context.Background(), types.NamespacedName{Namespace: srcNamespace, Name: secretMapping.SourceRef.Name}, secret); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "cannot read secret") return errors.Wrapf(err, "failed to read secret %d", secretIndex), true } // Clear out the fields of the metadata which are specific to the cluster to which the secret belongs. secret.ObjectMeta = metav1.ObjectMeta{ Namespace: secretMapping.TargetRef.Namespace, Name: secretMapping.TargetRef.Name, Annotations: secret.Annotations, Labels: secret.Labels, } logger.Debug("applying secret") if err := applyToTargetCluster(secret, applyFnMetricsLabel, applyFn, logger); err != nil { return errors.Wrapf(err, "failed to apply secret %d", secretIndex), true } return nil, false } func (r *ReconcileClusterSync) applyPatch( patchIndex int, patch hivev1.SyncObjectPatch, resourceHelper resource.Helper, logger log.FieldLogger, ) (returnErr error, requeue bool) { logger = logger.WithField("patchIndex", patchIndex). WithField("patchNamespace", patch.Namespace). WithField("patchName", patch.Name). WithField("patchAPIVersion", patch.APIVersion). WithField("patchKind", patch.Kind) logger.Debug("applying patch") if err := resourceHelper.Patch( types.NamespacedName{Namespace: patch.Namespace, Name: patch.Name}, patch.Kind, patch.APIVersion, []byte(patch.Patch), patch.PatchType, ); err != nil { return errors.Wrapf(err, "failed to apply patch %d", patchIndex), true } return nil, false } func applyToTargetCluster( obj hivev1.MetaRuntimeObject, applyFnMetricLabel string, applyFn func(obj []byte) (resource.ApplyResult, error), logger log.FieldLogger, ) error { startTime := time.Now() labels := obj.GetLabels() if labels == nil { labels = make(map[string]string, 1) } // Inject the hive managed annotation to help end-users see that a resource is managed by hive: labels[constants.HiveManagedLabel] = "true" obj.SetLabels(labels) bytes, err := json.Marshal(obj) if err != nil { logger.WithError(err).Error("error marshalling unstructured object to json bytes") return err } applyResult, err := applyFn(bytes) // Record the amount of time we took to apply this specific resource. When combined with the metric for duration of // our kube client requests, we can get an idea how much time we're spending cpu bound vs network bound. 
applyTime := metav1.Now().Sub(startTime).Seconds() if err != nil { logger.WithError(err).Warn("error applying resource") metricResourcesApplied.WithLabelValues(applyFnMetricLabel, metricResultError).Inc() metricTimeToApplySyncSetResource.WithLabelValues(applyFnMetricLabel, metricResultError).Observe(applyTime) } else { logger.WithField("applyResult", applyResult).Debug("resource applied") metricResourcesApplied.WithLabelValues(applyFnMetricLabel, metricResultSuccess).Inc() metricTimeToApplySyncSetResource.WithLabelValues(applyFnMetricLabel, metricResultSuccess).Observe(applyTime) } return err } func deleteFromTargetCluster( resources []hiveintv1alpha1.SyncResourceReference, shouldDelete func(hiveintv1alpha1.SyncResourceReference) bool, resourceHelper resource.Helper, logger log.FieldLogger, ) (remainingResources []hiveintv1alpha1.SyncResourceReference, returnErr error) { var allErrs []error for _, r := range resources { if shouldDelete != nil && !shouldDelete(r) { remainingResources = append(remainingResources, r) continue } logger := logger.WithField("resourceNamespace", r.Namespace). WithField("resourceName", r.Name). WithField("resourceAPIVersion", r.APIVersion). WithField("resourceKind", r.Kind) logger.Info("deleting resource") if err := resourceHelper.Delete(r.APIVersion, r.Kind, r.Namespace, r.Name); err != nil { logger.WithError(err).Warn("could not delete resource") allErrs = append(allErrs, fmt.Errorf("Failed to delete %s, Kind=%s %s/%s: %w", r.APIVersion, r.Kind, r.Namespace, r.Name, err)) remainingResources = append(remainingResources, r) } } return remainingResources, utilerrors.NewAggregate(allErrs) } func (r *ReconcileClusterSync) getSyncSetsForClusterDeployment(cd *hivev1.ClusterDeployment, logger log.FieldLogger) ([]CommonSyncSet, error) { syncSetsList := &hivev1.SyncSetList{} if err := r.List(context.Background(), syncSetsList, client.InNamespace(cd.Namespace)); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not list SyncSets") return nil, err } var syncSets []CommonSyncSet for i, ss := range syncSetsList.Items { if !doesSyncSetApplyToClusterDeployment(&ss, cd) { continue } syncSets = append(syncSets, (*SyncSetAsCommon)(&syncSetsList.Items[i])) } return syncSets, nil } func (r *ReconcileClusterSync) getSelectorSyncSetsForClusterDeployment(cd *hivev1.ClusterDeployment, logger log.FieldLogger) ([]CommonSyncSet, error) { selectorSyncSetsList := &hivev1.SelectorSyncSetList{} if err := r.List(context.Background(), selectorSyncSetsList); err != nil { logger.WithError(err).Log(controllerutils.LogLevel(err), "could not list SelectorSyncSets") return nil, err } var selectorSyncSets []CommonSyncSet for i, sss := range selectorSyncSetsList.Items { if !doesSelectorSyncSetApplyToClusterDeployment(&sss, cd, logger) { continue } selectorSyncSets = append(selectorSyncSets, (*SelectorSyncSetAsCommon)(&selectorSyncSetsList.Items[i])) } return selectorSyncSets, nil } func doesSyncSetApplyToClusterDeployment(syncSet *hivev1.SyncSet, cd *hivev1.ClusterDeployment) bool { for _, cdRef := range syncSet.Spec.ClusterDeploymentRefs { if cdRef.Name == cd.Name { return true } } return false } func doesSelectorSyncSetApplyToClusterDeployment(selectorSyncSet *hivev1.SelectorSyncSet, cd *hivev1.ClusterDeployment, logger log.FieldLogger) bool { labelSelector, err := metav1.LabelSelectorAsSelector(&selectorSyncSet.Spec.ClusterDeploymentSelector) if err != nil { logger.WithError(err).Error("unable to convert selector") return false } return 
labelSelector.Matches(labels.Set(cd.Labels)) } func setFailedCondition(clusterSync *hiveintv1alpha1.ClusterSync) { status := corev1.ConditionFalse reason := "Success" message := "All SyncSets and SelectorSyncSets have been applied to the cluster" failingSyncSets := getFailingSyncSets(clusterSync.Status.SyncSets) failingSelectorSyncSets := getFailingSyncSets(clusterSync.Status.SelectorSyncSets) if len(failingSyncSets)+len(failingSelectorSyncSets) != 0 { status = corev1.ConditionTrue reason = "Failure" var failureNames []string if len(failingSyncSets) != 0 { failureNames = append(failureNames, namesForFailureMessage("SyncSet", failingSyncSets)) } if len(failingSelectorSyncSets) != 0 { failureNames = append(failureNames, namesForFailureMessage("SelectorSyncSet", failingSelectorSyncSets)) } verb := "is" if len(failingSyncSets)+len(failingSelectorSyncSets) > 1 { verb = "are" } message = fmt.Sprintf("%s %s failing", strings.Join(failureNames, " and "), verb) } if len(clusterSync.Status.Conditions) > 0 { cond := clusterSync.Status.Conditions[0] if status == cond.Status && reason == cond.Reason && message == cond.Message { return } } clusterSync.Status.Conditions = []hiveintv1alpha1.ClusterSyncCondition{{ Type: hiveintv1alpha1.ClusterSyncFailed, Status: status, Reason: reason, Message: message, LastProbeTime: metav1.Now(), LastTransitionTime: metav1.Now(), }} } func getFailingSyncSets(syncStatuses []hiveintv1alpha1.SyncStatus) []string { var failures []string for _, status := range syncStatuses { if status.Result != hiveintv1alpha1.SuccessSyncSetResult { failures = append(failures, status.Name) } } return failures } func (r *ReconcileClusterSync) setFirstSuccessTime(syncStatuses []hiveintv1alpha1.SyncStatus, cd *hivev1.ClusterDeployment, clusterSync *hiveintv1alpha1.ClusterSync, logger log.FieldLogger) { if cd.Status.InstalledTimestamp == nil { return } lastSuccessTime := &metav1.Time{} for _, status := range syncStatuses { if status.FirstSuccessTime == nil { return } if status.FirstSuccessTime.Time.After(lastSuccessTime.Time) { lastSuccessTime = status.FirstSuccessTime } } // When len(syncStatuses) == 0, meaning there are no syncsets which apply to the cluster, we will use now as the last success time if len(syncStatuses) == 0 { now := metav1.Now() lastSuccessTime = &now } clusterSync.Status.FirstSuccessTime = lastSuccessTime allSyncSetsAppliedDuration := lastSuccessTime.Time.Sub(cd.Status.InstalledTimestamp.Time) logger.Infof("observed syncsets applied duration: %v seconds", allSyncSetsAppliedDuration.Seconds()) metricTimeToApplySyncSets.Observe(float64(allSyncSetsAppliedDuration.Seconds())) return } func namesForFailureMessage(syncSetKind string, names []string) string { if len(names) > 1 { syncSetKind += "s" } return fmt.Sprintf("%s %s", syncSetKind, strings.Join(names, ", ")) } func mergeResources(a, b []hiveintv1alpha1.SyncResourceReference) []hiveintv1alpha1.SyncResourceReference { if len(a) == 0 { return b } for _, r := range b { if !containsResource(a, r) { a = append(a, r) } } return a } func containsResource(resources []hiveintv1alpha1.SyncResourceReference, resource hiveintv1alpha1.SyncResourceReference) bool { for _, r := range resources { if r == resource { return true } } return false } func orderResources(a, b hiveintv1alpha1.SyncResourceReference) bool { if x, y := a.APIVersion, b.APIVersion; x != y { return x < y } if x, y := a.Kind, b.Kind; x != y { return x < y } if x, y := a.Namespace, b.Namespace; x != y { return x < y } return a.Name < b.Name } func (r *ReconcileClusterSync) 
timeUntilFullReapply(lease *hiveintv1alpha1.ClusterSyncLease) time.Duration { timeUntilNext := r.reapplyInterval - time.Since(lease.Spec.RenewTime.Time) + time.Duration(reapplyIntervalJitter*rand.Float64()*r.reapplyInterval.Seconds())*time.Second if timeUntilNext < 0 { return 0 } return timeUntilNext }
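Editorial aside on the file above, not part of the dataset row: isSyncAssignedToMe shards ClusterDeployments across the hive-clustersync statefulset pods by stripping the hyphens from the cluster UID, parsing the remaining hex string as a big integer, and taking it modulo the replica count. The standalone sketch below reproduces just that computation; the UID and replica count are made-up example inputs.

package main

import (
	"fmt"
	"math/big"
	"strings"
)

// assignedOrdinal mirrors the assignment logic in isSyncAssignedToMe:
// hex(UID without hyphens) mod replicas picks the owning pod ordinal.
func assignedOrdinal(uid string, replicas int64) int64 {
	var n big.Int
	n.SetString(strings.ReplaceAll(uid, "-", ""), 16)
	return n.Mod(&n, big.NewInt(replicas)).Int64()
}

func main() {
	uid := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" // example UUID, not a real ClusterDeployment UID
	fmt.Println(assignedOrdinal(uid, 3))          // ordinal of the pod that would sync this cluster
}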
1
17,405
This might be working as-is, but I'm a bit thrown by the || followed by && and how the expression gets evaluated. Testing quickly on the Go playground, true || false && false appears to evaluate the && first (or short-circuit from the left). Could you group with parentheses? It looks to me like it should be (a || b) && c in this case. (See the sketch after this row.)
openshift-hive
go
@@ -0,0 +1,17 @@ +// Copyright (c) 2020 IoTeX Foundation +// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no +// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent +// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache +// License 2.0 that can be found in the LICENSE file. + +package main + +import ( + "github.com/iotexproject/iotex-core/ioctl/cmd" + "github.com/iotexproject/iotex-core/ioctl/cmd/account" +) + +func main() { + account.CryptoSm2 = true + cmd.Execute() +}
1
1
20,687
add an entry in the Makefile to generate the binary with a different name?
iotexproject-iotex-core
go
@@ -115,6 +115,10 @@ func validatePagerDutyConfigs(configs []monitoringv1alpha1.PagerDutyConfig) erro if conf.RoutingKey == nil && conf.ServiceKey == nil { return errors.New("one of 'routingKey' or 'serviceKey' is required") } + + if err := conf.HTTPConfig.Validate(); err != nil { + return err + } } return nil }
1
// Copyright 2021 The prometheus-operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package alertmanager import ( "encoding/json" "fmt" "net" "regexp" "strings" "github.com/pkg/errors" monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1" "github.com/prometheus/alertmanager/config" ) var durationRe = regexp.MustCompile(`^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$`) func ValidateConfig(amc *monitoringv1alpha1.AlertmanagerConfig) error { receivers, err := validateReceivers(amc.Spec.Receivers) if err != nil { return err } muteTimeIntervals, err := validateMuteTimeIntervals(amc.Spec.MuteTimeIntervals) if err != nil { return err } return validateAlertManagerRoutes(amc.Spec.Route, receivers, muteTimeIntervals, true) } // ValidateURL against the config.URL // This could potentially become a regex and be validated via OpenAPI // but right now, since we know we need to unmarshal into an upstream type // after conversion, we validate we don't error when doing so func ValidateURL(url string) (*config.URL, error) { var u config.URL err := json.Unmarshal([]byte(fmt.Sprintf(`"%s"`, url)), &u) if err != nil { return nil, fmt.Errorf("validate url from string failed for %s: %w", url, err) } return &u, nil } func validateReceivers(receivers []monitoringv1alpha1.Receiver) (map[string]struct{}, error) { var err error receiverNames := make(map[string]struct{}) for _, receiver := range receivers { if _, found := receiverNames[receiver.Name]; found { return nil, errors.Errorf("%q receiver is not unique", receiver.Name) } receiverNames[receiver.Name] = struct{}{} if err = validatePagerDutyConfigs(receiver.PagerDutyConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'pagerDutyConfig' - receiver %s", receiver.Name) } if err := validateOpsGenieConfigs(receiver.OpsGenieConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'opsGenieConfig' - receiver %s", receiver.Name) } if err := validateSlackConfigs(receiver.SlackConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'slackConfig' - receiver %s", receiver.Name) } if err := validateWebhookConfigs(receiver.WebhookConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'webhookConfig' - receiver %s", receiver.Name) } if err := validateWechatConfigs(receiver.WeChatConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'weChatConfig' - receiver %s", receiver.Name) } if err := validateEmailConfig(receiver.EmailConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'emailConfig' - receiver %s", receiver.Name) } if err := validateVictorOpsConfigs(receiver.VictorOpsConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'victorOpsConfig' - receiver %s", receiver.Name) } if err := validatePushoverConfigs(receiver.PushoverConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'pushOverConfig' - receiver %s", receiver.Name) } if err 
:= validateSnsConfigs(receiver.SNSConfigs); err != nil { return nil, errors.Wrapf(err, "failed to validate 'snsConfig' - receiver %s", receiver.Name) } } return receiverNames, nil } func validatePagerDutyConfigs(configs []monitoringv1alpha1.PagerDutyConfig) error { for _, conf := range configs { if conf.URL != "" { if _, err := ValidateURL(conf.URL); err != nil { return errors.Wrap(err, "pagerduty validation failed for 'url'") } } if conf.RoutingKey == nil && conf.ServiceKey == nil { return errors.New("one of 'routingKey' or 'serviceKey' is required") } } return nil } func validateOpsGenieConfigs(configs []monitoringv1alpha1.OpsGenieConfig) error { for _, config := range configs { if err := config.Validate(); err != nil { return err } if config.APIURL != "" { if _, err := ValidateURL(config.APIURL); err != nil { return errors.Wrap(err, "invalid 'apiURL'") } } } return nil } func validateSlackConfigs(configs []monitoringv1alpha1.SlackConfig) error { for _, config := range configs { if err := config.Validate(); err != nil { return err } } return nil } func validateWebhookConfigs(configs []monitoringv1alpha1.WebhookConfig) error { for _, config := range configs { if config.URL == nil && config.URLSecret == nil { return errors.New("one of 'url' or 'urlSecret' must be specified") } if config.URL != nil { if _, err := ValidateURL(*config.URL); err != nil { return errors.Wrapf(err, "invalid 'url'") } } } return nil } func validateWechatConfigs(configs []monitoringv1alpha1.WeChatConfig) error { for _, config := range configs { if config.APIURL != "" { if _, err := ValidateURL(config.APIURL); err != nil { return errors.Wrap(err, "invalid 'apiURL'") } } } return nil } func validateEmailConfig(configs []monitoringv1alpha1.EmailConfig) error { for _, config := range configs { if config.To == "" { return errors.New("missing 'to' address") } if config.Smarthost != "" { _, _, err := net.SplitHostPort(config.Smarthost) if err != nil { return errors.Wrapf(err, "invalid field 'smarthost': %s", config.Smarthost) } } if config.Headers != nil { // Header names are case-insensitive, check for collisions. 
normalizedHeaders := map[string]struct{}{} for _, v := range config.Headers { normalized := strings.Title(v.Key) if _, ok := normalizedHeaders[normalized]; ok { return fmt.Errorf("duplicate header %q", normalized) } normalizedHeaders[normalized] = struct{}{} } } } return nil } func validateVictorOpsConfigs(configs []monitoringv1alpha1.VictorOpsConfig) error { for _, config := range configs { // from https://github.com/prometheus/alertmanager/blob/a7f9fdadbecbb7e692d2cd8d3334e3d6de1602e1/config/notifiers.go#L497 reservedFields := map[string]struct{}{ "routing_key": {}, "message_type": {}, "state_message": {}, "entity_display_name": {}, "monitoring_tool": {}, "entity_id": {}, "entity_state": {}, } if len(config.CustomFields) > 0 { for _, v := range config.CustomFields { if _, ok := reservedFields[v.Key]; ok { return fmt.Errorf("usage of reserved word %q is not allowed in custom fields", v.Key) } } } if config.RoutingKey == "" { return errors.New("missing 'routingKey' key") } if config.APIURL != "" { if _, err := ValidateURL(config.APIURL); err != nil { return errors.Wrapf(err, "'apiURL' %s invalid", config.APIURL) } } } return nil } func validatePushoverConfigs(configs []monitoringv1alpha1.PushoverConfig) error { for _, config := range configs { if config.UserKey == nil { return errors.Errorf("mandatory field %q is empty", "userKey") } if config.Token == nil { return errors.Errorf("mandatory field %q is empty", "token") } } return nil } func validateSnsConfigs(configs []monitoringv1alpha1.SNSConfig) error { for _, config := range configs { if (config.TargetARN == "") != (config.TopicARN == "") != (config.PhoneNumber == "") { return fmt.Errorf("must provide either a Target ARN, Topic ARN, or Phone Number for SNS config") } } return nil } // validateAlertManagerRoutes verifies that the given route and all its children are semantically valid. // because of the self-referential issues mentioned in https://github.com/kubernetes/kubernetes/issues/62872 // it is not currently possible to apply OpenAPI validation to a v1alpha1.Route func validateAlertManagerRoutes(r *monitoringv1alpha1.Route, receivers, muteTimeIntervals map[string]struct{}, topLevelRoute bool) error { if r == nil { return nil } if _, found := receivers[r.Receiver]; !found && (r.Receiver != "" || topLevelRoute) { return errors.Errorf("receiver %q not found", r.Receiver) } if groupLen := len(r.GroupBy); groupLen > 0 { groupedBy := make(map[string]struct{}, groupLen) for _, str := range r.GroupBy { if _, found := groupedBy[str]; found { return errors.Errorf("duplicate values not permitted in route 'groupBy': %v", r.GroupBy) } groupedBy[str] = struct{}{} } if _, found := groupedBy["..."]; found && groupLen > 1 { return errors.Errorf("'...' 
must be a sole value in route 'groupBy': %v", r.GroupBy) } } for _, namedMuteTimeInterval := range r.MuteTimeIntervals { if _, found := muteTimeIntervals[namedMuteTimeInterval]; !found { return errors.Errorf("mute time interval %q not found", namedMuteTimeInterval) } } // validate that if defaults are set, they match regex if r.GroupInterval != "" && !durationRe.MatchString(r.GroupInterval) { return errors.Errorf("groupInterval %s does not match required regex: %s", r.GroupInterval, durationRe.String()) } if r.GroupWait != "" && !durationRe.MatchString(r.GroupWait) { return errors.Errorf("groupWait %s does not match required regex: %s", r.GroupInterval, durationRe.String()) } if r.RepeatInterval != "" && !durationRe.MatchString(r.RepeatInterval) { return errors.Errorf("repeatInterval %s does not match required regex: %s", r.GroupInterval, durationRe.String()) } children, err := r.ChildRoutes() if err != nil { return err } for i := range children { if err := validateAlertManagerRoutes(&children[i], receivers, muteTimeIntervals, false); err != nil { return errors.Wrapf(err, "route[%d]", i) } } return nil } func validateMuteTimeIntervals(muteTimeIntervals []monitoringv1alpha1.MuteTimeInterval) (map[string]struct{}, error) { muteTimeIntervalNames := make(map[string]struct{}, len(muteTimeIntervals)) for i, mti := range muteTimeIntervals { if err := mti.Validate(); err != nil { return nil, errors.Wrapf(err, "mute time interval[%d] is invalid", i) } muteTimeIntervalNames[mti.Name] = struct{}{} } return muteTimeIntervalNames, nil }
1
17,374
Hmm, so we didn't validate the HTTP config for all receivers? (See the sketch after this row.)
prometheus-operator-prometheus-operator
go
@@ -49,5 +49,14 @@ namespace OpenTelemetry.Context.Propagation /// <param name="getter">Function that will return string value of a key with the specified name.</param> /// <returns>Span context from it's text representation.</returns> SpanContext Extract<T>(T carrier, Func<T, string, IEnumerable<string>> getter); + + /// <summary> + /// Tests if an activity context has been injected into a carrier. + /// </summary> + /// <typeparam name="T">Type of object to extract context from. Typically HttpRequest or similar.</typeparam> + /// <param name="carrier">Object to extract context from. Instance of this object will be passed to the getter.</param> + /// <param name="getter">Function that will return string value of a key with the specified name.</param> + /// <returns><see langword="true" /> if the carrier has been injected with an activity context.</returns> + bool IsInjected<T>(T carrier, Func<T, string, IEnumerable<string>> getter); } }
1
// <copyright file="ITextFormat.cs" company="OpenTelemetry Authors"> // Copyright The OpenTelemetry Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // </copyright> using System; using System.Collections.Generic; using OpenTelemetry.Trace; namespace OpenTelemetry.Context.Propagation { /// <summary> /// Text format wire context propagator. Helps to extract and inject context from textual /// representation (typically http headers or metadata collection). /// </summary> public interface ITextFormat { /// <summary> /// Gets the list of headers used by propagator. The use cases of this are: /// * allow pre-allocation of fields, especially in systems like gRPC Metadata /// * allow a single-pass over an iterator (ex OpenTracing has no getter in TextMap). /// </summary> ISet<string> Fields { get; } /// <summary> /// Injects textual representation of span context to transmit over the wire. /// </summary> /// <typeparam name="T">Type of an object to set context on. Typically HttpRequest or similar.</typeparam> /// <param name="spanContext">Span context to transmit over the wire.</param> /// <param name="carrier">Object to set context on. Instance of this object will be passed to setter.</param> /// <param name="setter">Action that will set name and value pair on the object.</param> void Inject<T>(SpanContext spanContext, T carrier, Action<T, string, string> setter); /// <summary> /// Extracts span context from textual representation. /// </summary> /// <typeparam name="T">Type of object to extract context from. Typically HttpRequest or similar.</typeparam> /// <param name="carrier">Object to extract context from. Instance of this object will be passed to the getter.</param> /// <param name="getter">Function that will return string value of a key with the specified name.</param> /// <returns>Span context from it's text representation.</returns> SpanContext Extract<T>(T carrier, Func<T, string, IEnumerable<string>> getter); } }
1
15,279
ITextFormatActivity had this; can I add it here to get the same effect?
open-telemetry-opentelemetry-dotnet
.cs
@@ -20,8 +20,11 @@ import ( "sync" "time" + "github.com/aws/amazon-ecs-agent/agent/containerresource" + "github.com/aws/amazon-ecs-agent/agent/containerresource/containerstatus" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" - apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" + apierrors "github.com/aws/amazon-ecs-agent/agent/apierrors" "github.com/aws/amazon-ecs-agent/agent/credentials" resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
1
// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"). You may // not use this file except in compliance with the License. A copy of the // License is located at // // http://aws.amazon.com/apache2.0/ // // or in the "license" file accompanying this file. This file is distributed // on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either // express or implied. See the License for the specific language governing // permissions and limitations under the License. package container import ( "encoding/json" "fmt" "strconv" "sync" "time" apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors" "github.com/aws/amazon-ecs-agent/agent/credentials" resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status" "github.com/aws/aws-sdk-go/aws" "github.com/cihub/seelog" "github.com/docker/docker/api/types" dockercontainer "github.com/docker/docker/api/types/container" ) const ( // defaultContainerSteadyStateStatus defines the container status at // which the container is assumed to be in steady state. It is set // to 'ContainerRunning' unless overridden defaultContainerSteadyStateStatus = apicontainerstatus.ContainerRunning // awslogsAuthExecutionRole is the string value passed in the task payload // that specifies that the log driver should be authenticated using the // execution role awslogsAuthExecutionRole = "ExecutionRole" // DockerHealthCheckType is the type of container health check provided by docker DockerHealthCheckType = "docker" // AuthTypeECR is to use image pull auth over ECR AuthTypeECR = "ecr" // AuthTypeASM is to use image pull auth over AWS Secrets Manager AuthTypeASM = "asm" // MetadataURIEnvironmentVariableName defines the name of the environment // variable in containers' config, which can be used by the containers to access the // v3 metadata endpoint MetadataURIEnvironmentVariableName = "ECS_CONTAINER_METADATA_URI" // MetadataURIEnvVarNameV4 defines the name of the environment // variable in containers' config, which can be used by the containers to access the // v4 metadata endpoint MetadataURIEnvVarNameV4 = "ECS_CONTAINER_METADATA_URI_V4" // MetadataURIFormat defines the URI format for v4 metadata endpoint MetadataURIFormatV4 = "http://169.254.170.2/v4/%s" // SecretProviderSSM is to show secret provider being SSM SecretProviderSSM = "ssm" // SecretProviderASM is to show secret provider being ASM SecretProviderASM = "asm" // SecretTypeEnv is to show secret type being ENVIRONMENT_VARIABLE SecretTypeEnv = "ENVIRONMENT_VARIABLE" // TargetLogDriver is to show secret target being "LOG_DRIVER", the default will be "CONTAINER" SecretTargetLogDriver = "LOG_DRIVER" // neuronVisibleDevicesEnvVar is the env which indicates that the container wants to use inferentia devices. neuronVisibleDevicesEnvVar = "AWS_NEURON_VISIBLE_DEVICES" ) var ( // MetadataURIFormat defines the URI format for v3 metadata endpoint. Made as a var to be able to // overwrite it in test. MetadataURIFormat = "http://169.254.170.2/v3/%s" ) // DockerConfig represents additional metadata about a container to run. It's // remodeled from the `ecsacs` api model file. Eventually it should not exist // once this remodeling is refactored out. 
type DockerConfig struct { // Config is the configuration used to create container Config *string `json:"config"` // HostConfig is the configuration of container related to host resource HostConfig *string `json:"hostConfig"` // Version specifies the docker client API version to use Version *string `json:"version"` } // HealthStatus contains the health check result returned by docker type HealthStatus struct { // Status is the container health status Status apicontainerstatus.ContainerHealthStatus `json:"status,omitempty"` // Since is the timestamp when container health status changed Since *time.Time `json:"statusSince,omitempty"` // ExitCode is the exitcode of health check if failed ExitCode int `json:"exitCode,omitempty"` // Output is the output of health check Output string `json:"output,omitempty"` } type ManagedAgentState struct { // ID of this managed agent state ID string `json:"id,omitempty"` // TODO: [ecs-exec] Change variable name from Status to KnownStatus in future PR to avoid noise // Status is the managed agent health status Status apicontainerstatus.ManagedAgentStatus `json:"status,omitempty"` // SentStatus is the managed agent sent status SentStatus apicontainerstatus.ManagedAgentStatus `json:"sentStatus,omitempty"` // Reason is a placeholder for failure messaging Reason string `json:"reason,omitempty"` // LastStartedAt is the timestamp when the status last went from PENDING->RUNNING LastStartedAt time.Time `json:"lastStartedAt,omitempty"` // Metadata holds metadata about the managed agent Metadata map[string]interface{} `json:"metadata,omitempty"` // InitFailed indicates if exec agent initialization failed InitFailed bool `json:"initFailed,omitempty"` } type ManagedAgent struct { ManagedAgentState // Name is the name of this managed agent. This name is streamed down from ACS. Name string `json:"name,omitempty"` // Properties of this managed agent. Properties are streamed down from ACS. Properties map[string]string `json:"properties,omitempty"` } // Container is the internal representation of a container in the ECS agent type Container struct { // Name is the name of the container specified in the task definition Name string // RuntimeID is the docker id of the container RuntimeID string // CredentialsID is used to set the CredentialsID field for the // IAMRoleCredentials object associated with the container. This id can be // used to look up the credentials for container in the credentials manager CredentialsID string // ExecutionCredentialsID is used to set the ExecutionCredentialsID field for the // IAMRoleCredentials object associated with the container. This id can be // used to look up the credentials for container in the credentials manager ExecutionCredentialsID string // TaskARNUnsafe is the task ARN of the task that the container belongs to. Access should be // protected by lock i.e. via GetTaskARN and SetTaskARN. TaskARNUnsafe string `json:"taskARN"` // DependsOnUnsafe is the field which specifies the ordering for container startup and shutdown. 
DependsOnUnsafe []DependsOn `json:"dependsOn,omitempty"` // ManagedAgentsUnsafe presently contains only the executeCommandAgent ManagedAgentsUnsafe []ManagedAgent `json:"managedAgents,omitempty"` // V3EndpointID is a container identifier used to construct v3 metadata endpoint; it's unique among // all the containers managed by the agent V3EndpointID string // Image is the image name specified in the task definition Image string // ImageID is the local ID of the image used in the container ImageID string // ImageDigest is the sha-256 digest of the container image as pulled from the repository ImageDigest string // Command is the command to run in the container which is specified in the task definition Command []string // CPU is the cpu limitation of the container which is specified in the task definition CPU uint `json:"Cpu"` // GPUIDs is the list of GPU ids for a container GPUIDs []string // Memory is the memory limitation of the container which is specified in the task definition Memory uint // Links contains a list of containers to link, corresponding to docker option: --link Links []string // FirelensConfig contains configuration for a Firelens container FirelensConfig *FirelensConfig `json:"firelensConfiguration"` // VolumesFrom contains a list of container's volume to use, corresponding to docker option: --volumes-from VolumesFrom []VolumeFrom `json:"volumesFrom"` // MountPoints contains a list of volume mount paths MountPoints []MountPoint `json:"mountPoints"` // Ports contains a list of ports binding configuration Ports []PortBinding `json:"portMappings"` // Secrets contains a list of secret Secrets []Secret `json:"secrets"` // Essential denotes whether the container is essential or not Essential bool // EntryPoint is entrypoint of the container, corresponding to docker option: --entrypoint EntryPoint *[]string // Environment is the environment variable set in the container Environment map[string]string `json:"environment"` // EnvironmentFiles is the list of environmentFile used to populate environment variables EnvironmentFiles []EnvironmentFile `json:"environmentFiles"` // Overrides contains the configuration to override of a container Overrides ContainerOverrides `json:"overrides"` // DockerConfig is the configuration used to create the container DockerConfig DockerConfig `json:"dockerConfig"` // RegistryAuthentication is the auth data used to pull image RegistryAuthentication *RegistryAuthenticationData `json:"registryAuthentication"` // HealthCheckType is the mechanism to use for the container health check // currently it only supports 'DOCKER' HealthCheckType string `json:"healthCheckType,omitempty"` // Health contains the health check information of container health check Health HealthStatus `json:"-"` // LogsAuthStrategy specifies how the logs driver for the container will be // authenticated LogsAuthStrategy string // StartTimeout specifies the time value after which if a container has a dependency // on another container and the dependency conditions are 'SUCCESS', 'COMPLETE', 'HEALTHY', // then that dependency will not be resolved. StartTimeout uint // StopTimeout specifies the time value to be passed as StopContainer api call StopTimeout uint // lock is used for fields that are accessed and updated concurrently lock sync.RWMutex // DesiredStatusUnsafe represents the state where the container should go. 
Generally, // the desired status is informed by the ECS backend as a result of either // API calls made to ECS or decisions made by the ECS service scheduler, // though the agent may also set the DesiredStatusUnsafe if a different "essential" // container in the task exits. The DesiredStatus is almost always either // ContainerRunning or ContainerStopped. // NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `GetDesiredStatus` // and `SetDesiredStatus`. // TODO DesiredStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. DesiredStatusUnsafe apicontainerstatus.ContainerStatus `json:"desiredStatus"` // KnownStatusUnsafe represents the state where the container is. // NOTE: Do not access `KnownStatusUnsafe` directly. Instead, use `GetKnownStatus` // and `SetKnownStatus`. // TODO KnownStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON // is handled properly so that the state storage continues to work. KnownStatusUnsafe apicontainerstatus.ContainerStatus `json:"KnownStatus"` // TransitionDependenciesMap is a map of the dependent container status to other // dependencies that must be satisfied in order for this container to transition. TransitionDependenciesMap TransitionDependenciesMap `json:"TransitionDependencySet"` // SteadyStateDependencies is a list of containers that must be in "steady state" before // this one is created // Note: Current logic requires that the containers specified here are run // before this container can even be pulled. // // Deprecated: Use TransitionDependencySet instead. SteadyStateDependencies is retained for compatibility with old // state files. SteadyStateDependencies []string `json:"RunDependencies"` // Type specifies the container type. Except the 'Normal' type, all other types // are not directly specified by task definitions, but created by the agent. The // JSON tag is retained as this field's previous name 'IsInternal' for maintaining // backwards compatibility. Please see JSON parsing hooks for this type for more // details Type ContainerType `json:"IsInternal"` // AppliedStatus is the status that has been "applied" (e.g., we've called Pull, // Create, Start, or Stop) but we don't yet know that the application was successful. // No need to save it in the state file, as agent will synchronize the container status // on restart and for some operation eg: pull, it has to be recalled again. AppliedStatus apicontainerstatus.ContainerStatus `json:"-"` // ApplyingError is an error that occurred trying to transition the container // to its desired state. It is propagated to the backend in the form // 'Name: ErrorString' as the 'reason' field. ApplyingError *apierrors.DefaultNamedError // SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS // SubmitContainerStateChange API. // TODO SentStatusUnsafe should probably be private with appropriately written // setter/getter. When this is done, we need to ensure that the UnmarshalJSON is // handled properly so that the state storage continues to work. SentStatusUnsafe apicontainerstatus.ContainerStatus `json:"SentStatus"` // MetadataFileUpdated is set to true when we have completed updating the // metadata file MetadataFileUpdated bool `json:"metadataFileUpdated"` // KnownExitCodeUnsafe specifies the exit code for the container. 
// It is exposed outside of the package so that it's marshalled/unmarshalled in // the JSON body while saving the state. // NOTE: Do not access KnownExitCodeUnsafe directly. Instead, use `GetKnownExitCode` // and `SetKnownExitCode`. KnownExitCodeUnsafe *int `json:"KnownExitCode"` // KnownPortBindingsUnsafe is an array of port bindings for the container. KnownPortBindingsUnsafe []PortBinding `json:"KnownPortBindings"` // VolumesUnsafe is an array of volume mounts in the container. VolumesUnsafe []types.MountPoint `json:"-"` // NetworkModeUnsafe is the network mode in which the container is started NetworkModeUnsafe string `json:"-"` // NetworksUnsafe denotes the Docker Network Settings in the container. NetworkSettingsUnsafe *types.NetworkSettings `json:"-"` // SteadyStateStatusUnsafe specifies the steady state status for the container // If uninitialized, it's assumed to be set to 'ContainerRunning'. Even though // it's not only supposed to be set when the container is being created, it's // exposed outside of the package so that it's marshalled/unmarshalled in the // the JSON body while saving the state SteadyStateStatusUnsafe *apicontainerstatus.ContainerStatus `json:"SteadyStateStatus,omitempty"` // ContainerArn is the Arn of this container. ContainerArn string `json:"ContainerArn,omitempty"` // ContainerTornDownUnsafe is set to true when we have cleaned up this container. For now this is only used for the // pause container ContainerTornDownUnsafe bool `json:"containerTornDown"` createdAt time.Time startedAt time.Time finishedAt time.Time labels map[string]string } type DependsOn struct { ContainerName string `json:"containerName"` Condition string `json:"condition"` } // DockerContainer is a mapping between containers-as-docker-knows-them and // containers-as-we-know-them. // This is primarily used in DockerState, but lives here such that tasks and // containers know how to convert themselves into Docker's desired config format type DockerContainer struct { DockerID string `json:"DockerId"` DockerName string // needed for linking Container *Container } type EnvironmentFile struct { Value string `json:"value"` Type string `json:"type"` } // MountPoint describes the in-container location of a Volume and references // that Volume by name. type MountPoint struct { SourceVolume string `json:"sourceVolume"` ContainerPath string `json:"containerPath"` ReadOnly bool `json:"readOnly"` } // FirelensConfig describes the type and options of a Firelens container. type FirelensConfig struct { Type string `json:"type"` Version string `json:"version"` CollectStdoutLogs bool `json:"collectStdoutLogs,omitempty"` StatusMessageReportingPath string `json:"statusMessageReportingPath,omitempty"` Options map[string]string `json:"options"` } // VolumeFrom is a volume which references another container as its source. 
type VolumeFrom struct { SourceContainer string `json:"sourceContainer"` ReadOnly bool `json:"readOnly"` } // Secret contains all essential attributes needed for ECS secrets vending as environment variables/tmpfs files type Secret struct { Name string `json:"name"` ValueFrom string `json:"valueFrom"` Region string `json:"region"` ContainerPath string `json:"containerPath"` Type string `json:"type"` Provider string `json:"provider"` Target string `json:"target"` } // GetSecretResourceCacheKey returns the key required to access the secret // from the ssmsecret resource func (s *Secret) GetSecretResourceCacheKey() string { return s.ValueFrom + "_" + s.Region } // String returns a human readable string representation of DockerContainer func (dc *DockerContainer) String() string { if dc == nil { return "nil" } return fmt.Sprintf("Id: %s, Name: %s, Container: %s", dc.DockerID, dc.DockerName, dc.Container.String()) } // NewContainerWithSteadyState creates a new Container object with the specified // steady state. Containers that need the non default steady state set will // use this method instead of setting it directly func NewContainerWithSteadyState(steadyState apicontainerstatus.ContainerStatus) *Container { steadyStateStatus := steadyState return &Container{ SteadyStateStatusUnsafe: &steadyStateStatus, } } // KnownTerminal returns true if the container's known status is STOPPED func (c *Container) KnownTerminal() bool { return c.GetKnownStatus().Terminal() } // DesiredTerminal returns true if the container's desired status is STOPPED func (c *Container) DesiredTerminal() bool { return c.GetDesiredStatus().Terminal() } // GetKnownStatus returns the known status of the container func (c *Container) GetKnownStatus() apicontainerstatus.ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.KnownStatusUnsafe } // SetKnownStatus sets the known status of the container and update the container // applied status func (c *Container) SetKnownStatus(status apicontainerstatus.ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.KnownStatusUnsafe = status c.updateAppliedStatusUnsafe(status) } // GetDesiredStatus gets the desired status of the container func (c *Container) GetDesiredStatus() apicontainerstatus.ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.DesiredStatusUnsafe } // SetDesiredStatus sets the desired status of the container func (c *Container) SetDesiredStatus(status apicontainerstatus.ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.DesiredStatusUnsafe = status } // GetSentStatus safely returns the SentStatusUnsafe of the container func (c *Container) GetSentStatus() apicontainerstatus.ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.SentStatusUnsafe } // SetSentStatus safely sets the SentStatusUnsafe of the container func (c *Container) SetSentStatus(status apicontainerstatus.ContainerStatus) { c.lock.Lock() defer c.lock.Unlock() c.SentStatusUnsafe = status } // SetKnownExitCode sets exit code field in container struct func (c *Container) SetKnownExitCode(i *int) { c.lock.Lock() defer c.lock.Unlock() c.KnownExitCodeUnsafe = i } // GetKnownExitCode returns the container exit code func (c *Container) GetKnownExitCode() *int { c.lock.RLock() defer c.lock.RUnlock() return c.KnownExitCodeUnsafe } // SetRegistryAuthCredentials sets the credentials for pulling image from ECR func (c *Container) SetRegistryAuthCredentials(credential credentials.IAMRoleCredentials) { c.lock.Lock() defer c.lock.Unlock() 
c.RegistryAuthentication.ECRAuthData.SetPullCredentials(credential) } // ShouldPullWithExecutionRole returns whether this container has its own ECR credentials func (c *Container) ShouldPullWithExecutionRole() bool { c.lock.RLock() defer c.lock.RUnlock() return c.RegistryAuthentication != nil && c.RegistryAuthentication.Type == AuthTypeECR && c.RegistryAuthentication.ECRAuthData != nil && c.RegistryAuthentication.ECRAuthData.UseExecutionRole } // String returns a human readable string representation of this object func (c *Container) String() string { ret := fmt.Sprintf("%s(%s) (%s->%s)", c.Name, c.Image, c.GetKnownStatus().String(), c.GetDesiredStatus().String()) if c.GetKnownExitCode() != nil { ret += " - Exit: " + strconv.Itoa(*c.GetKnownExitCode()) } return ret } // GetSteadyStateStatus returns the steady state status for the container. If // Container.steadyState is not initialized, the default steady state status // defined by `defaultContainerSteadyStateStatus` is returned. The 'pause' // container's steady state differs from that of other containers, as the // 'pause' container can reach its teady state once networking resources // have been provisioned for it, which is done in the `ContainerResourcesProvisioned` // state func (c *Container) GetSteadyStateStatus() apicontainerstatus.ContainerStatus { if c.SteadyStateStatusUnsafe == nil { return defaultContainerSteadyStateStatus } return *c.SteadyStateStatusUnsafe } // IsKnownSteadyState returns true if the `KnownState` of the container equals // the `steadyState` defined for the container func (c *Container) IsKnownSteadyState() bool { knownStatus := c.GetKnownStatus() return knownStatus == c.GetSteadyStateStatus() } // GetNextKnownStateProgression returns the state that the container should // progress to based on its `KnownState`. The progression is // incremental until the container reaches its steady state. From then on, // it transitions to `ContainerStopped`. // // For example: // a. if the steady state of the container is defined as `ContainerRunning`, // the progression is: // Container: None -> Pulled -> Created -> Running* -> Stopped -> Zombie // // b. if the steady state of the container is defined as `ContainerResourcesProvisioned`, // the progression is: // Container: None -> Pulled -> Created -> Running -> Provisioned* -> Stopped -> Zombie // // c. if the steady state of the container is defined as `ContainerCreated`, // the progression is: // Container: None -> Pulled -> Created* -> Stopped -> Zombie func (c *Container) GetNextKnownStateProgression() apicontainerstatus.ContainerStatus { if c.IsKnownSteadyState() { return apicontainerstatus.ContainerStopped } return c.GetKnownStatus() + 1 } // IsInternal returns true if the container type is `ContainerCNIPause` // or `ContainerNamespacePause`. It returns false otherwise func (c *Container) IsInternal() bool { return c.Type != ContainerNormal } // IsRunning returns true if the container's known status is either RUNNING // or RESOURCES_PROVISIONED. 
It returns false otherwise func (c *Container) IsRunning() bool { return c.GetKnownStatus().IsRunning() } // IsMetadataFileUpdated returns true if the metadata file has been once the // metadata file is ready and will no longer change func (c *Container) IsMetadataFileUpdated() bool { c.lock.RLock() defer c.lock.RUnlock() return c.MetadataFileUpdated } // SetMetadataFileUpdated sets the container's MetadataFileUpdated status to true func (c *Container) SetMetadataFileUpdated() { c.lock.Lock() defer c.lock.Unlock() c.MetadataFileUpdated = true } // IsEssential returns whether the container is an essential container or not func (c *Container) IsEssential() bool { c.lock.RLock() defer c.lock.RUnlock() return c.Essential } // AWSLogAuthExecutionRole returns true if the auth is by execution role func (c *Container) AWSLogAuthExecutionRole() bool { return c.LogsAuthStrategy == awslogsAuthExecutionRole } // SetCreatedAt sets the timestamp for container's creation time func (c *Container) SetCreatedAt(createdAt time.Time) { if createdAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.createdAt = createdAt } // SetStartedAt sets the timestamp for container's start time func (c *Container) SetStartedAt(startedAt time.Time) { if startedAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.startedAt = startedAt } // SetFinishedAt sets the timestamp for container's stopped time func (c *Container) SetFinishedAt(finishedAt time.Time) { if finishedAt.IsZero() { return } c.lock.Lock() defer c.lock.Unlock() c.finishedAt = finishedAt } // GetCreatedAt sets the timestamp for container's creation time func (c *Container) GetCreatedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.createdAt } // GetStartedAt sets the timestamp for container's start time func (c *Container) GetStartedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.startedAt } // GetFinishedAt sets the timestamp for container's stopped time func (c *Container) GetFinishedAt() time.Time { c.lock.RLock() defer c.lock.RUnlock() return c.finishedAt } // SetLabels sets the labels for a container func (c *Container) SetLabels(labels map[string]string) { c.lock.Lock() defer c.lock.Unlock() c.labels = labels } // SetRuntimeID sets the DockerID for a container func (c *Container) SetRuntimeID(RuntimeID string) { c.lock.Lock() defer c.lock.Unlock() c.RuntimeID = RuntimeID } // GetRuntimeID gets the DockerID for a container func (c *Container) GetRuntimeID() string { c.lock.RLock() defer c.lock.RUnlock() return c.RuntimeID } // SetImageDigest sets the ImageDigest for a container func (c *Container) SetImageDigest(ImageDigest string) { c.lock.Lock() defer c.lock.Unlock() c.ImageDigest = ImageDigest } // GetImageDigest gets the ImageDigest for a container func (c *Container) GetImageDigest() string { c.lock.RLock() defer c.lock.RUnlock() return c.ImageDigest } // GetLabels gets the labels for a container func (c *Container) GetLabels() map[string]string { c.lock.RLock() defer c.lock.RUnlock() return c.labels } // SetKnownPortBindings sets the ports for a container func (c *Container) SetKnownPortBindings(ports []PortBinding) { c.lock.Lock() defer c.lock.Unlock() c.KnownPortBindingsUnsafe = ports } // GetKnownPortBindings gets the ports for a container func (c *Container) GetKnownPortBindings() []PortBinding { c.lock.RLock() defer c.lock.RUnlock() return c.KnownPortBindingsUnsafe } // GetManagedAgents returns the managed agents configured for this container func (c *Container) GetManagedAgents() []ManagedAgent { 
c.lock.RLock() defer c.lock.RUnlock() return c.ManagedAgentsUnsafe } // SetVolumes sets the volumes mounted in a container func (c *Container) SetVolumes(volumes []types.MountPoint) { c.lock.Lock() defer c.lock.Unlock() c.VolumesUnsafe = volumes } // GetVolumes returns the volumes mounted in a container func (c *Container) GetVolumes() []types.MountPoint { c.lock.RLock() defer c.lock.RUnlock() return c.VolumesUnsafe } // SetNetworkSettings sets the networks field in a container func (c *Container) SetNetworkSettings(networks *types.NetworkSettings) { c.lock.Lock() defer c.lock.Unlock() c.NetworkSettingsUnsafe = networks } // GetNetworkSettings returns the networks field in a container func (c *Container) GetNetworkSettings() *types.NetworkSettings { c.lock.RLock() defer c.lock.RUnlock() return c.NetworkSettingsUnsafe } // SetNetworkMode sets the network mode of the container func (c *Container) SetNetworkMode(networkMode string) { c.lock.Lock() defer c.lock.Unlock() c.NetworkModeUnsafe = networkMode } // GetNetworkMode returns the network mode of the container func (c *Container) GetNetworkMode() string { c.lock.RLock() defer c.lock.RUnlock() return c.NetworkModeUnsafe } // HealthStatusShouldBeReported returns true if the health check is defined in // the task definition func (c *Container) HealthStatusShouldBeReported() bool { return c.HealthCheckType == DockerHealthCheckType } // SetHealthStatus sets the container health status func (c *Container) SetHealthStatus(health HealthStatus) { c.lock.Lock() defer c.lock.Unlock() if c.Health.Status == health.Status { return } c.Health.Status = health.Status c.Health.Since = aws.Time(time.Now()) c.Health.Output = health.Output // Set the health exit code if the health check failed if c.Health.Status == apicontainerstatus.ContainerUnhealthy { c.Health.ExitCode = health.ExitCode } } // GetHealthStatus returns the container health information func (c *Container) GetHealthStatus() HealthStatus { c.lock.RLock() defer c.lock.RUnlock() // Copy the pointer to avoid race condition copyHealth := c.Health if c.Health.Since != nil { copyHealth.Since = aws.Time(aws.TimeValue(c.Health.Since)) } return copyHealth } // BuildContainerDependency adds a new dependency container and satisfied status // to the dependent container func (c *Container) BuildContainerDependency(contName string, satisfiedStatus apicontainerstatus.ContainerStatus, dependentStatus apicontainerstatus.ContainerStatus) { contDep := ContainerDependency{ ContainerName: contName, SatisfiedStatus: satisfiedStatus, } if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok { c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{} } deps := c.TransitionDependenciesMap[dependentStatus] deps.ContainerDependencies = append(deps.ContainerDependencies, contDep) c.TransitionDependenciesMap[dependentStatus] = deps } // BuildResourceDependency adds a new resource dependency by taking in the required status // of the resource that satisfies the dependency and the dependent container status, // whose transition is dependent on the resource. 
// example: if container's PULLED transition is dependent on volume resource's // CREATED status, then RequiredStatus=VolumeCreated and dependentStatus=ContainerPulled func (c *Container) BuildResourceDependency(resourceName string, requiredStatus resourcestatus.ResourceStatus, dependentStatus apicontainerstatus.ContainerStatus) { resourceDep := ResourceDependency{ Name: resourceName, RequiredStatus: requiredStatus, } if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok { c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{} } deps := c.TransitionDependenciesMap[dependentStatus] deps.ResourceDependencies = append(deps.ResourceDependencies, resourceDep) c.TransitionDependenciesMap[dependentStatus] = deps } // updateAppliedStatusUnsafe updates the container transitioning status func (c *Container) updateAppliedStatusUnsafe(knownStatus apicontainerstatus.ContainerStatus) { if c.AppliedStatus == apicontainerstatus.ContainerStatusNone { return } // Check if the container transition has already finished if c.AppliedStatus <= knownStatus { c.AppliedStatus = apicontainerstatus.ContainerStatusNone } } // SetAppliedStatus sets the applied status of container and returns whether // the container is already in a transition func (c *Container) SetAppliedStatus(status apicontainerstatus.ContainerStatus) bool { c.lock.Lock() defer c.lock.Unlock() if c.AppliedStatus != apicontainerstatus.ContainerStatusNone { // return false to indicate the set operation failed return false } c.AppliedStatus = status return true } // GetAppliedStatus returns the transitioning status of container func (c *Container) GetAppliedStatus() apicontainerstatus.ContainerStatus { c.lock.RLock() defer c.lock.RUnlock() return c.AppliedStatus } // ShouldPullWithASMAuth returns true if this container needs to retrieve // private registry authentication data from ASM func (c *Container) ShouldPullWithASMAuth() bool { c.lock.RLock() defer c.lock.RUnlock() return c.RegistryAuthentication != nil && c.RegistryAuthentication.Type == AuthTypeASM && c.RegistryAuthentication.ASMAuthData != nil } // SetASMDockerAuthConfig add the docker auth config data to the // RegistryAuthentication struct held by the container, this is then passed down // to the docker client to pull the image func (c *Container) SetASMDockerAuthConfig(dac types.AuthConfig) { c.RegistryAuthentication.ASMAuthData.SetDockerAuthConfig(dac) } // SetV3EndpointID sets the v3 endpoint id of container func (c *Container) SetV3EndpointID(v3EndpointID string) { c.lock.Lock() defer c.lock.Unlock() c.V3EndpointID = v3EndpointID } // GetV3EndpointID returns the v3 endpoint id of container func (c *Container) GetV3EndpointID() string { c.lock.RLock() defer c.lock.RUnlock() return c.V3EndpointID } // InjectV3MetadataEndpoint injects the v3 metadata endpoint as an environment variable for a container func (c *Container) InjectV3MetadataEndpoint() { c.lock.Lock() defer c.lock.Unlock() // don't assume that the environment variable map has been initialized by others if c.Environment == nil { c.Environment = make(map[string]string) } c.Environment[MetadataURIEnvironmentVariableName] = fmt.Sprintf(MetadataURIFormat, c.V3EndpointID) } // InjectV4MetadataEndpoint injects the v4 metadata endpoint as an environment variable for a container func (c *Container) InjectV4MetadataEndpoint() { c.lock.Lock() defer c.lock.Unlock() // don't assume that the environment variable map has been initialized by others if c.Environment == nil { c.Environment = make(map[string]string) } 
c.Environment[MetadataURIEnvVarNameV4] = fmt.Sprintf(MetadataURIFormatV4, c.V3EndpointID) } // ShouldCreateWithSSMSecret returns true if this container needs to get secret // value from SSM Parameter Store func (c *Container) ShouldCreateWithSSMSecret() bool { c.lock.RLock() defer c.lock.RUnlock() // Secrets field will be nil if there is no secrets for container if c.Secrets == nil { return false } for _, secret := range c.Secrets { if secret.Provider == SecretProviderSSM { return true } } return false } // ShouldCreateWithASMSecret returns true if this container needs to get secret // value from AWS Secrets Manager func (c *Container) ShouldCreateWithASMSecret() bool { c.lock.RLock() defer c.lock.RUnlock() // Secrets field will be nil if there is no secrets for container if c.Secrets == nil { return false } for _, secret := range c.Secrets { if secret.Provider == SecretProviderASM { return true } } return false } // ShouldCreateWithEnvFiles returns true if this container needs to // retrieve environment variable files func (c *Container) ShouldCreateWithEnvFiles() bool { c.lock.RLock() defer c.lock.RUnlock() if c.EnvironmentFiles == nil { return false } return len(c.EnvironmentFiles) != 0 } // MergeEnvironmentVariables appends additional envVarName:envVarValue pairs to // the the container's environment values structure func (c *Container) MergeEnvironmentVariables(envVars map[string]string) { c.lock.Lock() defer c.lock.Unlock() // don't assume that the environment variable map has been initialized by others if c.Environment == nil { c.Environment = make(map[string]string) } for k, v := range envVars { c.Environment[k] = v } } // MergeEnvironmentVariablesFromEnvfiles appends environment variable pairs from // the retrieved envfiles to the container's environment values list // envvars from envfiles will have lower precedence than existing envvars func (c *Container) MergeEnvironmentVariablesFromEnvfiles(envVarsList []map[string]string) error { c.lock.Lock() defer c.lock.Unlock() // create map if does not exist if c.Environment == nil { c.Environment = make(map[string]string) } // envVarsList is a list of map, where each map is from an envfile // iterate over this sequentially because the original order of the // environment files give precedence to the environment variables for _, envVars := range envVarsList { for k, v := range envVars { // existing environment variables have precedence over variables from envfile // only set the env var if key does not already exist if _, ok := c.Environment[k]; !ok { c.Environment[k] = v } } } return nil } // HasSecret returns whether a container has secret based on a certain condition. func (c *Container) HasSecret(f func(s Secret) bool) bool { c.lock.RLock() defer c.lock.RUnlock() if c.Secrets == nil { return false } for _, secret := range c.Secrets { if f(secret) { return true } } return false } func (c *Container) GetStartTimeout() time.Duration { c.lock.Lock() defer c.lock.Unlock() return time.Duration(c.StartTimeout) * time.Second } func (c *Container) GetStopTimeout() time.Duration { c.lock.Lock() defer c.lock.Unlock() return time.Duration(c.StopTimeout) * time.Second } func (c *Container) GetDependsOn() []DependsOn { c.lock.RLock() defer c.lock.RUnlock() return c.DependsOnUnsafe } func (c *Container) SetDependsOn(dependsOn []DependsOn) { c.lock.Lock() defer c.lock.Unlock() c.DependsOnUnsafe = dependsOn } // DependsOnContainer checks whether a container depends on another container. 
func (c *Container) DependsOnContainer(name string) bool { c.lock.RLock() defer c.lock.RUnlock() for _, dependsOn := range c.DependsOnUnsafe { if dependsOn.ContainerName == name { return true } } return false } // HasContainerDependencies checks whether a container has any container dependency. func (c *Container) HasContainerDependencies() bool { c.lock.RLock() defer c.lock.RUnlock() return len(c.DependsOnUnsafe) != 0 } // AddContainerDependency adds a container dependency to a container. func (c *Container) AddContainerDependency(name string, condition string) { c.lock.Lock() defer c.lock.Unlock() c.DependsOnUnsafe = append(c.DependsOnUnsafe, DependsOn{ ContainerName: name, Condition: condition, }) } // GetLogDriver returns the log driver used by the container. func (c *Container) GetLogDriver() string { c.lock.RLock() defer c.lock.RUnlock() if c.DockerConfig.HostConfig == nil { return "" } hostConfig := &dockercontainer.HostConfig{} err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) if err != nil { seelog.Warnf("Encountered error when trying to get log driver for container %s: %v", c.RuntimeID, err) return "" } return hostConfig.LogConfig.Type } // GetLogOptions gets the log 'options' map passed into the task definition. // see https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_LogConfiguration.html func (c *Container) GetLogOptions() map[string]string { c.lock.RLock() defer c.lock.RUnlock() if c.DockerConfig.HostConfig == nil { return map[string]string{} } hostConfig := &dockercontainer.HostConfig{} err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) if err != nil { seelog.Warnf("Encountered error when trying to get log configuration for container %s: %v", c.RuntimeID, err) return map[string]string{} } return hostConfig.LogConfig.Config } // GetNetworkModeFromHostConfig returns the network mode used by the container from the host config . func (c *Container) GetNetworkModeFromHostConfig() string { c.lock.RLock() defer c.lock.RUnlock() if c.DockerConfig.HostConfig == nil { return "" } hostConfig := &dockercontainer.HostConfig{} // TODO return error to differentiate between error and default mode . err := json.Unmarshal([]byte(*c.DockerConfig.HostConfig), hostConfig) if err != nil { seelog.Warnf("Encountered error when trying to get network mode for container %s: %v", c.RuntimeID, err) return "" } return hostConfig.NetworkMode.NetworkName() } // GetHostConfig returns the container's host config. func (c *Container) GetHostConfig() *string { c.lock.RLock() defer c.lock.RUnlock() return c.DockerConfig.HostConfig } // GetFirelensConfig returns the container's firelens config. func (c *Container) GetFirelensConfig() *FirelensConfig { c.lock.RLock() defer c.lock.RUnlock() return c.FirelensConfig } // GetFirelensVersion returns the container's firelens version. func (c *Container) GetFirelensVersion() string { c.lock.RLock() defer c.lock.RUnlock() if c.FirelensConfig != nil { return c.FirelensConfig.Version } return "" } // GetEnvironmentFiles returns the container's environment files. func (c *Container) GetEnvironmentFiles() []EnvironmentFile { c.lock.RLock() defer c.lock.RUnlock() return c.EnvironmentFiles } // RequireNeuronRuntime checks if the container needs to use the neuron runtime. 
func (c *Container) RequireNeuronRuntime() bool { c.lock.RLock() defer c.lock.RUnlock() _, ok := c.Environment[neuronVisibleDevicesEnvVar] return ok } // SetCredentialsID sets the credentials ID for the container func (c *Container) SetCredentialsID(id string) { c.lock.Lock() defer c.lock.Unlock() c.CredentialsID = id } // GetCredentialsID gets the credentials ID for the container func (c *Container) GetCredentialsID() string { c.lock.RLock() defer c.lock.RUnlock() return c.CredentialsID } // SetExecutionCredentialsID sets the execution credentials ID for the container func (c *Container) SetExecutionCredentialsID(id string) { c.lock.Lock() defer c.lock.Unlock() c.ExecutionCredentialsID = id } // GetExecutionCredentialsID gets the execution credentials ID for the container func (c *Container) GetExecutionCredentialsID() string { c.lock.RLock() defer c.lock.RUnlock() return c.ExecutionCredentialsID } // SetTaskARN sets the task arn of the container. func (c *Container) SetTaskARN(arn string) { c.lock.Lock() defer c.lock.Unlock() c.TaskARNUnsafe = arn } // GetTaskARN returns the task arn of the container. func (c *Container) GetTaskARN() string { c.lock.RLock() defer c.lock.RUnlock() return c.TaskARNUnsafe } // HasNotAndWillNotStart returns true if the container has never started, and is not going to start in the future. // This is true if the following are all true: // 1. Container's known status is earlier than running; // 2. Container's desired status is stopped; // 3. Container is not in the middle a transition (indicated by applied status is none status). func (c *Container) HasNotAndWillNotStart() bool { c.lock.RLock() defer c.lock.RUnlock() return c.KnownStatusUnsafe < apicontainerstatus.ContainerRunning && c.DesiredStatusUnsafe.Terminal() && c.AppliedStatus == apicontainerstatus.ContainerStatusNone } // GetManagedAgentByName retrieves the managed agent with the name specified and a boolean indicating whether an agent // was found or not. // note: a zero value for ManagedAgent if the name is not known to this container. func (c *Container) GetManagedAgentByName(agentName string) (ManagedAgent, bool) { c.lock.RLock() defer c.lock.RUnlock() for _, ma := range c.ManagedAgentsUnsafe { if ma.Name == agentName { return ma, true } } return ManagedAgent{}, false } // UpdateManagedAgentByName updates the state of the managed agent with the name specified. If the agent is not found, // this method returns false. func (c *Container) UpdateManagedAgentByName(agentName string, state ManagedAgentState) bool { c.lock.Lock() defer c.lock.Unlock() for i, ma := range c.ManagedAgentsUnsafe { if ma.Name == agentName { // It's necessary to clone the whole ManagedAgent struct c.ManagedAgentsUnsafe[i] = ManagedAgent{ Name: ma.Name, Properties: ma.Properties, ManagedAgentState: state, } return true } } return false } // UpdateManagedAgentStatus updates the status of the managed agent with the name specified. If the agent is not found, // this method returns false. func (c *Container) UpdateManagedAgentStatus(agentName string, status apicontainerstatus.ManagedAgentStatus) bool { c.lock.Lock() defer c.lock.Unlock() for i, ma := range c.ManagedAgentsUnsafe { if ma.Name == agentName { c.ManagedAgentsUnsafe[i].Status = status return true } } return false } // UpdateManagedAgentSentStatus updates the sent status of the managed agent with the name specified. If the agent is not found, // this method returns false. 
func (c *Container) UpdateManagedAgentSentStatus(agentName string, status apicontainerstatus.ManagedAgentStatus) bool { c.lock.Lock() defer c.lock.Unlock() for i, ma := range c.ManagedAgentsUnsafe { if ma.Name == agentName { c.ManagedAgentsUnsafe[i].SentStatus = status return true } } return false } func (c *Container) GetManagedAgentStatus(agentName string) apicontainerstatus.ManagedAgentStatus { c.lock.RLock() defer c.lock.RUnlock() for i, ma := range c.ManagedAgentsUnsafe { if ma.Name == agentName { return c.ManagedAgentsUnsafe[i].Status } } // we shouldn't get here because we'll always have a valid ManagedAgentName return apicontainerstatus.ManagedAgentStatusNone } func (c *Container) GetManagedAgentSentStatus(agentName string) apicontainerstatus.ManagedAgentStatus { c.lock.RLock() defer c.lock.RUnlock() for i, ma := range c.ManagedAgentsUnsafe { if ma.Name == agentName { return c.ManagedAgentsUnsafe[i].SentStatus } } // we shouldn't get here because we'll always have a valid ManagedAgentName return apicontainerstatus.ManagedAgentStatusNone } func (c *Container) SetContainerTornDown(td bool) { c.lock.Lock() defer c.lock.Unlock() c.ContainerTornDownUnsafe = td } func (c *Container) IsContainerTornDown() bool { c.lock.RLock() defer c.lock.RUnlock() return c.ContainerTornDownUnsafe }
1
26,591
not blocking: can we remove extra lines here?
aws-amazon-ecs-agent
go
@@ -44,7 +44,7 @@ var PrivateKey = function PrivateKey(data, network, compressed) { }; // detect type of data - if (!data){ + if (typeof(data) === 'undefined' || data === 'random'){ info.bn = PrivateKey._getRandomBN(); } else if (data instanceof BN) { info.bn = data;
1
'use strict'; var Address = require('./address'); var base58check = require('./encoding/base58check'); var BN = require('./crypto/bn'); var JSUtil = require('./util/js'); var Networks = require('./networks'); var Point = require('./crypto/point'); var PublicKey = require('./publickey'); var Random = require('./crypto/random'); /** * Instantiate a PrivateKey from a BN, Buffer and WIF. * * @example * * // generate a new random key * var key = PrivateKey(); * * // get the associated address * var address = key.toAddress(); * * // encode into wallet export format * var exported = key.toWIF(); * * // instantiate from the exported (and saved) private key * var imported = PrivateKey.fromWIF(exported); * * @param {String} data - The encoded data in various formats * @param {String} [network] - Either "livenet" or "testnet" * @param {Boolean} [compressed] - If the key is in compressed format * @returns {PrivateKey} A new valid instance of an PrivateKey * @constructor */ var PrivateKey = function PrivateKey(data, network, compressed) { if (!(this instanceof PrivateKey)) { return new PrivateKey(data, network, compressed); } var info = { compressed: typeof(compressed) !== 'undefined' ? compressed : true, network: network ? Networks.get(network) : Networks.defaultNetwork }; // detect type of data if (!data){ info.bn = PrivateKey._getRandomBN(); } else if (data instanceof BN) { info.bn = data; } else if (data instanceof Buffer || data instanceof Uint8Array) { info = PrivateKey._transformBuffer(data, network, compressed); } else if (typeof(data) === 'string'){ if (JSUtil.isHexa(data)) { info.bn = BN(new Buffer(data, 'hex')); } else { info = PrivateKey._transformWIF(data, network, compressed); } } else { throw new TypeError('First argument is an unrecognized data type.'); } // validation if (!info.bn.lt(Point.getN())) { throw new TypeError('Number must be less than N'); } if (typeof(info.network) === 'undefined') { throw new TypeError('Must specify the network ("livenet" or "testnet")'); } if (typeof(info.compressed) !== 'boolean') { throw new TypeError('Must specify whether the corresponding public key is compressed or not (true or false)'); } Object.defineProperty(this, 'bn', { configurable: false, value: info.bn }); Object.defineProperty(this, 'compressed', { configurable: false, value: info.compressed }); Object.defineProperty(this, 'network', { configurable: false, value: info.network }); Object.defineProperty(this, 'publicKey', { configurable: false, get: function() { if (!info.publicKey) { info.publicKey = this.toPublicKey(); } return info.publicKey; } }); return this; }; /** * Internal function to get a random BN * * @returns {BN} A new randomly generated BN * @private */ PrivateKey._getRandomBN = function(){ var condition; var bn; do { var privbuf = Random.getRandomBuffer(32); bn = BN().fromBuffer(privbuf); condition = bn.lt(Point.getN()); } while (!condition); return bn; }; /** * Internal function to transform a WIF Buffer into a private key * * @param {Buffer} buf - An WIF string * @param {String} [network] - Either "livenet" or "testnet" * @param {String} [compressed] - If the private key is compressed * @returns {Object} An object with keys: bn, network and compressed * @private */ PrivateKey._transformBuffer = function(buf, network, compressed) { var info = {}; if (buf.length === 1 + 32 + 1 && buf[1 + 32 + 1 - 1] === 1) { info.compressed = true; } else if (buf.length === 1 + 32) { info.compressed = false; } else { throw new Error('Length of buffer must be 33 (uncompressed) or 34 
(compressed)'); } if (buf[0] === Networks.livenet.privatekey) { info.network = Networks.livenet; } else if (buf[0] === Networks.testnet.privatekey) { info.network = Networks.testnet; } else { throw new Error('Invalid network'); } if (network && info.network !== Networks.get(network)) { throw TypeError('Private key network mismatch'); } if (typeof(compressed) !== 'undefined' && info.compressed !== compressed){ throw TypeError('Private key compression mismatch'); } info.bn = BN.fromBuffer(buf.slice(1, 32 + 1)); return info; }; /** * Internal function to transform a WIF string into a private key * * @param {String} buf - An WIF string * @returns {Object} An object with keys: bn, network and compressed * @private */ PrivateKey._transformWIF = function(str, network, compressed) { return PrivateKey._transformBuffer(base58check.decode(str), network, compressed); }; /** * Instantiate a PrivateKey from a WIF string * * @param {String} str - The WIF encoded private key string * @returns {PrivateKey} A new valid instance of PrivateKey */ PrivateKey.fromWIF = function(str) { var info = PrivateKey._transformWIF(str); return new PrivateKey(info.bn, info.network, info.compressed); }; /** * Instantiate a PrivateKey from a JSON string * * @param {String} json - The JSON encoded private key string * @returns {PrivateKey} A new valid instance of PrivateKey */ PrivateKey.fromJSON = function(json) { if (JSUtil.isValidJSON(json)) { json = JSON.parse(json); } var bn = BN(json.bn, 'hex'); return new PrivateKey(bn, json.network, json.compressed); }; /** * Instantiate a PrivateKey from random bytes * * @param {String} [network] - Either "livenet" or "testnet" * @param {String} [compressed] - If the private key is compressed * @returns {PrivateKey} A new valid instance of PrivateKey */ PrivateKey.fromRandom = function(network, compressed) { var bn = PrivateKey._getRandomBN(); return new PrivateKey(bn, network, compressed); }; /** * Instantiate a PrivateKey from a WIF string * * @param {String} str - The WIF encoded private key string * @returns {PrivateKey} A new valid instance of PrivateKey */ PrivateKey.fromString = function(str) { var info = PrivateKey._transformWIF(str); return new PrivateKey(info.bn, info.network, info.compressed); }; /** * Check if there would be any errors when initializing a PrivateKey * * @param {String} data - The encoded data in various formats * @param {String} [network] - Either "livenet" or "testnet" * @param {String} [compressed] - If the private key is compressed * @returns {null|Error} An error if exists */ PrivateKey.getValidationError = function(data, network, compressed) { var error; try { new PrivateKey(data, network, compressed); } catch (e) { error = e; } return error; }; /** * Check if the parameters are valid * * @param {String} data - The encoded data in various formats * @param {String} [network] - Either "livenet" or "testnet" * @param {String} [compressed] - If the private key is compressed * @returns {Boolean} If the private key is would be valid */ PrivateKey.isValid = function(data, network, compressed){ return !PrivateKey.getValidationError(data, network, compressed); }; /** * Will output the PrivateKey to a WIF string * * @returns {String} A WIP representation of the private key */ PrivateKey.prototype.toWIF = function() { var network = this.network; var compressed = this.compressed; var buf; if (compressed) { buf = Buffer.concat([new Buffer([network.privatekey]), this.bn.toBuffer({size: 32}), new Buffer([0x01])]); } else { buf = Buffer.concat([new 
Buffer([network.privatekey]), this.bn.toBuffer({size: 32})]); } return base58check.encode(buf); }; /** * Will return the private key as a BN instance * * @returns {BN} A BN instance of the private key */ PrivateKey.prototype.toBigNumber = function(){ return this.bn; }; /** * Will return the private key as a BN buffer * * @returns {Buffer} A buffer of the private key */ PrivateKey.prototype.toBuffer = function(){ return this.bn.toBuffer(); }; /** * Will return the corresponding public key * * @returns {PublicKey} A public key generated from the private key */ PrivateKey.prototype.toPublicKey = function(){ return PublicKey.fromPrivateKey(this); }; /** * Will return an address for the private key * * @returns {Address} An address generated from the private key */ PrivateKey.prototype.toAddress = function() { var pubkey = this.toPublicKey(); return Address.fromPublicKey(pubkey, this.network); }; /** * @returns {Object} A plain object representation */ PrivateKey.prototype.toObject = function toObject() { return { bn: this.bn.toString('hex'), compressed: this.compressed, network: this.network.toString() }; }; PrivateKey.prototype.toJSON = function toJSON() { return JSON.stringify(this.toObject()); }; /** * Will output the PrivateKey to a WIF string * * @returns {String} A WIF representation of the private key */ PrivateKey.prototype.toString = function() { return this.toWIF(); }; /** * Will return a string formatted for the console * * @returns {String} Private key */ PrivateKey.prototype.inspect = function() { return '<PrivateKey: ' + this.toString() + ', compressed: '+this.compressed+', network: '+this.network+'>'; }; module.exports = PrivateKey;
1
13,395
This just feels weird... can we use some kind of constant here? something like: `if (_.isUndefined(data) || data === PrivateKey.Random)`
bitpay-bitcore
js
@@ -61,9 +61,12 @@ class ProjectorCompilerPass implements CompilerPassInterface } $parameters = $method->getParameters(); - $eventClass = (string) $parameters[0]->getType(); - $definition->addMethodCall('add', [new Reference($serviceId), $eventClass]); + $class = $parameters[0]->getClass(); + if ($class) { + $eventClass = $class->getName(); + $definition->addMethodCall('add', [new Reference($serviceId), $eventClass]); + } } } }
1
<?php /** * Copyright © Bold Brand Commerce Sp. z o.o. All rights reserved. * See LICENSE.txt for license details. */ declare(strict_types=1); namespace Ergonode\EventSourcing\Application\DependencyInjection\CompilerPass; use Symfony\Component\DependencyInjection\Compiler\CompilerPassInterface; use Symfony\Component\DependencyInjection\ContainerBuilder; use Ergonode\EventSourcing\Infrastructure\Projector\ProjectorProvider; use Symfony\Component\DependencyInjection\Reference; use Symfony\Component\DependencyInjection\Exception\RuntimeException; use Ergonode\SharedKernel\Domain\DomainEventInterface; class ProjectorCompilerPass implements CompilerPassInterface { public const TAG = 'ergonode.event_sourcing.projector'; public function process(ContainerBuilder $container): void { if ($container->has(ProjectorProvider::class)) { $this->processServices($container); } } private function processServices(ContainerBuilder $container): void { $definition = $container->findDefinition(ProjectorProvider::class); $serviceIds = $container->findTaggedServiceIds(self::TAG); foreach (array_keys($serviceIds) as $serviceId) { $service = $container->findDefinition($serviceId); $className = $service->getClass(); $reflection = new \ReflectionClass($className); if (!$reflection->hasMethod('__invoke')) { throw new RuntimeException( sprintf( 'Invalid projector "%s": class "%s" method "__invoke" does not exist.', $serviceId, $reflection->getName(), ) ); } $method = $reflection->getMethod('__invoke'); if (1 !== $method->getNumberOfRequiredParameters()) { throw new RuntimeException( sprintf( 'Invalid projector "%s": class "%s: method "__invoke()" required one argument "%s"', $serviceId, $reflection->getName(), DomainEventInterface::class ) ); } $parameters = $method->getParameters(); $eventClass = (string) $parameters[0]->getType(); $definition->addMethodCall('add', [new Reference($serviceId), $eventClass]); } } }
1
9,402
If it does not have a class, I guess the exception should be thrown, because we cannot recognize the type based on it?
ergonode-backend
php
@@ -262,12 +262,18 @@ public final class DiscoveryMethodModel implements MethodModel { } private DiscoveryField createFieldMaskField() { + // TODO(andrealin): Change this to a Set instead of a List. return DiscoveryField.create( StandardSchemaGenerator.createListSchema( StandardSchemaGenerator.createStringSchema( "", SurfaceNamer.Cardinality.NOT_REPEATED, true), DiscoveryMethodTransformer.FIELDMASK_STRING, - true), + true, + "The fields that should be serialized (empty values will be serialized). " + + "If the containing message object has a non-null fieldmask, " + + "then all the fields in the field mask (and only those fields in the field mask) " + + "will be serialized. If the containing object does not have a fieldmask, then " + + "only non-empty fields will be serialized. "), null); }
1
/* Copyright 2017 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.api.codegen.config; import com.google.api.codegen.configgen.transformer.DiscoveryMethodTransformer; import com.google.api.codegen.discogapic.EmptyTypeModel; import com.google.api.codegen.discogapic.transformer.DiscoGapicParser; import com.google.api.codegen.discovery.Method; import com.google.api.codegen.discovery.Schema; import com.google.api.codegen.discovery.StandardSchemaGenerator; import com.google.api.codegen.transformer.ImportTypeTable; import com.google.api.codegen.transformer.SurfaceNamer; import com.google.api.codegen.transformer.TypeNameConverter; import com.google.api.codegen.util.Name; import com.google.api.codegen.util.TypeName; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableList.Builder; import com.google.common.collect.ImmutableSet; import java.util.LinkedHashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; /** A wrapper around the model of a Discovery Method. */ public final class DiscoveryMethodModel implements MethodModel { private ImmutableSet<String> IDEMPOTENT_HTTP_METHODS = ImmutableSet.of("GET", "HEAD", "PUT", "DELETE"); private final Method method; private final DiscoveryRequestType inputType; private final TypeModel outputType; private List<DiscoveryField> inputFields; private List<DiscoveryField> outputFields; private List<DiscoveryField> resourceNameInputFields; private final DiscoApiModel apiModel; private final boolean hasExtraFieldMask; private final DiscoveryField fieldMaskField; /* Create a DiscoveryMethodModel from a non-null Discovery Method object. */ public DiscoveryMethodModel(Method method, DiscoApiModel apiModel) { Preconditions.checkNotNull(method); this.method = method; this.apiModel = apiModel; String httpMethod = method.httpMethod().toUpperCase().trim(); hasExtraFieldMask = httpMethod.equals("PATCH") || httpMethod.equals("PUT"); if (hasExtraFieldMask) { fieldMaskField = createFieldMaskField(); } else { fieldMaskField = null; } this.inputType = DiscoveryRequestType.create(this); if (method.response() != null) { this.outputType = DiscoveryField.create(method.response(), apiModel); } else { this.outputType = EmptyTypeModel.getInstance(); } } public Method getDiscoMethod() { return method; } @Override public String getOutputTypeSimpleName() { return outputType.getTypeName(); } /** * Returns the parameter with the fieldName if it exists, otherwise returns the request object * with name fieldName, if it exists. 
*/ @Override public DiscoveryField getInputField(String fieldName) { Schema targetSchema = method.parameters().get(fieldName); if (targetSchema != null) { return DiscoveryField.create(targetSchema, apiModel); } if (method.request() != null && DiscoGapicParser.getMethodInputName(method).toLowerCamel().equals(fieldName)) { return DiscoveryField.create(method.request(), apiModel); } if (hasExtraFieldMask && DiscoveryMethodTransformer.FIELDMASK_STRING.equals(fieldName)) { return fieldMaskField; } return null; } @Override public DiscoveryField getOutputField(String fieldName) { if (outputType.isEmptyType() || outputType.isPrimitive()) { return null; } return ((DiscoveryField) outputType).getField(fieldName); } @Override public String getFullName() { return method.id(); } @Override public String getRawName() { return method.id(); } @Override public String getInputFullName() { return method.request().getIdentifier(); } @Override public String getDescription() { return method.description(); } @Override public TypeName getOutputTypeName(ImportTypeTable typeTable) { return typeTable.getTypeTable().getTypeName(typeTable.getFullNameFor(outputType)); } @Override public String getOutputFullName() { return outputType.getTypeName(); } @Override public TypeName getInputTypeName(ImportTypeTable typeTable) { return typeTable.getTypeTable().getTypeName(typeTable.getFullNameFor(inputType)); } @Override public GenericFieldSelector getInputFieldSelector(String fieldName) { return null; } @Override public boolean getRequestStreaming() { return false; } @Override public boolean getResponseStreaming() { return false; } @Override public Name asName() { return DiscoGapicParser.methodAsName(method); } @Override public boolean isOutputTypeEmpty() { return outputType == null || outputType.isEmptyType(); } @Override public boolean equals(Object o) { return o instanceof DiscoveryMethodModel && ((DiscoveryMethodModel) o).method.equals(method); } @Override public String getSimpleName() { return DiscoGapicParser.methodAsName(method).toLowerCamel(); } @Override public String getParentSimpleName() { return "getParentSimpleName() not implemented."; } @Override public String getParentNickname(TypeNameConverter typeNameConverter) { return null; } @Override public String getAndSaveRequestTypeName(ImportTypeTable typeTable, SurfaceNamer surfaceNamer) { return typeTable.getAndSaveNicknameFor(inputType); } @Override public String getAndSaveResponseTypeName(ImportTypeTable typeTable, SurfaceNamer surfaceNamer) { return typeTable.getAndSaveNicknameFor(outputType); } @Override public String getScopedDescription() { return method.description(); } private List<DiscoveryField> getResourceNameInputFields() { if (resourceNameInputFields != null) { return resourceNameInputFields; } ImmutableList.Builder<DiscoveryField> params = ImmutableList.builder(); for (DiscoveryField field : getInputFields()) { if (field.getDiscoveryField().isPathParam()) { params.add(field); } } resourceNameInputFields = params.build(); return resourceNameInputFields; } @Override public List<DiscoveryField> getInputFieldsForResourceNameMethod() { List<DiscoveryField> fields = new LinkedList<>(); for (DiscoveryField field : getInputFields()) { if (!getResourceNameInputFields().contains(field)) { // Only add fields that aren't part of the ResourceName. fields.add(field); } } // Add the field that represents the ResourceName. 
String resourceName = DiscoGapicParser.getResourceIdentifier(method.flatPath()).toLowerCamel(); for (DiscoveryField field : getInputFields()) { if (field.getNameAsParameterName().toLowerCamel().equals(resourceName)) { fields.add(field); break; } } return fields; } @Override public List<DiscoveryField> getInputFields() { if (inputFields != null) { return inputFields; } ImmutableList.Builder<DiscoveryField> fieldsBuilder = ImmutableList.builder(); for (Schema field : method.parameters().values()) { fieldsBuilder.add(DiscoveryField.create(field, apiModel)); } if (method.request() != null) { fieldsBuilder.add(DiscoveryField.create(method.request(), apiModel)); } if (hasExtraFieldMask) { fieldsBuilder.add(fieldMaskField); } inputFields = fieldsBuilder.build(); return inputFields; } private DiscoveryField createFieldMaskField() { return DiscoveryField.create( StandardSchemaGenerator.createListSchema( StandardSchemaGenerator.createStringSchema( "", SurfaceNamer.Cardinality.NOT_REPEATED, true), DiscoveryMethodTransformer.FIELDMASK_STRING, true), null); } /** * Returns a list containing the response schema as the sole element; or returns an empty list if * this method has no response schema. */ @Override public List<DiscoveryField> getOutputFields() { if (outputFields != null) { return outputFields; } ImmutableList.Builder<DiscoveryField> outputField = new Builder<>(); if (method.response() != null) { DiscoveryField fieldModel = DiscoveryField.create(method.response(), apiModel); outputField.add(fieldModel); } outputFields = outputField.build(); return outputFields; } /** * Return if this method, as an HTTP method, is idempotent. Based off {@link * com.google.api.tools.framework.aspects.http.model.MethodKind}. */ @Override public boolean isIdempotent() { String httpMethod = method.httpMethod().toUpperCase(); return IDEMPOTENT_HTTP_METHODS.contains(httpMethod); } @Override public Map<String, String> getResourcePatternNameMap(Map<String, String> nameMap) { Map<String, String> resources = new LinkedHashMap<>(); for (Map.Entry<String, String> entry : nameMap.entrySet()) { if (DiscoGapicParser.getCanonicalPath(method.flatPath()).equals(entry.getKey())) { String resourceNameString = DiscoGapicParser.getResourceIdentifier(entry.getKey()).toLowerCamel(); resources.put(resourceNameString, entry.getValue()); break; } } return resources; } @Override public TypeModel getInputType() { return inputType; } @Override public TypeModel getOutputType() { return outputType; } @Override public boolean hasExtraFieldMask() { return hasExtraFieldMask; } }
1
28,205
LOL: use your GitHub username?
googleapis-gapic-generator
java
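The patch in the record above adds a description documenting a serialization rule for the extra field-mask parameter: when the containing message has a non-null field mask, exactly the fields named in the mask are serialized (including empty values); when it has no field mask, only non-empty fields are serialized. The following is a minimal, self-contained Java sketch of that rule only; the PartialMessage class and its fieldsToSerialize method are hypothetical illustrations and are not part of the gapic-generator code base.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical message holder used only to illustrate the field-mask rule
// described in the patch; not the generator's or the API's real types.
class PartialMessage {
    // A null fieldMask means "serialize only non-empty fields".
    List<String> fieldMask;
    Map<String, String> fields = new LinkedHashMap<>();

    Map<String, String> fieldsToSerialize() {
        Map<String, String> out = new LinkedHashMap<>();
        if (fieldMask != null) {
            // Non-null field mask: serialize exactly the masked fields,
            // even when their values are empty.
            for (String name : fieldMask) {
                out.put(name, fields.getOrDefault(name, ""));
            }
        } else {
            // No field mask: serialize only fields with non-empty values.
            for (Map.Entry<String, String> e : fields.entrySet()) {
                if (e.getValue() != null && !e.getValue().isEmpty()) {
                    out.put(e.getKey(), e.getValue());
                }
            }
        }
        return out;
    }
}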
@@ -116,6 +116,16 @@ public class Key implements Comparable<Key> { return toRawKey(Arrays.copyOf(value, value.length + 1)); } + /** + * nextPrefix key will be key with next available rid. For example, if the current key is + * prefix_rid, after calling this method, the return value should be prefix_rid+1 + * + * @return a new key current rid+1. + */ + public Key nextPrefix() { + return toRawKey(prefixNext(value)); + } + /** * The prefixNext key for bytes domain *
1
/* * Copyright 2017 PingCAP, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * See the License for the specific language governing permissions and * limitations under the License. */ package com.pingcap.tikv.key; import static com.pingcap.tikv.codec.KeyUtils.formatBytes; import static java.util.Objects.requireNonNull; import com.google.protobuf.ByteString; import com.pingcap.tikv.codec.CodecDataOutput; import com.pingcap.tikv.types.DataType; import com.pingcap.tikv.util.FastByteComparisons; import java.util.Arrays; public class Key implements Comparable<Key> { protected static final byte[] TBL_PREFIX = new byte[] {'t'}; protected final byte[] value; protected final int infFlag; public static final Key EMPTY = createEmpty(); public static final Key NULL = createNull(); public static final Key MIN = createTypelessMin(); public static final Key MAX = createTypelessMax(); private Key(byte[] value, boolean negative) { this.value = requireNonNull(value, "value is null"); this.infFlag = (value.length == 0 ? 1 : 0) * (negative ? -1 : 1); } protected Key(byte[] value) { this(value, false); } public static Key toRawKey(ByteString bytes, boolean negative) { return new Key(bytes.toByteArray(), negative); } public static Key toRawKey(ByteString bytes) { return new Key(bytes.toByteArray()); } public static Key toRawKey(byte[] bytes, boolean negative) { return new Key(bytes, negative); } public static Key toRawKey(byte[] bytes) { return new Key(bytes); } private static Key createNull() { CodecDataOutput cdo = new CodecDataOutput(); DataType.encodeNull(cdo); return new Key(cdo.toBytes()) { @Override public String toString() { return "null"; } }; } private static Key createEmpty() { return new Key(new byte[0]) { @Override public Key next() { return this; } @Override public String toString() { return "EMPTY"; } }; } private static Key createTypelessMin() { CodecDataOutput cdo = new CodecDataOutput(); DataType.encodeIndex(cdo); return new Key(cdo.toBytes()) { @Override public String toString() { return "MIN"; } }; } private static Key createTypelessMax() { CodecDataOutput cdo = new CodecDataOutput(); DataType.encodeMaxValue(cdo); return new Key(cdo.toBytes()) { @Override public String toString() { return "MAX"; } }; } /** * Next key simply append a zero byte to previous key. 
* * @return next key with a zero byte appended */ public Key next() { return toRawKey(Arrays.copyOf(value, value.length + 1)); } /** * The prefixNext key for bytes domain * * <p>It first plus one at LSB and if LSB overflows, a zero byte is appended at the end Original * bytes will be reused if possible * * @return encoded results */ static byte[] prefixNext(byte[] value) { int i; byte[] newVal = Arrays.copyOf(value, value.length); for (i = newVal.length - 1; i >= 0; i--) { newVal[i]++; if (newVal[i] != 0) { break; } } if (i == -1) { return Arrays.copyOf(value, value.length + 1); } else { return newVal; } } @Override public int compareTo(Key other) { requireNonNull(other, "other is null"); if ((this.infFlag | other.infFlag) != 0) { return this.infFlag - other.infFlag; } return FastByteComparisons.compareTo(value, other.value); } @Override public boolean equals(Object other) { if (other == this) { return true; } if (other instanceof Key) { return compareTo((Key) other) == 0; } else { return false; } } @Override public int hashCode() { return Arrays.hashCode(value) * infFlag; } public byte[] getBytes() { return value; } public ByteString toByteString() { return ByteString.copyFrom(value); } public int getInfFlag() { return infFlag; } @Override public String toString() { if (infFlag < 0) { return "-INF"; } else if (infFlag > 0) { return "+INF"; } else { return String.format("{%s}", formatBytes(value)); } } }
1
9,562
Any tests for nextPrefix()?
pingcap-tispark
java
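The review comment in the record above asks whether nextPrefix() has tests. A minimal sketch of what such tests could look like, following the behaviour the patch describes (bump the least-significant byte, carry on overflow, and append a zero byte to the original key when every byte overflows); the test class name and the use of JUnit 4 are assumptions, not part of the submitted change:

import static org.junit.Assert.assertArrayEquals;

import com.pingcap.tikv.key.Key;
import org.junit.Test;

public class KeyNextPrefixTest {

  @Test
  public void nextPrefixIncrementsLastByte() {
    // "prefix_rid" style key: only the trailing byte is bumped by one
    Key key = Key.toRawKey(new byte[] {'p', 'r', 'e', 'f', 'i', 'x', 1});
    assertArrayEquals(
        new byte[] {'p', 'r', 'e', 'f', 'i', 'x', 2}, key.nextPrefix().getBytes());
  }

  @Test
  public void nextPrefixCarriesOnOverflow() {
    // 0xFF overflows to 0x00 and the carry moves to the next more significant byte
    Key key = Key.toRawKey(new byte[] {0x01, (byte) 0xFF});
    assertArrayEquals(new byte[] {0x02, 0x00}, key.nextPrefix().getBytes());
  }

  @Test
  public void nextPrefixAppendsZeroWhenAllBytesOverflow() {
    // when every byte is 0xFF the original bytes are kept and a zero byte is appended
    Key key = Key.toRawKey(new byte[] {(byte) 0xFF, (byte) 0xFF});
    assertArrayEquals(
        new byte[] {(byte) 0xFF, (byte) 0xFF, 0x00}, key.nextPrefix().getBytes());
  }
}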
@@ -482,7 +482,7 @@ public class PrettyPrintVisitor implements VoidVisitor<Void> {
         printMemberAnnotations(n.getAnnotations(), arg);
         printModifiers(n.getModifiers());
         if (!n.getVariables().isEmpty()) {
-            n.getMaximumCommonType().accept(this, arg);
+            n.getMaximumCommonType().ifPresent(t -> t.accept(this, arg));
         }
         printer.print(" ");
1
/* * Copyright (C) 2007-2010 Júlio Vilmar Gesser. * Copyright (C) 2011, 2013-2016 The JavaParser Team. * * This file is part of JavaParser. * * JavaParser can be used either under the terms of * a) the GNU Lesser General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * b) the terms of the Apache License * * You should have received a copy of both licenses in LICENCE.LGPL and * LICENCE.APACHE. Please refer to those files for details. * * JavaParser is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. */ package com.github.javaparser.printer; import com.github.javaparser.Position; import com.github.javaparser.ast.*; import com.github.javaparser.ast.body.*; import com.github.javaparser.ast.comments.BlockComment; import com.github.javaparser.ast.comments.Comment; import com.github.javaparser.ast.comments.JavadocComment; import com.github.javaparser.ast.comments.LineComment; import com.github.javaparser.ast.expr.*; import com.github.javaparser.ast.modules.*; import com.github.javaparser.ast.nodeTypes.NodeWithTypeArguments; import com.github.javaparser.ast.nodeTypes.NodeWithVariables; import com.github.javaparser.ast.stmt.*; import com.github.javaparser.ast.type.*; import com.github.javaparser.ast.visitor.Visitable; import com.github.javaparser.ast.visitor.VoidVisitor; import java.util.*; import java.util.stream.Collectors; import static com.github.javaparser.ast.Node.Parsedness.UNPARSABLE; import static com.github.javaparser.utils.PositionUtils.sortByBeginPosition; import static com.github.javaparser.utils.Utils.isNullOrEmpty; import static com.github.javaparser.utils.Utils.normalizeEolInTextBlock; /** * Outputs the AST as formatted Java source code. 
* * @author Julio Vilmar Gesser */ public class PrettyPrintVisitor implements VoidVisitor<Void> { protected final PrettyPrinterConfiguration configuration; protected final SourcePrinter printer; private Deque<Position> methodChainPositions = new LinkedList<>(); public PrettyPrintVisitor(PrettyPrinterConfiguration prettyPrinterConfiguration) { configuration = prettyPrinterConfiguration; printer = new SourcePrinter(configuration.getIndent(), configuration.getEndOfLineCharacter()); pushMethodChainPosition(printer.getCursor()); // initialize a default position for methodChainPositions, it is expected by method #resetMethodChainPosition() } public String getSource() { return printer.getSource(); } public void resetMethodChainPosition(Position position) { this.methodChainPositions.pop(); this.methodChainPositions.push(position); } public void pushMethodChainPosition(Position position) { this.methodChainPositions.push(position); } public Position peekMethodChainPosition() { return this.methodChainPositions.peek(); } public Position popMethodChainPosition() { return this.methodChainPositions.pop(); } private void printModifiers(final EnumSet<Modifier> modifiers) { if (modifiers.size() > 0) { printer.print(modifiers.stream().map(Modifier::asString).collect(Collectors.joining(" ")) + " "); } } private void printMembers(final NodeList<BodyDeclaration<?>> members, final Void arg) { for (final BodyDeclaration<?> member : members) { printer.println(); member.accept(this, arg); printer.println(); } } private void printMemberAnnotations(final NodeList<AnnotationExpr> annotations, final Void arg) { if (annotations.isEmpty()) { return; } for (final AnnotationExpr a : annotations) { a.accept(this, arg); printer.println(); } } private void printAnnotations(final NodeList<AnnotationExpr> annotations, boolean prefixWithASpace, final Void arg) { if (annotations.isEmpty()) { return; } if (prefixWithASpace) { printer.print(" "); } for (AnnotationExpr annotation : annotations) { annotation.accept(this, arg); printer.print(" "); } } private void printTypeArgs(final NodeWithTypeArguments<?> nodeWithTypeArguments, final Void arg) { NodeList<Type> typeArguments = nodeWithTypeArguments.getTypeArguments().orElse(null); if (!isNullOrEmpty(typeArguments)) { printer.print("<"); for (final Iterator<Type> i = typeArguments.iterator(); i.hasNext(); ) { final Type t = i.next(); t.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } printer.print(">"); } } private void printTypeParameters(final NodeList<TypeParameter> args, final Void arg) { if (!isNullOrEmpty(args)) { printer.print("<"); for (final Iterator<TypeParameter> i = args.iterator(); i.hasNext(); ) { final TypeParameter t = i.next(); t.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } printer.print(">"); } } private void printArguments(final NodeList<Expression> args, final Void arg) { printer.print("("); Position cursorRef = printer.getCursor(); if (!isNullOrEmpty(args)) { for (final Iterator<Expression> i = args.iterator(); i.hasNext(); ) { final Expression e = i.next(); e.accept(this, arg); if (i.hasNext()) { printer.print(","); if (configuration.isColumnAlignParameters()) { printer.wrapToColumn(cursorRef.column); } else { printer.print(" "); } } } } printer.print(")"); } private void printPrePostFixOptionalList(final NodeList<? extends Visitable> args, final Void arg, String prefix, String separator, String postfix) { if (!args.isEmpty()) { printer.print(prefix); for (final Iterator<? 
extends Visitable> i = args.iterator(); i.hasNext(); ) { final Visitable v = i.next(); v.accept(this, arg); if (i.hasNext()) { printer.print(separator); } } printer.print(postfix); } } private void printPrePostFixRequiredList(final NodeList<? extends Visitable> args, final Void arg, String prefix, String separator, String postfix) { printer.print(prefix); if (!args.isEmpty()) { for (final Iterator<? extends Visitable> i = args.iterator(); i.hasNext(); ) { final Visitable v = i.next(); v.accept(this, arg); if (i.hasNext()) { printer.print(separator); } } } printer.print(postfix); } private void printComment(final Optional<Comment> comment, final Void arg) { comment.ifPresent(c -> c.accept(this, arg)); } @Override public void visit(final CompilationUnit n, final Void arg) { printComment(n.getComment(), arg); if (n.getParsed() == UNPARSABLE) { printer.println("???"); return; } if (n.getPackageDeclaration().isPresent()) { n.getPackageDeclaration().get().accept(this, arg); } n.getImports().accept(this, arg); if (!n.getImports().isEmpty()) { printer.println(); } for (final Iterator<TypeDeclaration<?>> i = n.getTypes().iterator(); i.hasNext(); ) { i.next().accept(this, arg); printer.println(); if (i.hasNext()) { printer.println(); } } n.getModule().ifPresent(m -> m.accept(this, arg)); printOrphanCommentsEnding(n); } @Override public void visit(final PackageDeclaration n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), false, arg); printer.print("package "); n.getName().accept(this, arg); printer.println(";"); printer.println(); printOrphanCommentsEnding(n); } @Override public void visit(final NameExpr n, final Void arg) { printComment(n.getComment(), arg); n.getName().accept(this, arg); printOrphanCommentsEnding(n); } @Override public void visit(final Name n, final Void arg) { printComment(n.getComment(), arg); if (n.getQualifier().isPresent()) { n.getQualifier().get().accept(this, arg); printer.print("."); } printAnnotations(n.getAnnotations(), false, arg); printer.print(n.getIdentifier()); printOrphanCommentsEnding(n); } @Override public void visit(SimpleName n, Void arg) { printer.print(n.getIdentifier()); } @Override public void visit(final ClassOrInterfaceDeclaration n, final Void arg) { printComment(n.getComment(), arg); printMemberAnnotations(n.getAnnotations(), arg); printModifiers(n.getModifiers()); if (n.isInterface()) { printer.print("interface "); } else { printer.print("class "); } n.getName().accept(this, arg); printTypeParameters(n.getTypeParameters(), arg); if (!n.getExtendedTypes().isEmpty()) { printer.print(" extends "); for (final Iterator<ClassOrInterfaceType> i = n.getExtendedTypes().iterator(); i.hasNext(); ) { final ClassOrInterfaceType c = i.next(); c.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } if (!n.getImplementedTypes().isEmpty()) { printer.print(" implements "); for (final Iterator<ClassOrInterfaceType> i = n.getImplementedTypes().iterator(); i.hasNext(); ) { final ClassOrInterfaceType c = i.next(); c.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } printer.println(" {"); printer.indent(); if (!isNullOrEmpty(n.getMembers())) { printMembers(n.getMembers(), arg); } printOrphanCommentsEnding(n); printer.unindent(); printer.print("}"); } @Override public void visit(final JavadocComment n, final Void arg) { if (configuration.isPrintComments() && configuration.isPrintJavadoc()) { printer.println("/**"); final String commentContent = normalizeEolInTextBlock(n.getContent(), 
configuration.getEndOfLineCharacter()); String[] lines = commentContent.split("\\R"); boolean skippingLeadingEmptyLines = true; boolean prependEmptyLine = false; for (String line : lines) { line = line.trim(); if (line.startsWith("*")) { line = line.substring(1).trim(); } if (line.isEmpty()) { if (!skippingLeadingEmptyLines) { prependEmptyLine = true; } } else { skippingLeadingEmptyLines = false; if (prependEmptyLine) { printer.println(" *"); prependEmptyLine = false; } printer.println(" * " + line); } } printer.println(" */"); } } @Override public void visit(final ClassOrInterfaceType n, final Void arg) { printComment(n.getComment(), arg); if (n.getScope().isPresent()) { n.getScope().get().accept(this, arg); printer.print("."); } for (AnnotationExpr ae : n.getAnnotations()) { ae.accept(this, arg); printer.print(" "); } n.getName().accept(this, arg); if (n.isUsingDiamondOperator()) { printer.print("<>"); } else { printTypeArgs(n, arg); } } @Override public void visit(final TypeParameter n, final Void arg) { printComment(n.getComment(), arg); for (AnnotationExpr ann : n.getAnnotations()) { ann.accept(this, arg); printer.print(" "); } n.getName().accept(this, arg); if (!isNullOrEmpty(n.getTypeBound())) { printer.print(" extends "); for (final Iterator<ClassOrInterfaceType> i = n.getTypeBound().iterator(); i.hasNext(); ) { final ClassOrInterfaceType c = i.next(); c.accept(this, arg); if (i.hasNext()) { printer.print(" & "); } } } } @Override public void visit(final PrimitiveType n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), true, arg); printer.print(n.getType().asString()); } @Override public void visit(final ArrayType n, final Void arg) { final List<ArrayType> arrayTypeBuffer = new LinkedList<>(); Type type = n; while (type instanceof ArrayType) { final ArrayType arrayType = (ArrayType) type; arrayTypeBuffer.add(arrayType); type = arrayType.getComponentType(); } type.accept(this, arg); for (ArrayType arrayType : arrayTypeBuffer) { printAnnotations(arrayType.getAnnotations(), true, arg); printer.print("[]"); } } @Override public void visit(final ArrayCreationLevel n, final Void arg) { printAnnotations(n.getAnnotations(), true, arg); printer.print("["); if (n.getDimension().isPresent()) { n.getDimension().get().accept(this, arg); } printer.print("]"); } @Override public void visit(final IntersectionType n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), false, arg); boolean isFirst = true; for (ReferenceType element : n.getElements()) { if (isFirst) { isFirst = false; } else { printer.print(" & "); } element.accept(this, arg); } } @Override public void visit(final UnionType n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), true, arg); boolean isFirst = true; for (ReferenceType element : n.getElements()) { if (isFirst) { isFirst = false; } else { printer.print(" | "); } element.accept(this, arg); } } @Override public void visit(final WildcardType n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), false, arg); printer.print("?"); if (n.getExtendedType().isPresent()) { printer.print(" extends "); n.getExtendedType().get().accept(this, arg); } if (n.getSuperType().isPresent()) { printer.print(" super "); n.getSuperType().get().accept(this, arg); } } @Override public void visit(final UnknownType n, final Void arg) { // Nothing to print } @Override public void visit(final FieldDeclaration n, final Void arg) { 
printOrphanCommentsBeforeThisChildNode(n); printComment(n.getComment(), arg); printMemberAnnotations(n.getAnnotations(), arg); printModifiers(n.getModifiers()); if (!n.getVariables().isEmpty()) { n.getMaximumCommonType().accept(this, arg); } printer.print(" "); for (final Iterator<VariableDeclarator> i = n.getVariables().iterator(); i.hasNext(); ) { final VariableDeclarator var = i.next(); var.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } printer.print(";"); } @Override public void visit(final VariableDeclarator n, final Void arg) { printComment(n.getComment(), arg); n.getName().accept(this, arg); Optional<NodeWithVariables> ancestor = n.getAncestorOfType(NodeWithVariables.class); if (!ancestor.isPresent()) { throw new RuntimeException("Unable to work with VariableDeclarator not owned by a NodeWithVariables"); } Type commonType = ancestor.get().getMaximumCommonType(); Type type = n.getType(); ArrayType arrayType = null; for (int i = commonType.getArrayLevel(); i < type.getArrayLevel(); i++) { if (arrayType == null) { arrayType = (ArrayType) type; } else { arrayType = (ArrayType) arrayType.getComponentType(); } printAnnotations(arrayType.getAnnotations(), true, arg); printer.print("[]"); } if (n.getInitializer().isPresent()) { printer.print(" = "); n.getInitializer().get().accept(this, arg); } } @Override public void visit(final ArrayInitializerExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("{"); if (!isNullOrEmpty(n.getValues())) { printer.print(" "); for (final Iterator<Expression> i = n.getValues().iterator(); i.hasNext(); ) { final Expression expr = i.next(); expr.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } printer.print(" "); } printer.print("}"); } @Override public void visit(final VoidType n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), false, arg); printer.print("void"); } @Override public void visit(final ArrayAccessExpr n, final Void arg) { printComment(n.getComment(), arg); n.getName().accept(this, arg); printer.print("["); n.getIndex().accept(this, arg); printer.print("]"); } @Override public void visit(final ArrayCreationExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("new "); n.getElementType().accept(this, arg); for (ArrayCreationLevel level : n.getLevels()) { level.accept(this, arg); } if (n.getInitializer().isPresent()) { printer.print(" "); n.getInitializer().get().accept(this, arg); } } @Override public void visit(final AssignExpr n, final Void arg) { printComment(n.getComment(), arg); n.getTarget().accept(this, arg); printer.print(" "); printer.print(n.getOperator().asString()); printer.print(" "); n.getValue().accept(this, arg); } @Override public void visit(final BinaryExpr n, final Void arg) { printComment(n.getComment(), arg); n.getLeft().accept(this, arg); printer.print(" "); printer.print(n.getOperator().asString()); printer.print(" "); n.getRight().accept(this, arg); } @Override public void visit(final CastExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("("); n.getType().accept(this, arg); printer.print(") "); n.getExpression().accept(this, arg); } @Override public void visit(final ClassExpr n, final Void arg) { printComment(n.getComment(), arg); n.getType().accept(this, arg); printer.print(".class"); } @Override public void visit(final ConditionalExpr n, final Void arg) { printComment(n.getComment(), arg); n.getCondition().accept(this, arg); printer.print(" ? 
"); n.getThenExpr().accept(this, arg); printer.print(" : "); n.getElseExpr().accept(this, arg); } @Override public void visit(final EnclosedExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("("); n.getInner().accept(this, arg); printer.print(")"); } @Override public void visit(final FieldAccessExpr n, final Void arg) { printComment(n.getComment(), arg); n.getScope().accept(this, arg); printer.print("."); n.getName().accept(this, arg); } @Override public void visit(final InstanceOfExpr n, final Void arg) { printComment(n.getComment(), arg); n.getExpression().accept(this, arg); printer.print(" instanceof "); n.getType().accept(this, arg); } @Override public void visit(final CharLiteralExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("'"); printer.print(n.getValue()); printer.print("'"); } @Override public void visit(final DoubleLiteralExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print(n.getValue()); } @Override public void visit(final IntegerLiteralExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print(n.getValue()); } @Override public void visit(final LongLiteralExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print(n.getValue()); } @Override public void visit(final StringLiteralExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("\""); printer.print(n.getValue()); printer.print("\""); } @Override public void visit(final BooleanLiteralExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print(String.valueOf(n.getValue())); } @Override public void visit(final NullLiteralExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("null"); } @Override public void visit(final ThisExpr n, final Void arg) { printComment(n.getComment(), arg); if (n.getClassExpr().isPresent()) { n.getClassExpr().get().accept(this, arg); printer.print("."); } printer.print("this"); } @Override public void visit(final SuperExpr n, final Void arg) { printComment(n.getComment(), arg); if (n.getClassExpr().isPresent()) { n.getClassExpr().get().accept(this, arg); printer.print("."); } printer.print("super"); } @Override public void visit(final MethodCallExpr n, final Void arg) { printComment(n.getComment(), arg); if (n.getScope().isPresent()) { n.getScope().get().accept(this, arg); if (configuration.isColumnAlignFirstMethodChain()) { if (!(n.getScope().get() instanceof MethodCallExpr) || (!((MethodCallExpr) n.getScope().get()).getScope().isPresent())) { resetMethodChainPosition(printer.getCursor()); } else { printer.wrapToColumn(peekMethodChainPosition().column); } } printer.print("."); } printTypeArgs(n, arg); n.getName().accept(this, arg); pushMethodChainPosition(printer.getCursor()); printArguments(n.getArguments(), arg); popMethodChainPosition(); } @Override public void visit(final ObjectCreationExpr n, final Void arg) { printComment(n.getComment(), arg); if (n.getScope().isPresent()) { n.getScope().get().accept(this, arg); printer.print("."); } printer.print("new "); printTypeArgs(n, arg); if (!isNullOrEmpty(n.getTypeArguments().orElse(null))) { printer.print(" "); } n.getType().accept(this, arg); printArguments(n.getArguments(), arg); if (n.getAnonymousClassBody().isPresent()) { printer.println(" {"); printer.indent(); printMembers(n.getAnonymousClassBody().get(), arg); printer.unindent(); printer.print("}"); } } @Override public void visit(final UnaryExpr n, final Void arg) { printComment(n.getComment(), arg); if (n.getOperator().isPrefix()) { 
printer.print(n.getOperator().asString()); } n.getExpression().accept(this, arg); if (n.getOperator().isPostfix()) { printer.print(n.getOperator().asString()); } } @Override public void visit(final ConstructorDeclaration n, final Void arg) { printComment(n.getComment(), arg); printMemberAnnotations(n.getAnnotations(), arg); printModifiers(n.getModifiers()); printTypeParameters(n.getTypeParameters(), arg); if (n.isGeneric()) { printer.print(" "); } n.getName().accept(this, arg); printer.print("("); if (!n.getParameters().isEmpty()) { for (final Iterator<Parameter> i = n.getParameters().iterator(); i.hasNext(); ) { final Parameter p = i.next(); p.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } printer.print(")"); if (!isNullOrEmpty(n.getThrownExceptions())) { printer.print(" throws "); for (final Iterator<ReferenceType> i = n.getThrownExceptions().iterator(); i.hasNext(); ) { final ReferenceType name = i.next(); name.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } printer.print(" "); n.getBody().accept(this, arg); } @Override public void visit(final MethodDeclaration n, final Void arg) { printOrphanCommentsBeforeThisChildNode(n); printComment(n.getComment(), arg); printMemberAnnotations(n.getAnnotations(), arg); printModifiers(n.getModifiers()); printTypeParameters(n.getTypeParameters(), arg); if (!isNullOrEmpty(n.getTypeParameters())) { printer.print(" "); } n.getType().accept(this, arg); printer.print(" "); n.getName().accept(this, arg); printer.print("("); n.getReceiverParameter().ifPresent(rp -> { rp.accept(this, arg); printer.print(", "); }); if (!isNullOrEmpty(n.getParameters())) { for (final Iterator<Parameter> i = n.getParameters().iterator(); i.hasNext(); ) { final Parameter p = i.next(); p.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } printer.print(")"); if (!isNullOrEmpty(n.getThrownExceptions())) { printer.print(" throws "); for (final Iterator<ReferenceType> i = n.getThrownExceptions().iterator(); i.hasNext(); ) { final ReferenceType name = i.next(); name.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } if (!n.getBody().isPresent()) { printer.print(";"); } else { printer.print(" "); n.getBody().get().accept(this, arg); } } @Override public void visit(final Parameter n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), false, arg); printModifiers(n.getModifiers()); n.getType().accept(this, arg); if (n.isVarArgs()) { printAnnotations(n.getVarArgsAnnotations(), false, arg); printer.print("..."); } if (!(n.getType() instanceof UnknownType)) { printer.print(" "); } n.getName().accept(this, arg); } @Override public void visit(final ReceiverParameter n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), false, arg); n.getType().accept(this, arg); printer.print(" "); n.getName().accept(this, arg); } @Override public void visit(final ExplicitConstructorInvocationStmt n, final Void arg) { printComment(n.getComment(), arg); if (n.isThis()) { printTypeArgs(n, arg); printer.print("this"); } else { if (n.getExpression().isPresent()) { n.getExpression().get().accept(this, arg); printer.print("."); } printTypeArgs(n, arg); printer.print("super"); } printArguments(n.getArguments(), arg); printer.print(";"); } @Override public void visit(final VariableDeclarationExpr n, final Void arg) { printComment(n.getComment(), arg); printAnnotations(n.getAnnotations(), false, arg); printModifiers(n.getModifiers()); if (!n.getVariables().isEmpty()) { 
n.getMaximumCommonType().accept(this, arg); } printer.print(" "); for (final Iterator<VariableDeclarator> i = n.getVariables().iterator(); i.hasNext(); ) { final VariableDeclarator v = i.next(); v.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } @Override public void visit(final LocalClassDeclarationStmt n, final Void arg) { printComment(n.getComment(), arg); n.getClassDeclaration().accept(this, arg); } @Override public void visit(final AssertStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("assert "); n.getCheck().accept(this, arg); if (n.getMessage().isPresent()) { printer.print(" : "); n.getMessage().get().accept(this, arg); } printer.print(";"); } @Override public void visit(final BlockStmt n, final Void arg) { printOrphanCommentsBeforeThisChildNode(n); printComment(n.getComment(), arg); printer.println("{"); if (n.getStatements() != null) { printer.indent(); for (final Statement s : n.getStatements()) { s.accept(this, arg); printer.println(); } printer.unindent(); } printOrphanCommentsEnding(n); printer.print("}"); } @Override public void visit(final LabeledStmt n, final Void arg) { printComment(n.getComment(), arg); n.getLabel().accept(this, arg); printer.print(": "); n.getStatement().accept(this, arg); } @Override public void visit(final EmptyStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print(";"); } @Override public void visit(final ExpressionStmt n, final Void arg) { printOrphanCommentsBeforeThisChildNode(n); printComment(n.getComment(), arg); n.getExpression().accept(this, arg); printer.print(";"); } @Override public void visit(final SwitchStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("switch("); n.getSelector().accept(this, arg); printer.println(") {"); if (n.getEntries() != null) { printer.indent(); for (final SwitchEntryStmt e : n.getEntries()) { e.accept(this, arg); } printer.unindent(); } printer.print("}"); } @Override public void visit(final SwitchEntryStmt n, final Void arg) { printComment(n.getComment(), arg); if (n.getLabel().isPresent()) { printer.print("case "); n.getLabel().get().accept(this, arg); printer.print(":"); } else { printer.print("default:"); } printer.println(); printer.indent(); if (n.getStatements() != null) { for (final Statement s : n.getStatements()) { s.accept(this, arg); printer.println(); } } printer.unindent(); } @Override public void visit(final BreakStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("break"); n.getLabel().ifPresent(l -> printer.print(" ").print(l.getIdentifier())); printer.print(";"); } @Override public void visit(final ReturnStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("return"); if (n.getExpression().isPresent()) { printer.print(" "); n.getExpression().get().accept(this, arg); } printer.print(";"); } @Override public void visit(final EnumDeclaration n, final Void arg) { printComment(n.getComment(), arg); printMemberAnnotations(n.getAnnotations(), arg); printModifiers(n.getModifiers()); printer.print("enum "); n.getName().accept(this, arg); if (!n.getImplementedTypes().isEmpty()) { printer.print(" implements "); for (final Iterator<ClassOrInterfaceType> i = n.getImplementedTypes().iterator(); i.hasNext(); ) { final ClassOrInterfaceType c = i.next(); c.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } printer.println(" {"); printer.indent(); if (n.getEntries().isNonEmpty()) { boolean alignVertically = n.getEntries().size() > 
configuration.getMaxEnumConstantsToAlignHorizontally(); printer.println(); for (final Iterator<EnumConstantDeclaration> i = n.getEntries().iterator(); i.hasNext(); ) { final EnumConstantDeclaration e = i.next(); e.accept(this, arg); if (i.hasNext()) { if (alignVertically) { printer.println(","); } else { printer.print(", "); } } } } if (!n.getMembers().isEmpty()) { printer.println(";"); printMembers(n.getMembers(), arg); } else { if (!n.getEntries().isEmpty()) { printer.println(); } } printer.unindent(); printer.print("}"); } @Override public void visit(final EnumConstantDeclaration n, final Void arg) { printComment(n.getComment(), arg); printMemberAnnotations(n.getAnnotations(), arg); n.getName().accept(this, arg); if (!n.getArguments().isEmpty()) { printArguments(n.getArguments(), arg); } if (!n.getClassBody().isEmpty()) { printer.println(" {"); printer.indent(); printMembers(n.getClassBody(), arg); printer.unindent(); printer.println("}"); } } @Override public void visit(final InitializerDeclaration n, final Void arg) { printComment(n.getComment(), arg); if (n.isStatic()) { printer.print("static "); } n.getBody().accept(this, arg); } @Override public void visit(final IfStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("if ("); n.getCondition().accept(this, arg); final boolean thenBlock = n.getThenStmt() instanceof BlockStmt; if (thenBlock) // block statement should start on the same line printer.print(") "); else { printer.println(")"); printer.indent(); } n.getThenStmt().accept(this, arg); if (!thenBlock) printer.unindent(); if (n.getElseStmt().isPresent()) { if (thenBlock) printer.print(" "); else printer.println(); final boolean elseIf = n.getElseStmt().orElse(null) instanceof IfStmt; final boolean elseBlock = n.getElseStmt().orElse(null) instanceof BlockStmt; if (elseIf || elseBlock) // put chained if and start of block statement on a same level printer.print("else "); else { printer.println("else"); printer.indent(); } if (n.getElseStmt().isPresent()) n.getElseStmt().get().accept(this, arg); if (!(elseIf || elseBlock)) printer.unindent(); } } @Override public void visit(final WhileStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("while ("); n.getCondition().accept(this, arg); printer.print(") "); n.getBody().accept(this, arg); } @Override public void visit(final ContinueStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("continue"); n.getLabel().ifPresent(l -> printer.print(" ").print(l.getIdentifier())); printer.print(";"); } @Override public void visit(final DoStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("do "); n.getBody().accept(this, arg); printer.print(" while ("); n.getCondition().accept(this, arg); printer.print(");"); } @Override public void visit(final ForeachStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("for ("); n.getVariable().accept(this, arg); printer.print(" : "); n.getIterable().accept(this, arg); printer.print(") "); n.getBody().accept(this, arg); } @Override public void visit(final ForStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("for ("); if (n.getInitialization() != null) { for (final Iterator<Expression> i = n.getInitialization().iterator(); i.hasNext(); ) { final Expression e = i.next(); e.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } printer.print("; "); if (n.getCompare().isPresent()) { n.getCompare().get().accept(this, arg); } printer.print("; "); if (n.getUpdate() != null) { 
for (final Iterator<Expression> i = n.getUpdate().iterator(); i.hasNext(); ) { final Expression e = i.next(); e.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } printer.print(") "); n.getBody().accept(this, arg); } @Override public void visit(final ThrowStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("throw "); n.getExpression().accept(this, arg); printer.print(";"); } @Override public void visit(final SynchronizedStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("synchronized ("); n.getExpression().accept(this, arg); printer.print(") "); n.getBody().accept(this, arg); } @Override public void visit(final TryStmt n, final Void arg) { printComment(n.getComment(), arg); printer.print("try "); if (!n.getResources().isEmpty()) { printer.print("("); Iterator<Expression> resources = n.getResources().iterator(); boolean first = true; while (resources.hasNext()) { resources.next().accept(this, arg); if (resources.hasNext()) { printer.print(";"); printer.println(); if (first) { printer.indent(); } } first = false; } if (n.getResources().size() > 1) { printer.unindent(); } printer.print(") "); } n.getTryBlock().accept(this, arg); for (final CatchClause c : n.getCatchClauses()) { c.accept(this, arg); } if (n.getFinallyBlock().isPresent()) { printer.print(" finally "); n.getFinallyBlock().get().accept(this, arg); } } @Override public void visit(final CatchClause n, final Void arg) { printComment(n.getComment(), arg); printer.print(" catch ("); n.getParameter().accept(this, arg); printer.print(") "); n.getBody().accept(this, arg); } @Override public void visit(final AnnotationDeclaration n, final Void arg) { printComment(n.getComment(), arg); printMemberAnnotations(n.getAnnotations(), arg); printModifiers(n.getModifiers()); printer.print("@interface "); n.getName().accept(this, arg); printer.println(" {"); printer.indent(); if (n.getMembers() != null) { printMembers(n.getMembers(), arg); } printer.unindent(); printer.print("}"); } @Override public void visit(final AnnotationMemberDeclaration n, final Void arg) { printComment(n.getComment(), arg); printMemberAnnotations(n.getAnnotations(), arg); printModifiers(n.getModifiers()); n.getType().accept(this, arg); printer.print(" "); n.getName().accept(this, arg); printer.print("()"); if (n.getDefaultValue().isPresent()) { printer.print(" default "); n.getDefaultValue().get().accept(this, arg); } printer.print(";"); } @Override public void visit(final MarkerAnnotationExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("@"); n.getName().accept(this, arg); } @Override public void visit(final SingleMemberAnnotationExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("@"); n.getName().accept(this, arg); printer.print("("); n.getMemberValue().accept(this, arg); printer.print(")"); } @Override public void visit(final NormalAnnotationExpr n, final Void arg) { printComment(n.getComment(), arg); printer.print("@"); n.getName().accept(this, arg); printer.print("("); if (n.getPairs() != null) { for (final Iterator<MemberValuePair> i = n.getPairs().iterator(); i.hasNext(); ) { final MemberValuePair m = i.next(); m.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } } printer.print(")"); } @Override public void visit(final MemberValuePair n, final Void arg) { printComment(n.getComment(), arg); n.getName().accept(this, arg); printer.print(" = "); n.getValue().accept(this, arg); } @Override public void visit(final LineComment n, final Void arg) { if 
(configuration.isIgnoreComments()) { return; } printer .print("// ") .println(normalizeEolInTextBlock(n.getContent(), "").trim()); } @Override public void visit(final BlockComment n, final Void arg) { if (configuration.isIgnoreComments()) { return; } final String commentContent = normalizeEolInTextBlock(n.getContent(), configuration.getEndOfLineCharacter()); String[] lines = commentContent.split("\\R", -1); // as BlockComment should not be formatted, -1 to preserve any trailing empty line if present printer.print("/*"); for (int i = 0; i < (lines.length - 1); i++) { printer.print(lines[i]); printer.print(configuration.getEndOfLineCharacter()); // Avoids introducing indentation in blockcomments. ie: do not use println() as it would trigger indentation at the next print call. } printer.print(lines[lines.length - 1]); // last line is not followed by a newline, and simply terminated with `*/` printer.println("*/"); } @Override public void visit(LambdaExpr n, Void arg) { printComment(n.getComment(), arg); final NodeList<Parameter> parameters = n.getParameters(); final boolean printPar = n.isEnclosingParameters(); if (printPar) { printer.print("("); } for (Iterator<Parameter> i = parameters.iterator(); i.hasNext(); ) { Parameter p = i.next(); p.accept(this, arg); if (i.hasNext()) { printer.print(", "); } } if (printPar) { printer.print(")"); } printer.print(" -> "); final Statement body = n.getBody(); if (body instanceof ExpressionStmt) { // Print the expression directly ((ExpressionStmt) body).getExpression().accept(this, arg); } else { body.accept(this, arg); } } @Override public void visit(MethodReferenceExpr n, Void arg) { printComment(n.getComment(), arg); Expression scope = n.getScope(); String identifier = n.getIdentifier(); if (scope != null) { n.getScope().accept(this, arg); } printer.print("::"); printTypeArgs(n, arg); if (identifier != null) { printer.print(identifier); } } @Override public void visit(TypeExpr n, Void arg) { printComment(n.getComment(), arg); if (n.getType() != null) { n.getType().accept(this, arg); } } @Override public void visit(NodeList n, Void arg) { if (configuration.isOrderImports() && n.size() > 0 && n.get(0) instanceof ImportDeclaration) { //noinspection unchecked NodeList<ImportDeclaration> modifiableList = new NodeList<>(n); modifiableList.sort((left, right) -> { int sort = Integer.compare(left.isStatic() ? 0 : 1, right.isStatic() ? 
0 : 1); if (sort == 0) { sort = left.getNameAsString().compareTo(right.getNameAsString()); } return sort; }); for (Object node : modifiableList) { ((Node) node).accept(this, arg); } } else { for (Object node : n) { ((Node) node).accept(this, arg); } } } @Override public void visit(final ImportDeclaration n, final Void arg) { printComment(n.getComment(), arg); printer.print("import "); if (n.isStatic()) { printer.print("static "); } n.getName().accept(this, arg); if (n.isAsterisk()) { printer.print(".*"); } printer.println(";"); printOrphanCommentsEnding(n); } @Override public void visit(ModuleDeclaration n, Void arg) { printAnnotations(n.getAnnotations(), false, arg); printer.println(); if (n.isOpen()) { printer.print("open "); } printer.print("module "); n.getName().accept(this, arg); printer.println(" {").indent(); n.getModuleStmts().accept(this, arg); printer.unindent().println("}"); } @Override public void visit(ModuleRequiresStmt n, Void arg) { printer.print("requires "); printModifiers(n.getModifiers()); n.getName().accept(this, arg); printer.println(";"); } @Override public void visit(ModuleExportsStmt n, Void arg) { printer.print("exports "); n.getName().accept(this, arg); printPrePostFixOptionalList(n.getModuleNames(), arg, " to ", ", ", ""); printer.println(";"); } @Override public void visit(ModuleProvidesStmt n, Void arg) { printer.print("provides "); n.getType().accept(this, arg); printPrePostFixRequiredList(n.getWithTypes(), arg, " with ", ", ", ""); printer.println(";"); } @Override public void visit(ModuleUsesStmt n, Void arg) { printer.print("uses "); n.getType().accept(this, arg); printer.println(";"); } @Override public void visit(ModuleOpensStmt n, Void arg) { printer.print("opens "); n.getName().accept(this, arg); printPrePostFixOptionalList(n.getModuleNames(), arg, " to ", ", ", ""); printer.println(";"); } @Override public void visit(UnparsableStmt n, Void arg) { printer.print("???;"); } private void printOrphanCommentsBeforeThisChildNode(final Node node) { if (configuration.isIgnoreComments()) return; if (node instanceof Comment) return; Node parent = node.getParentNode().orElse(null); if (parent == null) return; List<Node> everything = new LinkedList<>(); everything.addAll(parent.getChildNodes()); sortByBeginPosition(everything); int positionOfTheChild = -1; for (int i = 0; i < everything.size(); i++) { if (everything.get(i) == node) positionOfTheChild = i; } if (positionOfTheChild == -1) { throw new AssertionError("I am not a child of my parent."); } int positionOfPreviousChild = -1; for (int i = positionOfTheChild - 1; i >= 0 && positionOfPreviousChild == -1; i--) { if (!(everything.get(i) instanceof Comment)) positionOfPreviousChild = i; } for (int i = positionOfPreviousChild + 1; i < positionOfTheChild; i++) { Node nodeToPrint = everything.get(i); if (!(nodeToPrint instanceof Comment)) throw new RuntimeException( "Expected comment, instead " + nodeToPrint.getClass() + ". 
Position of previous child: " + positionOfPreviousChild + ", position of child " + positionOfTheChild); nodeToPrint.accept(this, null); } } private void printOrphanCommentsEnding(final Node node) { if (configuration.isIgnoreComments()) return; List<Node> everything = new LinkedList<>(); everything.addAll(node.getChildNodes()); sortByBeginPosition(everything); if (everything.isEmpty()) { return; } int commentsAtEnd = 0; boolean findingComments = true; while (findingComments && commentsAtEnd < everything.size()) { Node last = everything.get(everything.size() - 1 - commentsAtEnd); findingComments = (last instanceof Comment); if (findingComments) { commentsAtEnd++; } } for (int i = 0; i < commentsAtEnd; i++) { everything.get(everything.size() - commentsAtEnd + i).accept(this, null); } } }
1
12,052
In general I'm saying "if there is no common type I'm just going to skip some stuff and fail silently."
javaparser-javaparser
java
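The patch in the record above guards the type print with Optional.ifPresent, and the review comment notes that an empty maximum common type is now skipped silently rather than failing. A small, self-contained illustration of that behavioural difference, using a hypothetical stand-in rather than JavaParser's real AST types:

import java.util.Optional;

public class MaximumCommonTypeDemo {

  // Stand-in for a field declaration's "maximum common type"; empty when no common type exists.
  static String print(Optional<String> maximumCommonType) {
    StringBuilder out = new StringBuilder();
    // Before the patch the visitor dereferenced the type unconditionally and would fail here;
    // with ifPresent an empty Optional simply contributes nothing to the output.
    maximumCommonType.ifPresent(out::append);
    out.append(" a, b;");
    return out.toString();
  }

  public static void main(String[] args) {
    System.out.println(print(Optional.of("int"))); // "int a, b;"
    System.out.println(print(Optional.empty()));   // " a, b;" -- the silent skip the reviewer points out
  }
}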