| patch | y | oldf | idx | id | msg | proj | lang |
|---|---|---|---|---|---|---|---|
| stringlengths 17–31.2k | int64 1–1 | stringlengths 0–2.21M | int64 1–1 | int64 4.29k–68.4k | stringlengths 8–843 | stringclasses 212 values | stringclasses 9 values |
@@ -62,8 +62,14 @@ module Beaker
def close
begin
@ssh.close if @ssh
- rescue
- @ssh.shutdown!
+ rescue => e
+ @logger.warn "Attemped ssh.close. Caught an error: #{e.message} Attempting ssh.shutdown!..."
+ begin
+ @ssh.shutdown!
+ rescue => e
+ @logger.warn "Attemped ssh.shutdown!. Caught an error: #{e.message}. Giving up and destorying ssh."
+ @ssh = nil
+ end
end
@ssh = nil
end | 1 | require 'socket'
require 'timeout'
require 'net/scp'
module Beaker
class SshConnection
attr_accessor :logger
RETRYABLE_EXCEPTIONS = [
SocketError,
Timeout::Error,
Errno::ETIMEDOUT,
Errno::EHOSTDOWN,
Errno::EHOSTUNREACH,
Errno::ECONNREFUSED,
Errno::ECONNRESET,
Errno::ENETUNREACH,
Net::SSH::Disconnect,
Net::SSH::AuthenticationFailed,
]
def initialize hostname, user = nil, ssh_opts = {}, options = {}
@hostname = hostname
@user = user
@ssh_opts = ssh_opts
@logger = options[:logger]
@options = options
end
def self.connect hostname, user = 'root', ssh_opts = {}, options = {}
connection = new hostname, user, ssh_opts, options
connection.connect
connection
end
def connect
try = 1
last_wait = 0
wait = 1
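# retry up to 11 times, backing off along a Fibonacci sequence of waits (1, 1, 2, 3, 5, ... seconds)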
@ssh ||= begin
Net::SSH.start(@hostname, @user, @ssh_opts)
rescue *RETRYABLE_EXCEPTIONS => e
if try <= 11
@logger.warn "Try #{try} -- Host #{@hostname} unreachable: #{e.message}"
@logger.warn "Trying again in #{wait} seconds"
sleep wait
(last_wait, wait) = wait, last_wait + wait
try += 1
retry
else
# why is the logger not passed into this class?
@logger.error "Failed to connect to #{@hostname}"
raise
end
end
@logger.debug "Created ssh connection to #{@hostname}, user: #{@user}, opts: #{@ssh_opts}"
self
end
# closes this SshConnection
def close
begin
@ssh.close if @ssh
rescue
@ssh.shutdown!
end
@ssh = nil
end
def try_to_execute command, options = {}, stdout_callback = nil,
stderr_callback = stdout_callback
result = Result.new(@hostname, command)
# why are we getting to this point on a dry run anyways?
# also... the host creates connections through the class method,
# which automatically connects, so you can't do a dry run unless you also
# can connect to your hosts?
return result if options[:dry_run]
@ssh.open_channel do |channel|
request_terminal_for( channel, command ) if options[:pty]
channel.exec(command) do |terminal, success|
abort "FAILED: to execute command on a new channel on #{@hostname}" unless success
register_stdout_for terminal, result, stdout_callback
register_stderr_for terminal, result, stderr_callback
register_exit_code_for terminal, result
process_stdin_for( terminal, options[:stdin] ) if options[:stdin]
end
end
# Process SSH activity until we stop doing that - which is when our
# channel is finished with...
@ssh.loop
result.finalize!
@logger.last_result = result
result
end
def execute command, options = {}, stdout_callback = nil,
stderr_callback = stdout_callback
attempt = true
begin
result = try_to_execute(command, options, stdout_callback, stderr_callback)
rescue *RETRYABLE_EXCEPTIONS => e
if attempt
attempt = false
@logger.error "Command execution failed, attempting to reconnect to #{@hostname}"
close
connect
retry
else
raise
end
end
result
end
def request_terminal_for channel, command
channel.request_pty do |ch, success|
if success
@logger.info "Allocated a PTY on #{@hostname} for #{command.inspect}"
else
abort "FAILED: could not allocate a pty when requested on " +
"#{@hostname} for #{command.inspect}"
end
end
end
def register_stdout_for channel, output, callback = nil
channel.on_data do |ch, data|
callback[data] if callback
output.stdout << data
output.output << data
end
end
def register_stderr_for channel, output, callback = nil
channel.on_extended_data do |ch, type, data|
if type == 1
callback[data] if callback
output.stderr << data
output.output << data
end
end
end
def register_exit_code_for channel, output
channel.on_request("exit-status") do |ch, data|
output.exit_code = data.read_long
end
end
def process_stdin_for channel, stdin
# queue stdin data, force it to packets, and signal eof: this
# triggers action in many remote commands, notably including
# 'puppet apply'. It must be sent at some point before the rest
# of the action.
channel.send_data stdin.to_s
channel.process
channel.eof!
end
def scp_to source, target, options = {}, dry_run = false
return if dry_run
local_opts = options.dup
if local_opts[:recursive].nil?
local_opts[:recursive] = File.directory?(source)
end
local_opts[:chunk_size] ||= 16384
result = Result.new(@hostname, [source, target])
result.stdout = "\n"
@ssh.scp.upload! source, target, local_opts do |ch, name, sent, total|
result.stdout << "\tcopying %s: %10d/%d\n" % [name, sent, total]
end
# Setting these values allows reporting via result.log(test_name)
result.stdout << " SCP'ed file #{source} to #{@hostname}:#{target}"
# Net::Scp always returns 0, so just set the return code to 0.
result.exit_code = 0
result.finalize!
return result
end
def scp_from source, target, options = {}, dry_run = false
return if dry_run
local_opts = options.dup
if local_opts[:recursive].nil?
local_opts[:recursive] = true
end
local_opts[:chunk_size] ||= 16384
result = Result.new(@hostname, [source, target])
result.stdout = "\n"
@ssh.scp.download! source, target, local_opts do |ch, name, sent, total|
result.stdout << "\tcopying %s: %10d/%d\n" % [name, sent, total]
end
# Setting these values allows reporting via result.log(test_name)
result.stdout << " SCP'ed file #{@hostname}:#{source} to #{target}"
# Net::Scp always returns 0, so just set the return code to 0.
result.exit_code = 0
result.finalize!
result
end
end
end
| 1 | 9,415 | would be good to have a test for the case when `shutdown!` raises | voxpupuli-beaker | rb |
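The review note above asks for coverage of the case where `shutdown!` itself raises. A minimal RSpec sketch of such a test against the patched `close`, assuming the class is loadable as Beaker::SshConnection and that the internal session can be stubbed by injecting a double into `@ssh`; the `logger` and `mock_ssh` doubles are illustrative, not taken from the beaker suite:
require 'spec_helper'
describe Beaker::SshConnection do
  let(:logger)   { double('logger', :warn => nil) }
  let(:mock_ssh) { double('ssh') }
  subject(:connection) { described_class.new 'localhost', 'root', {}, { :logger => logger } }
  it 'gives up and destroys the session when shutdown! also raises' do
    # hypothetical setup: inject a stubbed session in place of a real Net::SSH one
    connection.instance_variable_set(:@ssh, mock_ssh)
    allow(mock_ssh).to receive(:close).and_raise(StandardError, 'close failed')
    allow(mock_ssh).to receive(:shutdown!).and_raise(StandardError, 'shutdown failed')
    # the patched close should swallow both errors and nil out the session
    expect { connection.close }.not_to raise_error
    expect(connection.instance_variable_get(:@ssh)).to be_nil
  end
end
Stubbing `@ssh` directly keeps the test from opening a real connection, at the cost of reaching into the object's internals.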
@@ -2747,6 +2747,10 @@ short HbaseInsert::codeGen(Generator *generator)
generator->initTdbFields(hbasescan_tdb);
+ if (CmpCommon::getDefault(HBASE_ASYNC_OPERATIONS) == DF_ON
+ && t == ComTdbHbaseAccess::INSERT_)
+ hbasescan_tdb->setAsyncOperations(TRUE);
+
if (getTableDesc()->getNATable()->isSeabaseTable())
{
hbasescan_tdb->setSQHbaseTable(TRUE); | 1 | /**********************************************************************
// @@@ START COPYRIGHT @@@
//
// (C) Copyright 1994-2015 Hewlett-Packard Development Company, L.P.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// @@@ END COPYRIGHT @@@
**********************************************************************/
/* -*-C++-*-
******************************************************************************
*
* File: GenRelUpdate.C
* Description: update/delete/insert operators
*
* Created: 5/17/94
* Language: C++
*
*
******************************************************************************
*/
#define SQLPARSERGLOBALS_FLAGS // must precede all #include's
#define SQLPARSERGLOBALS_NADEFAULTS
#include "Platform.h"
#include "Sqlcomp.h"
#include "GroupAttr.h"
#include "RelMisc.h"
#include "RelUpdate.h"
#include "RelJoin.h"
#include "ControlDB.h"
#include "GenExpGenerator.h"
#include "ComTdbDp2Oper.h"
#include "ComTdbUnion.h"
#include "ComTdbOnlj.h"
#include "ComTdbHbaseAccess.h"
#include "PartFunc.h"
#include "HashRow.h"
#include "CmpStatement.h"
#include "OptimizerSimulator.h"
#include "ComTdbFastTransport.h"
#include "CmpSeabaseDDL.h"
#include "NAExecTrans.h"
#include <algorithm>
#include "SqlParserGlobals.h" // must be last #include
/////////////////////////////////////////////////////////////////////
//
// Contents:
//
// DeleteCursor::codeGen()
// Delete::codeGen()
//
// Insert::codeGen()
//
// UpdateCursor::codeGen()
// Update::codeGen()
//
// ##IM: to be REMOVED:
// ## the imCodeGen methods, Generator::im*, the executor imd class.
//
//////////////////////////////////////////////////////////////////////
extern int CreateAllCharsExpr(const NAType &formalType,
ItemExpr &actualValue,
CmpContext *cmpContext,
ItemExpr *&newExpr);
inline static NABoolean getReturnRow(const GenericUpdate *gu,
const IndexDesc *index)
{
return gu->producesOutputs();
}
static DP2LockFlags initLockFlags(GenericUpdate *gu, Generator * generator)
{
// fix case 10-040429-7402 by checking gu's statement level access options
// first before declaring any error 3140/3141.
TransMode::IsolationLevel ilForUpd;
generator->verifyUpdatableTransMode(&gu->accessOptions(),
generator->getTransMode(),
&ilForUpd);
DP2LockFlags lf;
if (gu->accessOptions().userSpecified())
lf = gu->accessOptions().getDP2LockFlags();
else
lf = generator->getTransMode()->getDP2LockFlags();
// stable access with update/delete/insert are treated as
// read committed.
if (lf.getConsistencyLevel() == DP2LockFlags::STABLE)
lf.setConsistencyLevel(DP2LockFlags::READ_COMMITTED);
if ((ilForUpd != TransMode::IL_NOT_SPECIFIED_) &&
(NOT gu->accessOptions().userSpecified()))
{
TransMode t(ilForUpd);
lf.setConsistencyLevel(
(DP2LockFlags::ConsistencyLevel)t.getDP2LockFlags().getConsistencyLevel());
lf.setLockState(
(DP2LockFlags::LockState)t.getDP2LockFlags().getLockState());
}
return lf;
}
void GenericUpdate::setTransactionRequired(Generator *generator,
NABoolean isNeededForAllFragments)
{
if (!generator->isInternalRefreshStatement() ||
getIndexDesc()->getNAFileSet()->isAudited())
{
generator->setTransactionFlag(TRUE, isNeededForAllFragments);
}
else
{
// Internal refresh statement and table is non-audited.
if (!getTableDesc()->getNATable()->isAnMV() &&
getTableName().getSpecialType() != ExtendedQualName::IUD_LOG_TABLE &&
getTableName().getSpecialType() != ExtendedQualName::GHOST_IUD_LOG_TABLE)
{
generator->setTransactionFlag(TRUE, isNeededForAllFragments);
}
}
}
///////////////////////////////////////////////////////////
//
// DeleteCursor::codeGen()
//
///////////////////////////////////////////////////////////
short DeleteCursor::codeGen(Generator * generator)
{
GenAssert(0, "DeleteCursor::codeGen:should not reach here.");
return 0;
}
static short genUpdExpr(
Generator * generator,
TableDesc * tableDesc, // IN
const IndexDesc * indexDesc, // IN
ValueIdArray &recExprArray, // IN
const Int32 updatedRowAtpIndex, // IN
ex_expr** updateExpr, // OUT
ULng32 &updateRowLen, // OUT
ExpTupleDesc** ufRowTupleDesc, // OUT fetched/updated RowTupleDesc,
// depending on updOpt (TRUE ->fetched)
NABoolean updOpt) // IN
{
ExpGenerator * expGen = generator->getExpGenerator();
ExpTupleDesc::TupleDataFormat tupleFormat =
generator->getTableDataFormat( tableDesc->getNATable() );
NABoolean alignedFormat = tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT;
// Generate the update expression that will create the updated row
// given to DP2 at runtime.
ValueIdList updtRowVidList;
BaseColumn *updtCol = NULL,
*fetchedCol = NULL;
Lng32 updtColNum = -1,
fetchedColNum = 0;
ItemExpr *updtColVal = NULL,
*castNode = NULL;
CollIndex recEntries = recExprArray.entries(),
colEntries = indexDesc->getIndexColumns().entries(),
j = 0;
NAColumn *col;
NAColumnArray colArray;
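// Walk every index column: if it is the target of the next entry in
// recExprArray, take that entry's source value; otherwise carry the
// fetched column value through unchanged.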
for (CollIndex i = 0; i < colEntries; i++)
{
fetchedCol =
(BaseColumn *)(((indexDesc->getIndexColumns())[i]).getItemExpr());
fetchedColNum = fetchedCol->getColNumber();
updtCol =
(updtCol != NULL
? updtCol
: (j < recEntries
? (BaseColumn *)(recExprArray[j].getItemExpr()->child(0)->castToItemExpr())
: NULL));
updtColNum = (updtCol ? updtCol->getColNumber() : -1);
if (fetchedColNum == updtColNum)
{
updtColVal = recExprArray[j].getItemExpr()->child(1)->castToItemExpr();
j++;
updtCol = NULL;
}
else
{
updtColVal = fetchedCol;
}
ValueId updtValId = fetchedCol->getValueId();
castNode = new(generator->wHeap()) Cast(updtColVal, &(updtValId.getType()));
castNode->bindNode(generator->getBindWA());
if (((updOpt) && (fetchedColNum == updtColNum)) ||
(NOT updOpt))
{
if (updOpt)
{
// assign the attributes of the fetched col to the
// updated col.
generator->addMapInfo(
castNode->getValueId(),
generator->getMapInfo(fetchedCol->getValueId())->getAttr());
}
if ( alignedFormat &&
(col = updtValId.getNAColumn( TRUE )) &&
(col != NULL) )
colArray.insert( col );
updtRowVidList.insert(castNode->getValueId());
}
} // for each column
// Generate the update expression
//
if (NOT updOpt)
{
// Tell the expression generator that we're coming in for an insert
// or an update. This flag will be cleared in generateContiguousMoveExpr.
if ( tupleFormat == ExpTupleDesc::SQLMX_FORMAT ||
tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT )
expGen->setForInsertUpdate( TRUE );
expGen->generateContiguousMoveExpr
(updtRowVidList,
// (IN) Don't add convert nodes, Cast's have already been done.
0,
// (IN) Destination Atp
1,
// (IN) Destination Atp index
updatedRowAtpIndex,
// (IN) Destination data format
tupleFormat,
// (OUT) Destination tuple length
updateRowLen,
// (OUT) Generated expression
updateExpr,
// (OUT) Tuple descriptor for destination tuple
ufRowTupleDesc,
// (IN) Tuple descriptor format
ExpTupleDesc::LONG_FORMAT,
NULL, NULL, 0, NULL, NULL,
&colArray);
}
else
{
// update opt being done. Fetched and updated row are exactly
// the same. Updated values will overwrite the copy of fetched row
// at runtime. Change the atp & atpindex for target.
expGen->assignAtpAndAtpIndex(updtRowVidList,
1, updatedRowAtpIndex);
// No need to generate a header clause since the entire fetched row
// is copied to the updated row - header is in place.
expGen->setNoHeaderNeeded( TRUE );
// generate the update expression
expGen->generateListExpr(updtRowVidList,ex_expr::exp_ARITH_EXPR,
updateExpr);
// restore the header flag
expGen->setNoHeaderNeeded( FALSE );
}
return 0;
}
static short genMergeInsertExpr(
Generator * generator,
TableDesc * tableDesc, // IN
const IndexDesc * indexDesc, // IN
ValueIdArray &mergeInsertRecExprArray, // IN
const Int32 mergeInsertKeyEncodeAtpIndex, // IN
const Int32 mergeInsertRowAtpIndex, // IN
ex_expr** mergeInsertKeyEncodeExpr, // OUT
ULng32 &mergeInsertKeyLen, // OUT
ex_expr** mergeInsertExpr, // OUT
ULng32 &mergeInsertRowLen, // OUT
ExpTupleDesc** mergeInsertRowTupleDesc) // OUT fetched/updated RowTupleDesc,
{
ExpGenerator * expGen = generator->getExpGenerator();
*mergeInsertKeyEncodeExpr = NULL;
mergeInsertKeyLen = 0;
*mergeInsertExpr = NULL;
mergeInsertRowLen = 0;
// Generate the update expression that will create the updated row
// given to DP2 at runtime.
ValueIdList mergeInsertRowVidList;
BaseColumn *updtCol = NULL,
*fetchedCol = NULL;
Lng32 updtColNum = -1,
fetchedColNum = 0;
ItemExpr *updtColVal = NULL,
*castNode = NULL;
CollIndex recEntries = mergeInsertRecExprArray.entries(),
colEntries = indexDesc->getIndexColumns().entries(),
j = 0;
NAColumnArray colArray;
NAColumn *col;
if (recEntries == 0)
return 0;
ExpTupleDesc::TupleDataFormat tupleFormat =
generator->getTableDataFormat( tableDesc->getNATable() );
NABoolean alignedFormat = (tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT);
for (CollIndex ii = 0; ii < mergeInsertRecExprArray.entries(); ii++)
{
const ItemExpr *assignExpr = mergeInsertRecExprArray[ii].getItemExpr();
ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
ValueId srcValueId = assignExpr->child(1)->castToItemExpr()->getValueId();
// populate the colArray because this info is needed later to identify
// the added columns.
if ( alignedFormat )
{
col = tgtValueId.getNAColumn( TRUE );
if ( col != NULL )
colArray.insert( col );
}
ItemExpr * ie = NULL;
ie = new(generator->wHeap())
Cast(assignExpr->child(1), &tgtValueId.getType());
ie->bindNode(generator->getBindWA());
mergeInsertRowVidList.insert(ie->getValueId());
}
// Tell the expression generator that we're coming in for an insert
// or an update. This flag will be cleared in generateContiguousMoveExpr.
if ( tupleFormat == ExpTupleDesc::SQLMX_FORMAT ||
tupleFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT )
expGen->setForInsertUpdate( TRUE );
// Generate the insert expression
//
expGen->generateContiguousMoveExpr
(mergeInsertRowVidList,
// (IN) Don't add convert nodes, Cast's have already been done.
0,
// (IN) Destination Atp
1,
// (IN) Destination Atp index
mergeInsertRowAtpIndex,
// (IN) Destination data format
tupleFormat,
// (OUT) Destination tuple length
mergeInsertRowLen,
// (OUT) Generated expression
mergeInsertExpr,
// (OUT) Tuple descriptor for destination tuple
mergeInsertRowTupleDesc,
// (IN) Tuple descriptor format
ExpTupleDesc::LONG_FORMAT,
NULL, NULL, 0, NULL, NULL,
&colArray); // colArray is needed to identify any added cols.
// Assign attributes to the ASSIGN nodes of the mergeInsertRecExprArray().
// This is not the same as the generateContiguousMoveExpr() call
// above since different valueId's are added to the mapTable.
//
expGen->processValIdList(mergeInsertRecExprArray,
tupleFormat,
mergeInsertRowLen,
1,
mergeInsertRowAtpIndex,
mergeInsertRowTupleDesc,
ExpTupleDesc::LONG_FORMAT,
0,NULL,&colArray,
!indexDesc->isClusteringIndex());
for (CollIndex i = 0; i < indexDesc->getIndexColumns().entries();
i++)
{
generator->addMapInfo(
(indexDesc->getIndexColumns())[i],
generator->getMapInfo(mergeInsertRecExprArray[i])->getAttr()
)->getAttr()->setAtp(0);
}
ULng32 f;
expGen->generateKeyEncodeExpr(
indexDesc, // describes the columns
0, // work Atp
mergeInsertKeyEncodeAtpIndex, // work Atp entry #3
ExpTupleDesc::SQLMX_KEY_FORMAT, // Tuple format
mergeInsertKeyLen, // Key length
mergeInsertKeyEncodeExpr, // Encode expression
FALSE, // don't optimize key encoding
f);
return 0;
}
static short genHbaseUpdOrInsertExpr(
Generator * generator,
NABoolean isInsert,
ValueIdArray &updRecExprArray, // IN
const Int32 updateTuppIndex, // IN
ex_expr** updateExpr, // OUT
ULng32 &updateRowLen, // OUT
ExpTupleDesc** updateTupleDesc, // OUT updated RowTupleDesc,
Queue* &listOfUpdatedColNames, // OUT
ex_expr** mergeInsertRowIdExpr, // out
ULng32 &mergeInsertRowIdLen, // OUT
const Int32 mergeInsertRowIdTuppIndex, // IN
const IndexDesc * indexDesc) // IN
{
ExpGenerator * expGen = generator->getExpGenerator();
Space * space = generator->getSpace();
*updateExpr = NULL;
updateRowLen = 0;
// Generate the update expression that will create the updated row
ValueIdList updRowVidList;
NABoolean isAligned = FALSE;
if (indexDesc->getPrimaryTableDesc()->getNATable()->isSQLMXAlignedTable())
isAligned = TRUE;
ExpTupleDesc::TupleDataFormat tupleFormat;
if (isAligned)
tupleFormat = ExpTupleDesc::SQLMX_ALIGNED_FORMAT;
else
tupleFormat = ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
listOfUpdatedColNames = NULL;
if (updRecExprArray.entries() > 0)
listOfUpdatedColNames = new(space) Queue(space);
for (CollIndex ii = 0; ii < updRecExprArray.entries(); ii++)
{
const ItemExpr *assignExpr = updRecExprArray[ii].getItemExpr();
ValueId assignExprValueId = assignExpr->getValueId();
ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
ValueId srcValueId = assignExpr->child(1)->castToItemExpr()->getValueId();
ItemExpr * ie = NULL;
ie = new(generator->wHeap())
Cast(assignExpr->child(1), &tgtValueId.getType());
BaseColumn * bc =
(BaseColumn*)(updRecExprArray[ii].getItemExpr()->child(0)->castToItemExpr());
GenAssert(bc->getOperatorType() == ITM_BASECOLUMN,
"unexpected type of base table column");
const NAColumn *nac = bc->getNAColumn();
if (HbaseAccess::isEncodingNeededForSerialization(bc))
{
ie = new(generator->wHeap()) CompEncode
(ie, FALSE, -1, CollationInfo::Sort, TRUE);
}
ie->bindNode(generator->getBindWA());
updRowVidList.insert(ie->getValueId());
if (NOT isAligned)
{
NAString cnInList;
HbaseAccess::createHbaseColId(nac, cnInList);
char * colNameInList =
space->AllocateAndCopyToAlignedSpace(cnInList, 0);
listOfUpdatedColNames->insert(colNameInList);
}
}
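// For aligned-format tables the entire row is stored as a single column
// value, so one length-prefixed name (default family, qualifier byte 1)
// stands in for all of the updated columns.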
if ((isAligned) && (listOfUpdatedColNames) &&
(updRecExprArray.entries() > 0))
{
NAString cnInList(SEABASE_DEFAULT_COL_FAMILY);
cnInList += ":";
unsigned char c = 1;
cnInList.append((char*)&c, 1);
short len = cnInList.length();
cnInList.prepend((char*)&len, sizeof(short));
char * colNameInList =
space->AllocateAndCopyToAlignedSpace(cnInList, 0);
listOfUpdatedColNames->insert(colNameInList);
}
// Generate the update expression
//
expGen->generateContiguousMoveExpr
(updRowVidList,
0, // (IN) Don't add convert nodes, Cast's have already been done.
1, // (IN) Destination Atp
updateTuppIndex, // (IN) Destination Atp index
tupleFormat,
updateRowLen, // (OUT) Destination tuple length
updateExpr, // (OUT) Generated expression
updateTupleDesc, // (OUT) Tuple descriptor for destination tuple
ExpTupleDesc::LONG_FORMAT);
// Assign attributes to the ASSIGN nodes of the updRecExprArray().
// This is not the same as the generateContiguousMoveExpr() call
// above since different valueId's are added to the mapTable.
//
for (CollIndex ii = 0; ii < updRecExprArray.entries(); ii++)
{
const ItemExpr *assignExpr = updRecExprArray[ii].getItemExpr();
ValueId assignExprValueId = assignExpr->getValueId();
Attributes * assignAttr = (generator->addMapInfo(assignExprValueId, 0))->getAttr();
ValueId updValId = updRowVidList[ii];
Attributes * updValAttr = (generator->getMapInfo(updValId, 0))->getAttr();
assignAttr->copyLocationAttrs(updValAttr);
}
for (CollIndex ii = 0; ii < updRecExprArray.entries(); ii++)
{
const ItemExpr *assignExpr = updRecExprArray[ii].getItemExpr();
ValueId assignExprValueId = assignExpr->getValueId();
ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
Attributes * colAttr = (generator->addMapInfo(tgtValueId, 0))->getAttr();
Attributes * assignAttr = (generator->getMapInfo(assignExprValueId, 0))->getAttr();
colAttr->copyLocationAttrs(assignAttr);
BaseColumn * bc = (BaseColumn *)assignExpr->child(0)->castToItemExpr();
const NAColumn *nac = bc->getNAColumn();
if (nac->isAddedColumn())
{
colAttr->setSpecialField();
Attributes::DefaultClass dc = expGen->getDefaultClass(nac);
colAttr->setDefaultClass(dc);
Attributes * attr = (*updateTupleDesc)->getAttr(ii);
attr->setSpecialField();
attr->setDefaultClass(dc);
}
}
if ((isInsert) &&
(updRowVidList.entries() > 0))
{
ValueIdList updRowKeyVidList;
const NAColumnArray &keyColArray = indexDesc->getNAFileSet()->getIndexKeyColumns();
ULng32 firstKeyColumnOffset = 0;
for (CollIndex kc=0; kc<keyColArray.entries(); kc++)
updRowKeyVidList.insert(updRowVidList[keyColArray[kc]->getPosition()]);
expGen->generateKeyEncodeExpr(indexDesc,
1, // (IN) Destination Atp
mergeInsertRowIdTuppIndex,
ExpTupleDesc::SQLMX_KEY_FORMAT,
mergeInsertRowIdLen,
mergeInsertRowIdExpr,
FALSE,
firstKeyColumnOffset,
&updRowKeyVidList,
TRUE);
}
return 0;
}
//
// Create and bind an assign node for each vertical-partition column
// (i.e., a column in a partition of a VP table). The assign node
// assigns the base table column to the VP column.
//
static void bindVPCols(Generator *generator,
const ValueIdList & vpCols,
ValueIdList & resList)
{
BindWA *bindWA = generator->getBindWA();
for (CollIndex colNo=0; colNo<vpCols.entries(); colNo++)
{
// Get the VP column -- must be ITM_INDEXCOLUMN
IndexColumn *vpColItem = (IndexColumn*)vpCols[colNo].getItemExpr();
GenAssert(vpColItem->getOperatorType() == ITM_INDEXCOLUMN,
"unexpected type of vp column");
// Get the corresponding base table column -- must be ITM_BASECOLUMN
ItemExpr *tblColItem = vpColItem->getDefinition().getItemExpr();
GenAssert(tblColItem->getOperatorType() == ITM_BASECOLUMN,
"unexpected type of base table column");
Assign *assign = new (bindWA->wHeap()) Assign(vpColItem, tblColItem, FALSE);
assign->bindNode(bindWA);
if (bindWA->errStatus())
{
GenAssert(0,"bindNode of vpCol failed");
}
resList.insertAt(colNo, assign->getValueId());
}
}
short HiveInsert::codeGen(Generator *generator)
{
if(!generator->explainDisabled()) {
Space * space = generator->getSpace();
// a dummy tdb
ComTdbFastExtract* fe_tdb = new (space) ComTdbFastExtract();
generator->setExplainTuple( addExplainInfo(fe_tdb, 0, 0, generator));
}
return 0;
}
///////////////////////////////////////////////////////////
//
// UpdateCursor::codeGen()
//
///////////////////////////////////////////////////////////
short UpdateCursor::codeGen(Generator * generator)
{
GenAssert(0, "UpdateCursor::codeGen:should not reach here.");
return 0;
}
//
// This function is for aligned row format only.
// This will order all the fixed fields by their alignment size,
// followed by any added fixed fields,
// followed by all variable fields (original or added).
static void orderColumnsByAlignment(NAArray<BaseColumn *> columns,
UInt32 numColumns,
NAArray<BaseColumn *> * orderedCols )
{
Int16 rc = 0;
NAList<BaseColumn *> varCols(5);
NAList<BaseColumn *> addedCols(5);
NAList<BaseColumn *> align4(5);
NAList<BaseColumn *> align2(5);
NAList<BaseColumn *> align1(5);
BaseColumn *currColumn;
CollIndex i, k;
Int32 alignmentSize;
for( i = 0, k = 0; i < numColumns; i++ )
{
if ( columns.used(i) )
{
currColumn = columns[ i ];
if ( currColumn->getType().isVaryingLen() )
{
varCols.insert( currColumn );
}
else
{
if ( currColumn->getNAColumn()->isAddedColumn() )
{
addedCols.insert( currColumn );
continue;
}
alignmentSize = currColumn->getType().getDataAlignment();
if (8 == alignmentSize)
orderedCols->insertAt(k++, currColumn );
else if ( 4 == alignmentSize )
align4.insert( currColumn );
else if ( 2 == alignmentSize )
align2.insert( currColumn );
else
align1.insert( currColumn );
}
}
}
if (align4.entries() > 0)
for( i = 0; i < align4.entries(); i++ )
orderedCols->insertAt( k++, align4[ i ] );
if (align2.entries() > 0)
for( i = 0; i < align2.entries(); i++ )
orderedCols->insertAt( k++, align2[ i ] );
if (align1.entries() > 0)
for( i = 0; i < align1.entries(); i++ )
orderedCols->insertAt( k++, align1[ i ] );
if (addedCols.entries() > 0)
for( i = 0; i < addedCols.entries(); i++ )
orderedCols->insertAt( k++, addedCols[ i ] );
if (varCols.entries() > 0)
for( i = 0; i < varCols.entries(); i++ )
orderedCols->insertAt( k++, varCols[ i ] );
}
short Delete::codeGen(Generator * /*generator*/)
{
return -1;
}
short Insert::codeGen(Generator * /*generator*/)
{
return -1;
}
short Update::codeGen(Generator * /*generator*/)
{
return -1;
}
short MergeUpdate::codeGen(Generator * /*generator*/)
{
return -1;
}
short MergeDelete::codeGen(Generator * /*generator*/)
{
return -1;
}
short HbaseDelete::codeGen(Generator * generator)
{
Space * space = generator->getSpace();
ExpGenerator * expGen = generator->getExpGenerator();
// allocate a map table for the retrieved columns
// generator->appendAtEnd();
MapTable * last_map_table = generator->getLastMapTable();
ex_expr *scanExpr = 0;
ex_expr *proj_expr = 0;
ex_expr *convert_expr = NULL;
ex_expr * keyColValExpr = NULL;
ex_cri_desc * givenDesc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returnedDesc = NULL;
const Int32 work_atp = 1;
const Int32 convertTuppIndex = 2;
const Int32 rowIdTuppIndex = 3;
const Int32 asciiTuppIndex = 4;
const Int32 rowIdAsciiTuppIndex = 5;
const Int32 keyColValTuppIndex = 6;
ULng32 asciiRowLen = 0;
ExpTupleDesc * asciiTupleDesc = 0;
ex_cri_desc * work_cri_desc = NULL;
work_cri_desc = new(space) ex_cri_desc(7, space);
returnedDesc = new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
NABoolean returnRow = getReturnRow(this, getIndexDesc());
ExpTupleDesc::TupleDataFormat asciiRowFormat =
(getTableDesc()->getNATable()->isSQLMXAlignedTable() ?
ExpTupleDesc::SQLMX_ALIGNED_FORMAT :
ExpTupleDesc::SQLARK_EXPLODED_FORMAT);
ExpTupleDesc::TupleDataFormat hbaseRowFormat =
ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
ValueIdList asciiVids;
ValueIdList executorPredCastVids;
ValueIdList convertExprCastVids;
NABoolean addDefaultValues = TRUE;
NABoolean hasAddedColumns = FALSE;
if (getTableDesc()->getNATable()->hasAddedColumn())
hasAddedColumns = TRUE;
ValueIdList columnList;
ValueIdList srcVIDlist;
ValueIdList dupVIDlist;
HbaseAccess::sortValues(retColRefSet_,
columnList,
srcVIDlist, dupVIDlist,
(getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() == ExtendedQualName::INDEX_TABLE));
const CollIndex numColumns = columnList.entries();
// build key information
keyRangeGen * keyInfo = 0;
expGen->buildKeyInfo(&keyInfo, // out
generator,
getIndexDesc()->getNAFileSet()->getIndexKeyColumns(),
getIndexDesc()->getIndexKey(),
getBeginKeyPred(),
(getSearchKey() && getSearchKey()->isUnique() ? NULL : getEndKeyPred()),
getSearchKey(),
NULL, //getMdamKeyPtr(),
FALSE,
0,
ExpTupleDesc::SQLMX_KEY_FORMAT);
UInt32 keyColValLen = 0;
char * keyColName = NULL;
if ((canDoCheckAndUpdel()) &&
(getSearchKey() && getSearchKey()->isUnique()) &&
(getBeginKeyPred().entries() > 0))
{
expGen->generateKeyColValueExpr(
getBeginKeyPred()[0],
work_atp, keyColValTuppIndex,
keyColValLen,
&keyColValExpr);
if (! keyColValExpr)
canDoCheckAndUpdel() = FALSE;
else
{
ItemExpr * col_node = getBeginKeyPred()[0].getItemExpr()->child(0);
HbaseAccess::genColName(generator, col_node, keyColName);
}
}
Queue * tdbListOfUniqueRows = NULL;
Queue * tdbListOfRangeRows = NULL;
HbaseAccess::genListsOfRows(generator,
listOfDelSubsetRows_,
listOfDelUniqueRows_,
tdbListOfRangeRows,
tdbListOfUniqueRows);
ULng32 convertRowLen = 0;
for (CollIndex ii = 0; ii < numColumns; ii++)
{
ItemExpr * col_node = ((columnList[ii]).getValueDesc())->getItemExpr();
const NAType &givenType = col_node->getValueId().getType();
int res;
ItemExpr *asciiValue = NULL;
ItemExpr *castValue = NULL;
res = HbaseAccess::createAsciiColAndCastExpr2(
generator, // for heap
col_node,
givenType, // [IN] Actual type of HDFS column
asciiValue, // [OUT] Returned expression for ascii rep.
castValue, // [OUT] Returned expression for binary rep.
getTableDesc()->getNATable()->isSQLMXAlignedTable()
);
GenAssert(res == 1 && asciiValue != NULL && castValue != NULL,
"Error building expression tree for cast output value");
asciiValue->synthTypeAndValueId();
asciiValue->bindNode(generator->getBindWA());
asciiVids.insert(asciiValue->getValueId());
castValue->bindNode(generator->getBindWA());
convertExprCastVids.insert(castValue->getValueId());
} // for (ii = 0; ii < numCols; ii++)
// Add ascii columns to the MapTable. After this call the MapTable
// has ascii values in the work ATP at index asciiTuppIndex.
const NAColumnArray * colArray = NULL;
unsigned short pcm = expGen->getPCodeMode();
if ((asciiRowFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) &&
(hasAddedColumns))
{
colArray = &getIndexDesc()->getAllColumns();
expGen->setPCodeMode(ex_expr::PCODE_NONE);
}
expGen->processValIdList(
asciiVids, // [IN] ValueIdList
asciiRowFormat, // [IN] tuple data format
asciiRowLen, // [OUT] tuple length
work_atp, // [IN] atp number
asciiTuppIndex, // [IN] index into atp
&asciiTupleDesc, // [optional OUT] tuple desc
ExpTupleDesc::LONG_FORMAT, // [optional IN] desc format
0,
NULL,
(NAColumnArray*)colArray);
work_cri_desc->setTupleDescriptor(asciiTuppIndex, asciiTupleDesc);
ExpTupleDesc * tuple_desc = 0;
expGen->generateContiguousMoveExpr(
convertExprCastVids, // [IN] source ValueIds
FALSE, // [IN] add convert nodes?
work_atp, // [IN] target atp number
convertTuppIndex, // [IN] target tupp index
hbaseRowFormat, // [IN] target tuple format
convertRowLen, // [OUT] target tuple length
&convert_expr, // [OUT] move expression
&tuple_desc, // [optional OUT] target tuple desc
ExpTupleDesc::LONG_FORMAT, // [optional IN] target desc format
NULL,
NULL,
0,
NULL,
FALSE,
NULL,
FALSE /* doBulkMove */);
if ((asciiRowFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) &&
(hasAddedColumns))
{
expGen->setPCodeMode(pcm);
}
for (CollIndex i = 0; i < columnList.entries(); i++)
{
ValueId colValId = columnList[i];
ValueId castValId = convertExprCastVids[i];
Attributes * colAttr = (generator->addMapInfo(colValId, 0))->getAttr();
Attributes * castAttr = (generator->getMapInfo(castValId))->getAttr();
colAttr->copyLocationAttrs(castAttr);
} // for
if (getScanIndexDesc() != NULL)
{
for (CollIndex i = 0; i < getScanIndexDesc()->getIndexColumns().entries(); i++)
{
ValueId scanIndexDescVID = getScanIndexDesc()->getIndexColumns()[i];
const ValueId indexDescVID = getIndexDesc()->getIndexColumns()[i];
CollIndex pos = 0;
pos = columnList.index(indexDescVID);
if (pos != NULL_COLL_INDEX)
{
Attributes * colAttr = (generator->addMapInfo(scanIndexDescVID, 0))->getAttr();
ValueId castValId = convertExprCastVids[pos];
Attributes * castAttr = (generator->getMapInfo(castValId))->getAttr();
colAttr->copyLocationAttrs(castAttr);
} // if
else
{
pos = columnList.index(scanIndexDescVID);
if (pos != NULL_COLL_INDEX)
{
Attributes * colAttr = (generator->addMapInfo(indexDescVID, 0))->getAttr();
ValueId castValId = convertExprCastVids[pos];
Attributes * castAttr = (generator->getMapInfo(castValId))->getAttr();
colAttr->copyLocationAttrs(castAttr);
} // if
} // else
} // for
} // getScanIndexDesc != NULL
// assign location attributes to dup vids that were returned earlier.
for (CollIndex i = 0; i < srcVIDlist.entries(); i++)
{
ValueId srcValId = srcVIDlist[i];
ValueId dupValId = dupVIDlist[i];
Attributes * srcAttr = (generator->getMapInfo(srcValId))->getAttr();
Attributes * dupAttr = (generator->addMapInfo(dupValId, 0))->getAttr();
dupAttr->copyLocationAttrs(srcAttr);
} // for
if (addDefaultValues) //hasAddedColumns)
{
expGen->addDefaultValues(columnList,
getIndexDesc()->getAllColumns(),
tuple_desc,
TRUE);
if (asciiRowFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
expGen->addDefaultValues(columnList,
getIndexDesc()->getAllColumns(),
asciiTupleDesc,
TRUE);
}
else
{
// copy default values from convertTupleDesc to asciiTupleDesc
expGen->copyDefaultValues(asciiTupleDesc, tuple_desc);
}
}
// generate explain selection expression, if present
// if ((NOT (getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() == ExtendedQualName::INDEX_TABLE)) &&
// (! executorPred().isEmpty()))
if (! executorPred().isEmpty())
{
ItemExpr * newPredTree = executorPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&scanExpr);
}
ULng32 rowIdAsciiRowLen = 0;
ExpTupleDesc * rowIdAsciiTupleDesc = 0;
ex_expr * rowIdExpr = NULL;
ULng32 rowIdLength = 0;
if (getTableDesc()->getNATable()->isSeabaseTable())
{
HbaseAccess::genRowIdExpr(generator,
getIndexDesc()->getNAFileSet()->getIndexKeyColumns(),
getHbaseSearchKeys(),
work_cri_desc, work_atp,
rowIdAsciiTuppIndex, rowIdTuppIndex,
rowIdAsciiRowLen, rowIdAsciiTupleDesc,
rowIdLength,
rowIdExpr);
}
else
{
HbaseAccess::genRowIdExprForNonSQ(generator,
getIndexDesc()->getNAFileSet()->getIndexKeyColumns(),
getHbaseSearchKeys(),
work_cri_desc, work_atp,
rowIdAsciiTuppIndex, rowIdTuppIndex,
rowIdAsciiRowLen, rowIdAsciiTupleDesc,
rowIdLength,
rowIdExpr);
}
Queue * listOfFetchedColNames = NULL;
if (NOT getTableDesc()->getNATable()->isSeabaseTable())
{
// for hbase cell/row tables, the listoffetchedcols is not the columns that are
// part of the virtual cell/row tables.
// This list will come from the predicate and selected items used. TBD.
// For now, do not create a list.
}
else if ((getTableDesc()->getNATable()->isSeabaseTable()) &&
(getTableDesc()->getNATable()->isSQLMXAlignedTable()))
{
listOfFetchedColNames = new(space) Queue(space);
NAString cnInList(SEABASE_DEFAULT_COL_FAMILY);
cnInList += ":";
unsigned char c = 1;
cnInList.append((char*)&c, 1);
short len = cnInList.length();
cnInList.prepend((char*)&len, sizeof(short));
char * colNameInList =
space->AllocateAndCopyToAlignedSpace(cnInList, 0);
listOfFetchedColNames->insert(colNameInList);
}
else
{
HbaseAccess::genListOfColNames(generator,
getIndexDesc(),
columnList,
listOfFetchedColNames);
}
Queue * listOfDeletedColNames = NULL;
if (csl())
{
listOfDeletedColNames = new(space) Queue(space);
for (Lng32 i = 0; i < csl()->entries(); i++)
{
NAString * nas = (NAString*)(*csl())[i];
char * colNameInList = NULL;
short len = nas->length();
nas->prepend((char*)&len, sizeof(short));
colNameInList =
space->AllocateAndCopyToAlignedSpace(*nas, 0);
listOfDeletedColNames->insert(colNameInList);
}
}
if (getTableDesc()->getNATable()->isSeabaseTable())
{
if ((keyInfo && getSearchKey() && getSearchKey()->isUnique()) ||
(tdbListOfUniqueRows))
{
// Save node for later use by RelRoot in the case of UPDATE CURRENT OF.
generator->updateCurrentOfRel() = (void*)this;
}
}
if (getOptStoi() && getOptStoi()->getStoi())
generator->addSqlTableOpenInfo(getOptStoi()->getStoi());
LateNameInfo* lateNameInfo = new(generator->wHeap()) LateNameInfo();
char * compileTimeAnsiName = (char*)getOptStoi()->getStoi()->ansiName();
lateNameInfo->setCompileTimeName(compileTimeAnsiName, space);
lateNameInfo->setLastUsedName(compileTimeAnsiName, space);
lateNameInfo->setNameSpace(COM_TABLE_NAME);
if (getIndexDesc()->getNAFileSet()->getKeytag() != 0)
// is an index.
{
lateNameInfo->setIndex(TRUE);
lateNameInfo->setNameSpace(COM_INDEX_NAME);
}
generator->addLateNameInfo(lateNameInfo);
if (returnRow)
{
// The hbase row will be returned as the last entry of the returned atp.
// Change the atp and atpindex of the returned values to indicate that.
expGen->assignAtpAndAtpIndex(getIndexDesc()->getIndexColumns(),
0, returnedDesc->noTuples()-1);
expGen->assignAtpAndAtpIndex(getScanIndexDesc()->getIndexColumns(),
0, returnedDesc->noTuples()-1);
}
Cardinality expectedRows = (Cardinality) getEstRowsUsed().getValue();
ULng32 buffersize = getDefault(GEN_DPSO_BUFFER_SIZE);
buffersize = MAXOF(3*convertRowLen, buffersize);
queue_index upqueuelength = (queue_index)getDefault(GEN_DPSO_SIZE_UP);
queue_index downqueuelength = (queue_index)getDefault(GEN_DPSO_SIZE_DOWN);
Int32 numBuffers = getDefault(GEN_DPUO_NUM_BUFFERS);
char * tablename = NULL;
if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable()))
{
if (getIndexDesc() && getIndexDesc()->getNAFileSet())
tablename = space->AllocateAndCopyToAlignedSpace(GenGetQualifiedName(getIndexDesc()->getNAFileSet()->getFileSetName().getObjectName()), 0);
}
else
{
if (getIndexDesc() && getIndexDesc()->getNAFileSet())
tablename = space->AllocateAndCopyToAlignedSpace(GenGetQualifiedName(getIndexDesc()->getNAFileSet()->getFileSetName()), 0);
}
if (! tablename)
tablename =
space->AllocateAndCopyToAlignedSpace(
GenGetQualifiedName(getTableName()), 0);
NAString serverNAS = ActiveSchemaDB()->getDefaults().getValue(HBASE_SERVER);
NAString zkPortNAS = ActiveSchemaDB()->getDefaults().getValue(HBASE_ZOOKEEPER_PORT);
char * server = space->allocateAlignedSpace(serverNAS.length() + 1);
strcpy(server, serverNAS.data());
char * zkPort = space->allocateAlignedSpace(zkPortNAS.length() + 1);
strcpy(zkPort, zkPortNAS.data());
ComTdbHbaseAccess::HbasePerfAttributes * hbpa =
new(space) ComTdbHbaseAccess::HbasePerfAttributes();
if (CmpCommon::getDefault(HBASE_CACHE_BLOCKS) != DF_OFF)
hbpa->setCacheBlocks(TRUE);
// estrowsaccessed is 0 for now, so cache size will be set to minimum
generator->setHBaseNumCacheRows(getEstRowsAccessed().getValue(), hbpa) ;
// create hdfsscan_tdb
ComTdbHbaseAccess *hbasescan_tdb = new(space)
ComTdbHbaseAccess(
ComTdbHbaseAccess::DELETE_,
tablename,
convert_expr,
scanExpr,
rowIdExpr,
NULL, // updateExpr
NULL, // mergeInsertExpr
NULL, // mergeInsertRowIdExpr
NULL, // mergeUpdScanExpr
NULL, // projExpr
NULL, // returnedUpdatedExpr
NULL, // returnMergeUpdateExpr
NULL, // encodedKeyExpr
keyColValExpr,
NULL, // hbaseFilterValExpr
asciiRowLen,
convertRowLen,
0, // updateRowLen
0, // mergeInsertRowLen
0, // fetchedRowLen
0, // returnedRowLen
rowIdLength,
convertRowLen,
rowIdAsciiRowLen,
(keyInfo ? keyInfo->getKeyLength() : 0),
keyColValLen,
0, // hbaseFilterValRowLen
asciiTuppIndex,
convertTuppIndex,
0, // updateTuppIndex
0, // mergeInsertTuppIndex
0, // mergeInsertRowIdTuppIndex
0, // returnedFetchedTuppIndex
0, // returnedUpdatedTuppIndex
rowIdTuppIndex,
returnedDesc->noTuples()-1,
rowIdAsciiTuppIndex,
0, // keyTuppIndex,
keyColValTuppIndex,
0, // hbaseFilterValTuppIndex
0, // hbaseTimestamp
0, // hbaseVersion
tdbListOfRangeRows,
tdbListOfUniqueRows,
listOfFetchedColNames,
listOfDeletedColNames,
NULL,
keyInfo,
keyColName,
work_cri_desc,
givenDesc,
returnedDesc,
downqueuelength,
upqueuelength,
expectedRows,
numBuffers,
buffersize,
server,
zkPort,
hbpa
);
generator->initTdbFields(hbasescan_tdb);
if (getTableDesc()->getNATable()->isHbaseRowTable()) //rowwiseHbaseFormat())
hbasescan_tdb->setRowwiseFormat(TRUE);
if (getTableDesc()->getNATable()->isSeabaseTable())
{
hbasescan_tdb->setSQHbaseTable(TRUE);
if (getTableDesc()->getNATable()->isSQLMXAlignedTable())
hbasescan_tdb->setAlignedFormat(TRUE);
if ((CmpCommon::getDefault(HBASE_SQL_IUD_SEMANTICS) == DF_ON) &&
(NOT noCheck()))
hbasescan_tdb->setHbaseSqlIUD(TRUE);
if (getTableDesc()->getNATable()->isEnabledForDDLQI())
generator->objectUids().insert(
getTableDesc()->getNATable()->objectUid().get_value());
}
if (keyInfo && getSearchKey() && getSearchKey()->isUnique())
hbasescan_tdb->setUniqueKeyInfo(TRUE);
if (returnRow)
hbasescan_tdb->setReturnRow(TRUE);
if (rowsAffected() != GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED)
hbasescan_tdb->setComputeRowsAffected(TRUE);
if (! tdbListOfUniqueRows)
{
hbasescan_tdb->setSubsetOper(TRUE);
}
if (canDoCheckAndUpdel())
hbasescan_tdb->setCanDoCheckAndUpdel(TRUE);
if (uniqueRowsetHbaseOper()) {
hbasescan_tdb->setRowsetOper(TRUE);
hbasescan_tdb->setHbaseRowsetVsbbSize(getDefault(HBASE_ROWSET_VSBB_SIZE));
}
if (csl())
hbasescan_tdb->setUpdelColnameIsStr(TRUE);
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(hbasescan_tdb, 0, 0, generator));
}
if ((generator->computeStats()) &&
(generator->collectStatsType() == ComTdb::PERTABLE_STATS
|| generator->collectStatsType() == ComTdb::OPERATOR_STATS))
{
hbasescan_tdb->setPertableStatsTdbId((UInt16)generator->
getPertableStatsTdbId());
}
if (generator->isTransactionNeeded())
setTransactionRequired(generator);
else if (noDTMxn())
hbasescan_tdb->setUseHbaseXn(TRUE);
generator->setFoundAnUpdate(TRUE);
generator->setCriDesc(givenDesc, Generator::DOWN);
generator->setCriDesc(returnedDesc, Generator::UP);
generator->setGenObj(this, hbasescan_tdb);
return 0;
}
short HbaseUpdate::codeGen(Generator * generator)
{
Space * space = generator->getSpace();
ExpGenerator * expGen = generator->getExpGenerator();
// allocate a map table for the retrieved columns
// generator->appendAtEnd();
MapTable * last_map_table = generator->getLastMapTable();
// Append a new map table for holding attributes that are only used
// in local expressions. This map table will be removed after all
// the local expressions are generated.
//
MapTable *localMapTable = generator->appendAtEnd();
ex_expr *scanExpr = 0;
ex_expr *projExpr = 0;
ex_expr *convert_expr = NULL;
ex_expr *updateExpr = NULL;
ex_expr *mergeInsertExpr = NULL;
ex_expr *returnUpdateExpr = NULL;
ex_expr * keyColValExpr = NULL;
ex_cri_desc * givenDesc
= generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returnedDesc = NULL;
const Int32 work_atp = 1;
const Int32 convertTuppIndex = 2;
const Int32 rowIdTuppIndex = 3;
const Int32 asciiTuppIndex = 4;
const Int32 rowIdAsciiTuppIndex = 5;
// const Int32 keyTuppIndex = 6;
const Int32 updateTuppIndex = 6;
const Int32 mergeInsertTuppIndex = 7;
const Int32 mergeInsertRowIdTuppIndex = 8;
const Int32 keyColValTuppIndex = 9;
ULng32 asciiRowLen = 0;
ExpTupleDesc * asciiTupleDesc = 0;
ex_cri_desc * work_cri_desc = NULL;
work_cri_desc = new(space) ex_cri_desc(10, space);
NABoolean returnRow = getReturnRow(this, getIndexDesc());
if (returnRow)
// one for fetchedRow, one for updatedRow.
returnedDesc = new(space) ex_cri_desc(givenDesc->noTuples() + 2, space);
else
returnedDesc = new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
const Int16 returnedFetchedTuppIndex = (Int16)(returnedDesc->noTuples()-2);
const Int16 returnedUpdatedTuppIndex = (Int16)(returnedFetchedTuppIndex + 1);
ExpTupleDesc::TupleDataFormat asciiRowFormat =
(getTableDesc()->getNATable()->isSQLMXAlignedTable() ?
ExpTupleDesc::SQLMX_ALIGNED_FORMAT :
ExpTupleDesc::SQLARK_EXPLODED_FORMAT);
ExpTupleDesc::TupleDataFormat hbaseRowFormat =
// ExpTupleDesc::SQLMX_ALIGNED_FORMAT;
ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
ValueIdList asciiVids;
ValueIdList executorPredCastVids;
ValueIdList convertExprCastVids;
NABoolean addDefaultValues = TRUE;
NABoolean hasAddedColumns = FALSE;
if (getTableDesc()->getNATable()->hasAddedColumn())
hasAddedColumns = TRUE;
ValueIdList columnList;
ValueIdList srcVIDlist;
ValueIdList dupVIDlist;
HbaseAccess::sortValues(retColRefSet_, columnList,
srcVIDlist, dupVIDlist,
(getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() == ExtendedQualName::INDEX_TABLE));
const CollIndex numColumns = columnList.entries();
// build key information
keyRangeGen * keyInfo = 0;
expGen->buildKeyInfo(&keyInfo, // out
generator,
getIndexDesc()->getNAFileSet()->getIndexKeyColumns(),
getIndexDesc()->getIndexKey(),
getBeginKeyPred(),
(getSearchKey() && getSearchKey()->isUnique() ? NULL : getEndKeyPred()),
getSearchKey(),
NULL, //getMdamKeyPtr(),
FALSE,
0,
ExpTupleDesc::SQLMX_KEY_FORMAT);
UInt32 keyColValLen = 0;
char * keyColName = NULL;
if ((canDoCheckAndUpdel()) &&
(getSearchKey() && getSearchKey()->isUnique()) &&
(getBeginKeyPred().entries() > 0))
{
expGen->generateKeyColValueExpr(
getBeginKeyPred()[0],
work_atp, keyColValTuppIndex,
keyColValLen,
&keyColValExpr);
if (! keyColValExpr)
canDoCheckAndUpdel() = FALSE;
else
{
ItemExpr * col_node = getBeginKeyPred()[0].getItemExpr()->child(0);
HbaseAccess::genColName(generator, col_node, keyColName);
}
}
Queue * tdbListOfUniqueRows = NULL;
Queue * tdbListOfRangeRows = NULL;
HbaseAccess::genListsOfRows(generator,
listOfUpdSubsetRows_,
listOfUpdUniqueRows_,
tdbListOfRangeRows,
tdbListOfUniqueRows);
ULng32 convertRowLen = 0;
for (CollIndex ii = 0; ii < numColumns; ii++)
{
ItemExpr * col_node = ((columnList[ii]).getValueDesc())->getItemExpr();
const NAType &givenType = col_node->getValueId().getType();
int res;
ItemExpr *asciiValue = NULL;
ItemExpr *castValue = NULL;
res = HbaseAccess::createAsciiColAndCastExpr2(
generator, // for heap
col_node,
givenType, // [IN] Actual type of HDFS column
asciiValue, // [OUT] Returned expression for ascii rep.
castValue, // [OUT] Returned expression for binary rep.
getTableDesc()->getNATable()->isSQLMXAlignedTable()
);
GenAssert(res == 1 && asciiValue != NULL && castValue != NULL,
"Error building expression tree for cast output value");
asciiValue->synthTypeAndValueId();
asciiValue->bindNode(generator->getBindWA());
asciiVids.insert(asciiValue->getValueId());
castValue->bindNode(generator->getBindWA());
convertExprCastVids.insert(castValue->getValueId());
} // for (ii = 0; ii < numCols; ii++)
// Add ascii columns to the MapTable. After this call the MapTable
// has ascii values in the work ATP at index asciiTuppIndex.
const NAColumnArray * colArray = NULL;
unsigned short pcm = expGen->getPCodeMode();
if ((asciiRowFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) &&
(hasAddedColumns))
{
colArray = &getIndexDesc()->getAllColumns();
expGen->setPCodeMode(ex_expr::PCODE_NONE);
}
expGen->processValIdList(
asciiVids, // [IN] ValueIdList
asciiRowFormat, // [IN] tuple data format
asciiRowLen, // [OUT] tuple length
work_atp, // [IN] atp number
asciiTuppIndex, // [IN] index into atp
&asciiTupleDesc, // [optional OUT] tuple desc
ExpTupleDesc::LONG_FORMAT, // [optional IN] desc format
0,
NULL,
(NAColumnArray*)colArray);
work_cri_desc->setTupleDescriptor(asciiTuppIndex, asciiTupleDesc);
ExpTupleDesc * tuple_desc = 0;
expGen->generateContiguousMoveExpr(
convertExprCastVids, // [IN] source ValueIds
FALSE, // [IN] add convert nodes?
work_atp, // [IN] target atp number
convertTuppIndex, // [IN] target tupp index
hbaseRowFormat, // [IN] target tuple format
convertRowLen, // [OUT] target tuple length
&convert_expr, // [OUT] move expression
&tuple_desc, // [optional OUT] target tuple desc
ExpTupleDesc::LONG_FORMAT, // [optional IN] target desc format
NULL,
NULL,
0,
NULL,
FALSE,
NULL,
FALSE /* doBulkMove */);
if ((asciiRowFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT) &&
(hasAddedColumns))
{
expGen->setPCodeMode(pcm);
}
for (CollIndex i = 0; i < columnList.entries(); i++)
{
ValueId colValId = columnList[i];
ValueId castValId = convertExprCastVids[i];
Attributes * colAttr = (generator->addMapInfo(colValId, 0))->getAttr();
Attributes * castAttr = (generator->getMapInfo(castValId))->getAttr();
colAttr->copyLocationAttrs(castAttr);
} // for
if (getScanIndexDesc() != NULL)
{
for (CollIndex i = 0; i < getScanIndexDesc()->getIndexColumns().entries(); i++)
{
ValueId scanIndexDescVID = getScanIndexDesc()->getIndexColumns()[i];
const ValueId indexDescVID = getIndexDesc()->getIndexColumns()[i];
CollIndex pos = 0;
pos = columnList.index(indexDescVID);
if (pos != NULL_COLL_INDEX)
{
Attributes * colAttr = (generator->addMapInfo(scanIndexDescVID, 0))->getAttr();
ValueId castValId = convertExprCastVids[pos];
Attributes * castAttr = (generator->getMapInfo(castValId))->getAttr();
colAttr->copyLocationAttrs(castAttr);
} // if
else
{
pos = columnList.index(scanIndexDescVID);
if (pos != NULL_COLL_INDEX)
{
Attributes * colAttr = (generator->addMapInfo(indexDescVID, 0))->getAttr();
ValueId castValId = convertExprCastVids[pos];
Attributes * castAttr = (generator->getMapInfo(castValId))->getAttr();
colAttr->copyLocationAttrs(castAttr);
} // if
} // else
} // for
} // getScanIndexDesc != NULL
// assign location attributes to dup vids that were returned earlier.
for (CollIndex i = 0; i < srcVIDlist.entries(); i++)
{
ValueId srcValId = srcVIDlist[i];
ValueId dupValId = dupVIDlist[i];
Attributes * srcAttr = (generator->getMapInfo(srcValId))->getAttr();
Attributes * dupAttr = (generator->addMapInfo(dupValId, 0))->getAttr();
dupAttr->copyLocationAttrs(srcAttr);
} // for
if (addDefaultValues)
{
expGen->addDefaultValues(columnList,
getIndexDesc()->getAllColumns(),
tuple_desc,
TRUE);
if (asciiRowFormat == ExpTupleDesc::SQLMX_ALIGNED_FORMAT)
{
expGen->addDefaultValues(columnList,
getIndexDesc()->getAllColumns(),
asciiTupleDesc,
TRUE);
}
else
{
// copy default values from convertTupleDesc to asciiTupleDesc
expGen->copyDefaultValues(asciiTupleDesc, tuple_desc);
}
}
// generate explain selection expression, if present
// if ((NOT (getTableDesc()->getNATable()->getExtendedQualName().getSpecialType() == ExtendedQualName::INDEX_TABLE)) &&
// (! executorPred().isEmpty()))
if (! executorPred().isEmpty())
{
ItemExpr * newPredTree = executorPred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(newPredTree->getValueId(), ex_expr::exp_SCAN_PRED,
&scanExpr);
}
ex_expr * mergeInsertRowIdExpr = NULL;
ULng32 mergeInsertRowIdLen = 0;
ExpTupleDesc *updatedRowTupleDesc = 0;
ULng32 updateRowLen = 0;
Queue * listOfUpdatedColNames = NULL;
genHbaseUpdOrInsertExpr(generator,
FALSE,
newRecExprArray(), updateTuppIndex,
&updateExpr, updateRowLen,
&updatedRowTupleDesc,
listOfUpdatedColNames,
NULL, mergeInsertRowIdLen, 0,
getIndexDesc());
work_cri_desc->setTupleDescriptor(updateTuppIndex, updatedRowTupleDesc);
ExpTupleDesc *mergedRowTupleDesc = 0;
ULng32 mergeInsertRowLen = 0;
Queue * listOfMergedColNames = NULL;
if ((isMerge()) &&
(mergeInsertRecExprArray().entries() > 0))
{
genHbaseUpdOrInsertExpr(generator,
TRUE,
mergeInsertRecExprArray(), mergeInsertTuppIndex,
&mergeInsertExpr, mergeInsertRowLen,
&mergedRowTupleDesc,
listOfMergedColNames,
&mergeInsertRowIdExpr, mergeInsertRowIdLen,
mergeInsertRowIdTuppIndex,
getIndexDesc());
work_cri_desc->setTupleDescriptor(mergeInsertTuppIndex, mergedRowTupleDesc);
}
ULng32 rowIdAsciiRowLen = 0;
ExpTupleDesc * rowIdAsciiTupleDesc = 0;
ex_expr * rowIdExpr = NULL;
ULng32 rowIdLength = 0;
if (getTableDesc()->getNATable()->isSeabaseTable())
{
HbaseAccess::genRowIdExpr(generator,
getIndexDesc()->getNAFileSet()->getIndexKeyColumns(),
getHbaseSearchKeys(),
work_cri_desc, work_atp,
rowIdAsciiTuppIndex, rowIdTuppIndex,
rowIdAsciiRowLen, rowIdAsciiTupleDesc,
rowIdLength,
rowIdExpr);
}
else
{
HbaseAccess::genRowIdExprForNonSQ(generator,
getIndexDesc()->getNAFileSet()->getIndexKeyColumns(),
getHbaseSearchKeys(),
work_cri_desc, work_atp,
rowIdAsciiTuppIndex, rowIdTuppIndex,
rowIdAsciiRowLen, rowIdAsciiTupleDesc,
rowIdLength,
rowIdExpr);
}
Queue * listOfFetchedColNames = NULL;
if ((getTableDesc()->getNATable()->isSeabaseTable()) &&
(getTableDesc()->getNATable()->isSQLMXAlignedTable()))
{
listOfFetchedColNames = new(space) Queue(space);
NAString cnInList(SEABASE_DEFAULT_COL_FAMILY);
cnInList += ":";
unsigned char c = 1;
cnInList.append((char*)&c, 1);
short len = cnInList.length();
cnInList.prepend((char*)&len, sizeof(short));
char * colNameInList =
space->AllocateAndCopyToAlignedSpace(cnInList, 0);
listOfFetchedColNames->insert(colNameInList);
}
else
{
HbaseAccess::genListOfColNames(generator,
getIndexDesc(),
columnList,
listOfFetchedColNames);
}
ex_expr * mergeUpdScanExpr = NULL;
if (isMerge() && !mergeUpdatePred().isEmpty())
{
// Generate expression to evaluate any merge update predicate on the
// fetched row
ItemExpr* updPredTree =
mergeUpdatePred().rebuildExprTree(ITM_AND,TRUE,TRUE);
expGen->generateExpr(updPredTree->getValueId(),
ex_expr::exp_SCAN_PRED,
&mergeUpdScanExpr);
}
else if (getIndexDesc()->isClusteringIndex() && getCheckConstraints().entries())
{
GenAssert(FALSE, "Should not reach here. This update should have been transformed to delete/insert");
}
if ((getTableDesc()->getNATable()->isSeabaseTable()) &&
(NOT isMerge()))
{
if ((keyInfo && getSearchKey() && getSearchKey()->isUnique()) ||
( tdbListOfUniqueRows))
{
// Save node for later use by RelRoot in the case of UPDATE CURRENT OF.
generator->updateCurrentOfRel() = (void*)this;
}
}
if (getOptStoi() && getOptStoi()->getStoi())
generator->addSqlTableOpenInfo(getOptStoi()->getStoi());
LateNameInfo* lateNameInfo = new(generator->wHeap()) LateNameInfo();
char * compileTimeAnsiName = (char*)getOptStoi()->getStoi()->ansiName();
lateNameInfo->setCompileTimeName(compileTimeAnsiName, space);
lateNameInfo->setLastUsedName(compileTimeAnsiName, space);
lateNameInfo->setNameSpace(COM_TABLE_NAME);
if (getIndexDesc()->getNAFileSet()->getKeytag() != 0)
// is an index.
{
lateNameInfo->setIndex(TRUE);
lateNameInfo->setNameSpace(COM_INDEX_NAME);
}
generator->addLateNameInfo(lateNameInfo);
ex_expr * returnMergeInsertExpr = NULL;
ULng32 returnedFetchedRowLen = 0;
ULng32 returnedUpdatedRowLen = 0;
ULng32 returnedMergeInsertedRowLen = 0;
if (returnRow)
{
const ValueIdList &fetchedOutputs =
((getScanIndexDesc() != NULL) ?
getScanIndexDesc()->getIndexColumns() :
getIndexDesc()->getIndexColumns());
// Generate a project expression to move the fetched row from
// the fetchedRowAtpIndex in the work Atp to the returnedFetchedAtpIndex
// in the return Atp.
MapTable * returnedFetchedMapTable = 0;
ExpTupleDesc * returnedFetchedTupleDesc = NULL;
expGen->generateContiguousMoveExpr
(fetchedOutputs,
1, // add conv nodes
0,
returnedFetchedTuppIndex,
generator->getInternalFormat(),
returnedFetchedRowLen,
&projExpr,
&returnedFetchedTupleDesc,
ExpTupleDesc::SHORT_FORMAT,
&returnedFetchedMapTable);
// assign location attributes to columns referenced in scanIndex and index.
// If any of these columns/value_ids are being updated, the updated location
// will be assigned later in this code.
expGen->assignAtpAndAtpIndex(getScanIndexDesc()->getIndexColumns(),
0, returnedFetchedTuppIndex);
expGen->assignAtpAndAtpIndex(getIndexDesc()->getIndexColumns(),
0, returnedFetchedTuppIndex);
ValueIdList updatedOutputs;
if (isMerge())
{
BaseColumn *updtCol = NULL,
*fetchedCol = NULL;
Lng32 updtColNum = -1,
fetchedColNum = 0;
CollIndex recEntries = newRecExprArray().entries(),
colEntries = getIndexDesc()->getIndexColumns().entries(),
j = 0;
ValueId tgtValueId;
for (CollIndex ii = 0; ii < colEntries; ii++)
{
fetchedCol =
(BaseColumn *)(((getIndexDesc()->getIndexColumns())[ii]).getItemExpr());
fetchedColNum = fetchedCol->getColNumber();
updtCol =
(updtCol != NULL ? updtCol :
(j < recEntries ?
(BaseColumn *)(newRecExprArray()[j].getItemExpr()->child(0)->castToItemExpr()) :
NULL));
updtColNum = (updtCol ? updtCol->getColNumber() : -1);
if (fetchedColNum == updtColNum)
{
const ItemExpr *assignExpr = newRecExprArray()[j].getItemExpr();
tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
j++;
updtCol = NULL;
}
else
{
tgtValueId = fetchedCol->getValueId();
}
updatedOutputs.insert(tgtValueId);
}
}
else
{
for (CollIndex ii = 0; ii < newRecExprArray().entries(); ii++)
{
const ItemExpr *assignExpr = newRecExprArray()[ii].getItemExpr();
ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
updatedOutputs.insert(tgtValueId);
}
}
ValueIdSet outputs = fetchedOutputs;
ValueIdSet updatedOutputsSet = updatedOutputs;
outputs += updatedOutputsSet;
getGroupAttr()->setCharacteristicOutputs(outputs);
MapTable * returnedUpdatedMapTable = 0;
ExpTupleDesc * returnedUpdatedTupleDesc = NULL;
ValueIdList tgtConvValueIdList;
if (getTableDesc()->getNATable()->hasSerializedColumn())
{
// if serialized columns are present, then create a new row with
// deserialized columns before returning it.
expGen->generateDeserializedMoveExpr
(updatedOutputs,
0,
returnedUpdatedTuppIndex, //projRowTuppIndex,
generator->getInternalFormat(),
returnedUpdatedRowLen,
&returnUpdateExpr,
&returnedUpdatedTupleDesc,
ExpTupleDesc::SHORT_FORMAT,
tgtConvValueIdList);
}
else
{
expGen->generateContiguousMoveExpr
(updatedOutputs,
1, // add conv nodes
0,
returnedUpdatedTuppIndex,
generator->getInternalFormat(),
returnedUpdatedRowLen,
&returnUpdateExpr,
&returnedUpdatedTupleDesc,
ExpTupleDesc::SHORT_FORMAT,
&returnedUpdatedMapTable,
&tgtConvValueIdList);
}
const ValueIdList &indexColList = getIndexDesc()->getIndexColumns();
for (CollIndex ii = 0; ii < tgtConvValueIdList.entries(); ii++)
{
const ValueId &tgtColValueId = updatedOutputs[ii];
const ValueId &tgtColConvValueId = tgtConvValueIdList[ii];
BaseColumn * bc = (BaseColumn*)tgtColValueId.getItemExpr();
const ValueId &indexColValueId = indexColList[bc->getColNumber()];
Attributes * tgtColConvAttr = (generator->getMapInfo(tgtColConvValueId, 0))->getAttr();
Attributes * indexColAttr = (generator->addMapInfo(indexColValueId, 0))->getAttr();
indexColAttr->copyLocationAttrs(tgtColConvAttr);
}
// Set up the returned tuple descriptor for the updated tuple.
//
returnedDesc->setTupleDescriptor(returnedFetchedTuppIndex,
returnedFetchedTupleDesc);
returnedDesc->setTupleDescriptor(returnedUpdatedTuppIndex,
returnedUpdatedTupleDesc);
/*
expGen->assignAtpAndAtpIndex(getScanIndexDesc()->getIndexColumns(),
0, returnedFetchedTuppIndex);
*/
if (isMerge())
{
ValueIdList mergeInsertOutputs;
for (CollIndex ii = 0; ii < mergeInsertRecExprArray().entries(); ii++)
{
const ItemExpr *assignExpr = mergeInsertRecExprArray()[ii].getItemExpr();
ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
mergeInsertOutputs.insert(tgtValueId);
}
MapTable * returnedMergeInsertedMapTable = 0;
ExpTupleDesc * returnedMergeInsertedTupleDesc = NULL;
ValueIdList tgtConvValueIdList;
if (getTableDesc()->getNATable()->hasSerializedColumn())
{
// if serialized columns are present, then create a new row with
// deserialized columns before returning it.
expGen->generateDeserializedMoveExpr
(mergeInsertOutputs,
0,
returnedUpdatedTuppIndex,
generator->getInternalFormat(),
returnedMergeInsertedRowLen,
&returnMergeInsertExpr,
&returnedMergeInsertedTupleDesc,
ExpTupleDesc::SHORT_FORMAT,
tgtConvValueIdList);
}
else
{
expGen->generateContiguousMoveExpr
(mergeInsertOutputs,
1, // add conv nodes
0,
returnedUpdatedTuppIndex,
generator->getInternalFormat(),
returnedMergeInsertedRowLen,
&returnMergeInsertExpr,
&returnedMergeInsertedTupleDesc,
ExpTupleDesc::SHORT_FORMAT,
&returnedMergeInsertedMapTable,
&tgtConvValueIdList);
}
}
}
Cardinality expectedRows = (Cardinality) getEstRowsUsed().getValue();
ULng32 buffersize = getDefault(GEN_DPSO_BUFFER_SIZE);
buffersize = MAXOF(3*convertRowLen, buffersize);
queue_index upqueuelength = (queue_index)getDefault(GEN_DPSO_SIZE_UP);
queue_index downqueuelength = (queue_index)getDefault(GEN_DPSO_SIZE_DOWN);
Int32 numBuffers = getDefault(GEN_DPUO_NUM_BUFFERS);
char * tablename = NULL;
if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable()))
{
if (getIndexDesc() && getIndexDesc()->getNAFileSet())
tablename = space->AllocateAndCopyToAlignedSpace(GenGetQualifiedName(getIndexDesc()->getNAFileSet()->getFileSetName().getObjectName()), 0);
}
else
{
if (getIndexDesc() && getIndexDesc()->getNAFileSet())
tablename = space->AllocateAndCopyToAlignedSpace(GenGetQualifiedName(getIndexDesc()->getNAFileSet()->getFileSetName()), 0);
}
if (! tablename)
tablename =
space->AllocateAndCopyToAlignedSpace(
GenGetQualifiedName(getTableName()), 0);
NAString serverNAS = ActiveSchemaDB()->getDefaults().getValue(HBASE_SERVER);
NAString zkPortNAS = ActiveSchemaDB()->getDefaults().getValue(HBASE_ZOOKEEPER_PORT);
char * server = space->allocateAlignedSpace(serverNAS.length() + 1);
strcpy(server, serverNAS.data());
char * zkPort = space->allocateAlignedSpace(zkPortNAS.length() + 1);
strcpy(zkPort, zkPortNAS.data());
ComTdbHbaseAccess::HbasePerfAttributes * hbpa =
new(space) ComTdbHbaseAccess::HbasePerfAttributes();
if (CmpCommon::getDefault(HBASE_CACHE_BLOCKS) != DF_OFF)
hbpa->setCacheBlocks(TRUE);
// estrowsaccessed is 0 for now, so cache size will be set to minimum
generator->setHBaseNumCacheRows(getEstRowsAccessed().getValue(), hbpa) ;
// create hbasescan_tdb
ComTdbHbaseAccess *hbasescan_tdb = new(space)
ComTdbHbaseAccess(
(isMerge() ? ComTdbHbaseAccess::MERGE_ : ComTdbHbaseAccess::UPDATE_),
tablename,
convert_expr,
scanExpr,
rowIdExpr,
updateExpr,
mergeInsertExpr,
mergeInsertRowIdExpr,
mergeUpdScanExpr,
projExpr,
returnUpdateExpr,
returnMergeInsertExpr,
NULL, // encodedKeyExpr
keyColValExpr,
NULL, // hbaseFilterValExpr
asciiRowLen,
convertRowLen,
updateRowLen,
mergeInsertRowLen,
returnedFetchedRowLen,
returnedUpdatedRowLen,
((rowIdLength > 0) ? rowIdLength : mergeInsertRowIdLen),
convertRowLen,
rowIdAsciiRowLen,
(keyInfo ? keyInfo->getKeyLength() : 0),
keyColValLen,
0, // hbaseFilterValRowLen
asciiTuppIndex,
convertTuppIndex,
updateTuppIndex,
mergeInsertTuppIndex,
mergeInsertRowIdTuppIndex,
returnedFetchedTuppIndex,
returnedUpdatedTuppIndex,
rowIdTuppIndex,
returnedDesc->noTuples()-1,
rowIdAsciiTuppIndex,
0, // keyTuppIndex,
keyColValTuppIndex,
0, // hbaseFilterValTuppIndex
0, // hbaseTimestamp
0, // hbaseVersion
tdbListOfRangeRows,
tdbListOfUniqueRows,
listOfFetchedColNames,
listOfUpdatedColNames,
listOfMergedColNames,
keyInfo,
keyColName,
work_cri_desc,
givenDesc,
returnedDesc,
downqueuelength,
upqueuelength,
expectedRows,
numBuffers,
buffersize,
server,
zkPort,
hbpa
);
generator->initTdbFields(hbasescan_tdb);
if (getTableDesc()->getNATable()->isHbaseRowTable())
hbasescan_tdb->setRowwiseFormat(TRUE);
if (getTableDesc()->getNATable()->isSeabaseTable())
{
hbasescan_tdb->setSQHbaseTable(TRUE);
if (getTableDesc()->getNATable()->isSQLMXAlignedTable())
hbasescan_tdb->setAlignedFormat(TRUE);
if (CmpCommon::getDefault(HBASE_SQL_IUD_SEMANTICS) == DF_ON)
hbasescan_tdb->setHbaseSqlIUD(TRUE);
if (getTableDesc()->getNATable()->isEnabledForDDLQI())
generator->objectUids().insert(
getTableDesc()->getNATable()->objectUid().get_value());
}
if (keyInfo && getSearchKey() && getSearchKey()->isUnique())
hbasescan_tdb->setUniqueKeyInfo(TRUE);
if (returnRow)
hbasescan_tdb->setReturnRow(TRUE);
if (rowsAffected() != GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED)
hbasescan_tdb->setComputeRowsAffected(TRUE);
if (! tdbListOfUniqueRows)
{
hbasescan_tdb->setSubsetOper(TRUE);
}
if (uniqueRowsetHbaseOper()) {
hbasescan_tdb->setRowsetOper(TRUE);
hbasescan_tdb->setHbaseRowsetVsbbSize(getDefault(HBASE_ROWSET_VSBB_SIZE));
}
if (canDoCheckAndUpdel())
hbasescan_tdb->setCanDoCheckAndUpdel(TRUE);
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(hbasescan_tdb, 0, 0, generator));
}
if ((generator->computeStats()) &&
(generator->collectStatsType() == ComTdb::PERTABLE_STATS
|| generator->collectStatsType() == ComTdb::OPERATOR_STATS))
{
hbasescan_tdb->setPertableStatsTdbId((UInt16)generator->
getPertableStatsTdbId());
}
if (generator->isTransactionNeeded())
{
setTransactionRequired(generator);
}
else
{
if (noDTMxn())
hbasescan_tdb->setUseHbaseXn(TRUE);
}
generator->setFoundAnUpdate(TRUE);
generator->setCriDesc(givenDesc, Generator::DOWN);
generator->setCriDesc(returnedDesc, Generator::UP);
generator->setGenObj(this, hbasescan_tdb);
return 0;
}
bool compHBaseQualif ( NAString a , NAString b)
{
char * a_str = (char*)(a.data());
char * b_str = (char*)(b.data());
return (strcmp (&(a_str[sizeof(short) + sizeof(UInt32)]), &(b_str[sizeof(short)+ sizeof(UInt32)]))<0);
};
extern Int64 getDefaultSlidingSampleSize(Int64 tblRowCount);
extern Int64 getDefaultSampleSize(Int64 tblRowCount);
short HbaseInsert::codeGen(Generator *generator)
{
Space * space = generator->getSpace();
ExpGenerator * expGen = generator->getExpGenerator();
// allocate a map table for the retrieved columns
MapTable * last_map_table = generator->getLastMapTable();
NABoolean returnRow = getReturnRow(this, getIndexDesc());
if (getIsTrafLoadPrep())
returnRow = isReturnRow();
ex_cri_desc * givenDesc = generator->getCriDesc(Generator::DOWN);
ex_cri_desc * returnedDesc = givenDesc;
if (returnRow)
returnedDesc = new(space) ex_cri_desc(givenDesc->noTuples() + 1, space);
const Int32 returnRowTuppIndex = returnedDesc->noTuples() - 1;
const Int32 work_atp = 1;
ex_cri_desc * workCriDesc = NULL;
const UInt16 insertTuppIndex = 2;
const UInt16 rowIdTuppIndex = 3;
const UInt16 loggingTuppIndex = 4;
const UInt16 projRowTuppIndex = 5;
workCriDesc = new(space) ex_cri_desc(6, space);
ULng32 loggingRowLen = 0;
NABoolean addDefaultValues = TRUE;
NABoolean hasAddedColumns = FALSE;
if (getTableDesc()->getNATable()->hasAddedColumn())
hasAddedColumns = TRUE;
ValueIdList insertVIDList;
ValueIdList keyVIDList;
NAColumnArray colArray;
NAColumn *col;
ValueIdList returnRowVIDList;
NABoolean upsertColsWereSkipped = FALSE;
for (CollIndex ii = 0; ii < newRecExprArray().entries(); ii++)
{
const ItemExpr *assignExpr = newRecExprArray()[ii].getItemExpr();
ValueId tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
ValueId srcValueId = assignExpr->child(1)->castToItemExpr()->getValueId();
col = tgtValueId.getNAColumn( TRUE );
// if upsert stmt and this assign was not specified by user, skip it.
// If used, it will overwrite existing values if this row exists in the table.
if ((isUpsert()) &&
(NOT ((Assign*)assignExpr)->isUserSpecified()) &&
(NOT col->isSystemColumn()) &&
(NOT col->isIdentityColumn()))
{
upsertColsWereSkipped = TRUE;
continue;
}
if (returnRow)
returnRowVIDList.insert(tgtValueId);
if ( col != NULL )
colArray.insert( col );
ItemExpr * child1Expr = assignExpr->child(1);
const NAType &givenType = tgtValueId.getType();
ItemExpr * ie = new(generator->wHeap())
Cast(child1Expr, &givenType);
if (HbaseAccess::isEncodingNeededForSerialization
(assignExpr->child(0)->castToItemExpr()))
{
ie = new(generator->wHeap()) CompEncode
(ie, FALSE, -1, CollationInfo::Sort, TRUE);
}
ie->bindNode(generator->getBindWA());
if (generator->getBindWA()->errStatus())
{
GenAssert(0,"bindNode failed");
}
insertVIDList.insert(ie->getValueId());
}
const NATable *naTable = getTableDesc()->getNATable();
ULng32 insertRowLen = 0;
ExpTupleDesc * tupleDesc = 0;
ExpTupleDesc::TupleDataFormat tupleFormat;
if (naTable->isSQLMXAlignedTable())
tupleFormat = ExpTupleDesc::SQLMX_ALIGNED_FORMAT;
else
tupleFormat = ExpTupleDesc::SQLARK_EXPLODED_FORMAT;
ex_expr *insertExpr = 0;
expGen->generateContiguousMoveExpr(
insertVIDList,
0, // dont add convert nodes
1, // work atp
insertTuppIndex,
tupleFormat,
insertRowLen,
&insertExpr,
&tupleDesc,
ExpTupleDesc::LONG_FORMAT,
NULL,
NULL,
0,
NULL,
NULL,
&colArray);
if (addDefaultValues)
{
expGen->addDefaultValues(insertVIDList,
(upsertColsWereSkipped ? colArray : getIndexDesc()->getAllColumns()),
tupleDesc);
}
ex_expr * loggingDataExpr = NULL;
ExpTupleDesc * loggingDataTupleDesc = NULL;
//--bulk load error logging
if (CmpCommon::getDefault(TRAF_LOAD_LOG_ERROR_ROWS) == DF_ON) {
CmpContext *cmpContext = generator->currentCmpContext();
ValueIdList loggingDataVids;
for (CollIndex i = 0; i < insertVIDList.entries(); i++)
{
ItemExpr &inputExpr = *(insertVIDList[i].getItemExpr());
const NAType &formalType = insertVIDList[i].getType();
ItemExpr *lmExpr = NULL;
ItemExpr *lmExpr2 = NULL;
int res;
lmExpr = &inputExpr;
res = CreateAllCharsExpr(formalType, // [IN] Child output type
*lmExpr, // [IN] Actual input value
cmpContext, // [IN] Compilation context
lmExpr2 // [OUT] Returned expression
);
GenAssert(res == 0 && lmExpr != NULL,
"Error building expression tree for LM child Input value");
if (lmExpr2)
{
lmExpr2->bindNode(generator->getBindWA());
loggingDataVids.insert(lmExpr2->getValueId());
}
} // for (i = 0; i < insertVIDList.entries(); i++)
if (loggingDataVids.entries()>0)
{
expGen->generateContiguousMoveExpr (
loggingDataVids, // [IN] source ValueIds
FALSE, // [IN] add convert nodes?
1, // [IN] target atp number (work atp 1)
loggingTuppIndex, // [IN] target tupp index
tupleFormat, // [IN] target tuple data format
loggingRowLen, // [OUT] target tuple length
&loggingDataExpr, // [OUT] move expression
&loggingDataTupleDesc, // [optional OUT] target tuple desc
ExpTupleDesc::LONG_FORMAT // [optional IN] target desc format
);
}
// Add the tuple descriptor for request values to the work ATP
workCriDesc->setTupleDescriptor(loggingTuppIndex, loggingDataTupleDesc);
}
////////////
// If constraints are present, generate constraint expression.
// Only works for base tables because the constraint information is
// stored with the table descriptor which doesn't exist for indexes.
//
ex_expr * constraintExpr = NULL;
Queue * listOfUpdatedColNames = NULL;
Lng32 keyAttrPos = -1;
ex_expr * rowIdExpr = NULL;
ULng32 rowIdLen = 0;
ValueIdList savedInputVIDlist;
NAList<Attributes*> savedInputAttrsList;
const ValueIdList &indexVIDlist = getIndexDesc()->getIndexColumns();
CollIndex jj = 0;
for (CollIndex ii = 0; ii < newRecExprArray().entries(); ii++)
{
const ItemExpr *assignExpr = newRecExprArray()[ii].getItemExpr();
const ValueId &tgtValueId = assignExpr->child(0)->castToItemExpr()->getValueId();
const ValueId &indexValueId = indexVIDlist[ii];
col = tgtValueId.getNAColumn( TRUE );
if ((isUpsert()) &&
(NOT ((Assign*)assignExpr)->isUserSpecified()) &&
(NOT col->isSystemColumn()) &&
(NOT col->isIdentityColumn()))
{
continue;
}
ValueId &srcValId = insertVIDList[jj];
jj++;
Attributes * colAttr = (generator->addMapInfo(tgtValueId, 0))->getAttr();
Attributes * indexAttr = (generator->addMapInfo(indexValueId, 0))->getAttr();
Attributes * castAttr = (generator->getMapInfo(srcValId, 0))->getAttr();
colAttr->copyLocationAttrs(castAttr);
indexAttr->copyLocationAttrs(castAttr);
// if any of the target columns is also an input value to this operator, then
// make the value id of that input point to the location of the target column.
// This is done as the input column value will become the target after this
// insert expr is evaluated.
// This is done if this value will be part of an expression that needs to
// be evaluated on the updated columns.
const ValueIdSet& inputSet = getGroupAttr()->getCharacteristicInputs();
ValueId inputValId;
if ((inputSet.entries() > 0) &&
(getIndexDesc()->isClusteringIndex() && getCheckConstraints().entries()))
{
NAColumn *inputCol = NULL;
NABoolean found = FALSE;
for (inputValId = inputSet.init();
((NOT found) && (inputSet.next(inputValId)));
inputSet.advance(inputValId) )
{
if ((inputValId.getItemExpr()->getOperatorType() != ITM_BASECOLUMN) &&
(inputValId.getItemExpr()->getOperatorType() != ITM_INDEXCOLUMN))
{
continue;
}
if (inputValId.getItemExpr()->getOperatorType() == ITM_BASECOLUMN)
{
inputCol = ((BaseColumn*)inputValId.getItemExpr())->getNAColumn();
}
else
{
inputCol = ((IndexColumn*)inputValId.getItemExpr())->getNAColumn();
}
if ((col->getColName() == inputCol->getColName()) &&
(col->getHbaseColFam() == inputCol->getHbaseColFam()) &&
(col->getHbaseColQual() == inputCol->getHbaseColQual()) &&
(col->getNATable()->getTableName().getQualifiedNameAsAnsiString() ==
inputCol->getNATable()->getTableName().getQualifiedNameAsAnsiString()))
{
found = TRUE;
break;
}
} // for
if (found)
{
Attributes * inputValAttr = (generator->addMapInfo(inputValId, 0))->getAttr();
// save original location attributes. These will be restored once
// constr expr has been generated.
Attributes * savedValAttr = new(generator->wHeap()) Attributes();
savedValAttr->copyLocationAttrs(inputValAttr);
savedInputAttrsList.insert(savedValAttr);
savedInputVIDlist.insert(inputValId);
inputValAttr->copyLocationAttrs(castAttr);
}
} // if
}
ULng32 f;
expGen->generateKeyEncodeExpr(
getIndexDesc(), // describes the columns
work_atp, // work Atp
rowIdTuppIndex, // work Atp entry
ExpTupleDesc::SQLMX_KEY_FORMAT, // Tuple format
rowIdLen, // Key length
&rowIdExpr, // Encode expression
FALSE,
f,
NULL,
TRUE); // handle serialization
if (getIndexDesc()->isClusteringIndex() && getCheckConstraints().entries())
{
ItemExpr *constrTree =
getCheckConstraints().rebuildExprTree(ITM_AND, TRUE, TRUE);
if (getTableDesc()->getNATable()->hasSerializedColumn())
constrTree = generator->addCompDecodeForDerialization(constrTree);
expGen->generateExpr(constrTree->getValueId(), ex_expr::exp_SCAN_PRED,
&constraintExpr);
// restore original attribute values
for (Lng32 i = 0; i < savedInputVIDlist.entries(); i++)
{
ValueId inputValId = savedInputVIDlist[i];
Attributes * inputValAttr = (generator->getMapInfo(inputValId, 0))->getAttr();
inputValAttr->copyLocationAttrs(savedInputAttrsList[i]);
}
}
listOfUpdatedColNames = new(space) Queue(space);
std::vector<NAString> columNamesVec;
if (NOT naTable->isSQLMXAlignedTable())
{
for (CollIndex c = 0; c < colArray.entries(); c++)
{
const NAColumn * nac = colArray[c];
NAString cnInList;
HbaseAccess::createHbaseColId(nac, cnInList,
(getIndexDesc()->getNAFileSet()->getKeytag() != 0));
if (this->getIsTrafLoadPrep())
{
UInt32 pos = (UInt32)c +1;
cnInList.prepend((char*)&pos, sizeof(UInt32));
columNamesVec.push_back(cnInList);
}
else
{
char * colNameInList =
space->AllocateAndCopyToAlignedSpace(cnInList, 0);
listOfUpdatedColNames->insert(colNameInList);
}
}
if (getIsTrafLoadPrep())
{
std::sort(columNamesVec.begin(), columNamesVec.end(),compHBaseQualif);
for (std::vector<NAString>::iterator it = columNamesVec.begin() ; it != columNamesVec.end(); ++it)
{
NAString cnInList2 = *it;
char * colNameInList =
space->AllocateAndCopyToAlignedSpace(cnInList2, 0);
listOfUpdatedColNames->insert(colNameInList);
}
}
}
else
{
NAString cnInList(SEABASE_DEFAULT_COL_FAMILY);
cnInList += ":";
unsigned char c = 1;
cnInList.append((char*)&c, 1);
short len = cnInList.length();
cnInList.prepend((char*)&len, sizeof(short));
char * colNameInList =
space->AllocateAndCopyToAlignedSpace(cnInList, 0);
listOfUpdatedColNames->insert(colNameInList);
}
// Assign attributes to the ASSIGN nodes of the newRecExpArray()
// This is not the same as the generateContiguousMoveExpr() call
// above since different valueId's are added to the mapTable.
//
ULng32 tempInsertRowLen = 0;
ExpTupleDesc * tempTupleDesc = 0;
expGen->processValIdList(newRecExprArray(),
tupleFormat,
tempInsertRowLen,
0,
returnRowTuppIndex, // insertTuppIndex,
&tempTupleDesc,
ExpTupleDesc::LONG_FORMAT,
0, NULL, &colArray);
// Add the inserted tuple descriptor to the work cri descriptor.
//
if (workCriDesc)
workCriDesc->setTupleDescriptor(insertTuppIndex, tupleDesc);
ex_expr * projExpr = NULL;
ULng32 projRowLen = 0;
ExpTupleDesc * projRowTupleDesc = 0;
if (returnRow)
{
if (getTableDesc()->getNATable()->hasSerializedColumn())
{
ValueIdList deserColVIDList;
// if serialized columns are present, then create a new row with
// deserialized columns before returning it.
expGen->generateDeserializedMoveExpr
(returnRowVIDList,
0,//work_atp,
returnRowTuppIndex, //projRowTuppIndex,
generator->getInternalFormat(),
projRowLen,
&projExpr,
&projRowTupleDesc,
ExpTupleDesc::SHORT_FORMAT,
deserColVIDList);
workCriDesc->setTupleDescriptor(projRowTuppIndex, projRowTupleDesc);
// make the location of returnRowVIDlist point to the newly generated values.
for (CollIndex ii = 0; ii < returnRowVIDList.entries(); ii++)
{
const ValueId &retColVID = returnRowVIDList[ii];
const ValueId &deserColVID = deserColVIDList[ii];
Attributes * retColAttr = (generator->getMapInfo(retColVID, 0))->getAttr();
Attributes * deserColAttr = (generator->addMapInfo(deserColVID, 0))->getAttr();
retColAttr->copyLocationAttrs(deserColAttr);
}
expGen->assignAtpAndAtpIndex(returnRowVIDList,
0, returnRowTuppIndex);
}
else
{
expGen->processValIdList(returnRowVIDList,
tupleFormat,
tempInsertRowLen,
0,
returnRowTuppIndex);
}
}
ComTdbDp2Oper::SqlTableType stt = ComTdbDp2Oper::NOOP_;
if (getIndexDesc()->getNAFileSet()->isKeySequenced())
{
const NAColumnArray & column_array = getIndexDesc()->getAllColumns();
if ((column_array[0]->isSyskeyColumn()) &&
(column_array[0]->getType()->getNominalSize() >= 4)) {
stt = ComTdbDp2Oper::KEY_SEQ_WITH_SYSKEY_;
}
else
stt = ComTdbDp2Oper::KEY_SEQ_;
}
Cardinality expectedRows = (Cardinality) getEstRowsUsed().getValue();
ULng32 buffersize = getDefault(GEN_DP2I_BUFFER_SIZE);
buffersize = MAXOF(3*insertRowLen, buffersize);
queue_index upqueuelength = (queue_index)getDefault(GEN_DP2I_SIZE_UP);
queue_index downqueuelength = (queue_index)getDefault(GEN_DP2I_SIZE_DOWN);
Int32 numBuffers = getDefault(GEN_DP2I_NUM_BUFFERS);
if (getInsertType() == Insert::VSBB_INSERT_USER)
downqueuelength = 400;
char * tablename = NULL;
if ((getTableDesc()->getNATable()->isHbaseRowTable()) ||
(getTableDesc()->getNATable()->isHbaseCellTable()))
{
tablename = space->AllocateAndCopyToAlignedSpace(GenGetQualifiedName(getIndexDesc()->getIndexName().getObjectName()), 0);
}
else
{
tablename = space->AllocateAndCopyToAlignedSpace(
GenGetQualifiedName(getIndexDesc()->getIndexName()), 0);
}
NAString serverNAS = ActiveSchemaDB()->getDefaults().getValue(HBASE_SERVER);
NAString zkPortNAS = ActiveSchemaDB()->getDefaults().getValue(HBASE_ZOOKEEPER_PORT);
char * server = space->allocateAlignedSpace(serverNAS.length() + 1);
strcpy(server, serverNAS.data());
char * zkPort = space->allocateAlignedSpace(zkPortNAS.length() + 1);
strcpy(zkPort, zkPortNAS.data());
ComTdbHbaseAccess::HbasePerfAttributes * hbpa =
new(space) ComTdbHbaseAccess::HbasePerfAttributes();
ComTdbHbaseAccess::ComTdbAccessType t;
if (isUpsert())
{
if (getInsertType() == Insert::UPSERT_LOAD)
t = ComTdbHbaseAccess::UPSERT_LOAD_;
else
t = ComTdbHbaseAccess::UPSERT_;
}
else
t = ComTdbHbaseAccess::INSERT_;
// create hbasescan_tdb
ComTdbHbaseAccess *hbasescan_tdb = new(space)
ComTdbHbaseAccess(
t,
tablename,
insertExpr,
constraintExpr,
rowIdExpr,
loggingDataExpr, // logging expr
NULL, // mergeInsertExpr
NULL, // mergeInsertRowIdExpr
NULL, // mergeUpdScanExpr
NULL, // projExpr
projExpr, // returnedUpdatedExpr
NULL, // returnMergeUpdateExpr
NULL, // encodedKeyExpr,
NULL, // keyColValExpr
NULL, // hbaseFilterValExpr
0, //asciiRowLen,
insertRowLen,
loggingRowLen, //loggingRowLen
0, // mergeInsertRowLen
0, // fetchedRowLen
projRowLen, // returnedUpdatedRowLen
rowIdLen,
0, //outputRowLen,
0, //rowIdAsciiRowLen
0, // keyLen
0, // keyColValLen
0, // hbaseFilterValRowLen
0, //asciiTuppIndex,
insertTuppIndex,
loggingTuppIndex, //loggingTuppIndex
0, // mergeInsertTuppIndex
0, // mergeInsertRowIdTuppIndex
0, // returnedFetchedTuppIndex
projRowTuppIndex, // returnedUpdatedTuppIndex
rowIdTuppIndex,
returnRowTuppIndex, //returnedDesc->noTuples()-1,
0, // rowIdAsciiTuppIndex
0, // keyTuppIndex
0, // keyColValTuppIndex
0, // hbaseFilterValTuppIndex
0, // hbaseTimestamp
0, // hbaseVersion
NULL,
NULL, //tdbListOfDelRows,
NULL,
listOfUpdatedColNames,
NULL,
NULL,
NULL,
workCriDesc,
givenDesc,
returnedDesc,
downqueuelength,
upqueuelength,
expectedRows,
numBuffers,
buffersize,
server,
zkPort,
hbpa
);
generator->initTdbFields(hbasescan_tdb);
if (getTableDesc()->getNATable()->isSeabaseTable())
{
hbasescan_tdb->setSQHbaseTable(TRUE);
if (naTable->isSQLMXAlignedTable())
hbasescan_tdb->setAlignedFormat(TRUE);
if (CmpCommon::getDefault(HBASE_SQL_IUD_SEMANTICS) == DF_ON)
hbasescan_tdb->setHbaseSqlIUD(TRUE);
if ((isUpsert()) ||
(noCheck()))
hbasescan_tdb->setHbaseSqlIUD(FALSE);
if ((getInsertType() == Insert::VSBB_INSERT_USER) ||
(getInsertType() == Insert::UPSERT_LOAD)) {
hbasescan_tdb->setVsbbInsert(TRUE);
hbasescan_tdb->setHbaseRowsetVsbbSize(getDefault(HBASE_ROWSET_VSBB_SIZE));
}
if ((isUpsert()) &&
(getInsertType() == Insert::UPSERT_LOAD))
{
// this will cause tupleflow operator to send in an EOD to this upsert
// operator. On seeing that, executor will flush the buffers.
generator->setVSBBInsert(TRUE);
}
// setting parameters for hbase bulk load integration
hbasescan_tdb->setIsTrafodionLoadPrep(this->getIsTrafLoadPrep());
if (hbasescan_tdb->getIsTrafodionLoadPrep())
{
NAString tlpTmpLocationNAS = ActiveSchemaDB()->getDefaults().getValue(TRAF_LOAD_PREP_TMP_LOCATION);
char * tlpTmpLocation = space->allocateAlignedSpace(tlpTmpLocationNAS.length() + 1);
strcpy(tlpTmpLocation, tlpTmpLocationNAS.data());
hbasescan_tdb->setLoadPrepLocation(tlpTmpLocation);
hbasescan_tdb->setNoDuplicates(CmpCommon::getDefault(TRAF_LOAD_PREP_SKIP_DUPLICATES) == DF_OFF);
hbasescan_tdb->setMaxHFileSize(CmpCommon::getDefaultLong(TRAF_LOAD_MAX_HFILE_SIZE));
// For sample file, set the sample location in HDFS and the sampling rate.
// Move this later, when sampling is no longer limited to bulk loads.
if (getCreateUstatSample())
{
NAString sampleLocationNAS = ActiveSchemaDB()->getDefaults().getValue(TRAF_SAMPLE_TABLE_LOCATION);
char * sampleLocation = space->allocateAlignedSpace(sampleLocationNAS.length() + 1);
strcpy(sampleLocation, sampleLocationNAS.data());
hbasescan_tdb->setSampleLocation(sampleLocation);
Int64 totalRows = (Int64)(getInputCardinality().getValue());
//printf("*** Incoming cardinality is " PF64 ".\n", totalRows);
Int64 sampleRows;
if (CmpCommon::getDefault(USTAT_USE_SLIDING_SAMPLE_RATIO) == DF_ON)
sampleRows = getDefaultSlidingSampleSize(totalRows);
else
sampleRows = getDefaultSampleSize(totalRows);
Float32 sampleRate = (Float32)sampleRows / (Float32)totalRows;
//printf("*** In HbaseInsert::codeGen(): Sample percentage is %.2f.\n", sampleRate);
hbasescan_tdb->setSamplingRate(sampleRate);
}
hbasescan_tdb->setContinueOnError(CmpCommon::getDefault(TRAF_LOAD_CONTINUE_ON_ERROR) == DF_ON);
hbasescan_tdb->setLogErrorRows(CmpCommon::getDefault(TRAF_LOAD_LOG_ERROR_ROWS) == DF_ON);
hbasescan_tdb->setMaxErrorRows((UInt32)CmpCommon::getDefaultNumeric(TRAF_LOAD_MAX_ERROR_ROWS));
NAString errCountRowIdNAS = CmpCommon::getDefaultString(TRAF_LOAD_ERROR_COUNT_ID);
char * errCountRowId = NULL;
if (errCountRowIdNAS.length() > 0)
{
errCountRowId = space->allocateAlignedSpace(errCountRowIdNAS.length() + 1);
strcpy(errCountRowId, errCountRowIdNAS.data());
hbasescan_tdb->setErrCountRowId(errCountRowId);
}
NAString errCountTabNAS = ActiveSchemaDB()->getDefaults().getValue(TRAF_LOAD_ERROR_COUNT_TABLE);
char * errCountTab = NULL;
if (errCountTabNAS.length() > 0)
{
errCountTab = space->allocateAlignedSpace(errCountTabNAS.length() + 1);
strcpy(errCountTab, errCountTabNAS.data());
hbasescan_tdb->setErrCountTab(errCountTab);
}
NAString loggingLocNAS = ActiveSchemaDB()->getDefaults().getValue(TRAF_LOAD_ERROR_LOGGING_LOCATION);
char * loggingLoc = NULL;
if (loggingLocNAS.length() > 0)
{
loggingLoc = space->allocateAlignedSpace(loggingLocNAS.length() + 1);
strcpy(loggingLoc, loggingLocNAS.data());
hbasescan_tdb->setLoggingLocation(loggingLoc);
}
}
// setting parameters for upsert statement; not related to the hbase bulk load integration
NABoolean traf_upsert_adjust_params =
(CmpCommon::getDefault(TRAF_UPSERT_ADJUST_PARAMS) == DF_ON);
if (traf_upsert_adjust_params)
{
ULng32 wbSize = getDefault(TRAF_UPSERT_WB_SIZE);
NABoolean traf_auto_flush =
(CmpCommon::getDefault(TRAF_UPSERT_AUTO_FLUSH) == DF_ON);
NABoolean traf_write_toWAL =
(CmpCommon::getDefault(TRAF_UPSERT_WRITE_TO_WAL) == DF_ON);
hbasescan_tdb->setTrafWriteToWAL(traf_write_toWAL);
hbasescan_tdb->setCanAdjustTrafParams(true);
hbasescan_tdb->setWBSize(wbSize);
hbasescan_tdb->setIsTrafAutoFlush(traf_auto_flush);
}
if (getTableDesc()->getNATable()->isEnabledForDDLQI())
generator->objectUids().insert(
getTableDesc()->getNATable()->objectUid().get_value());
}
else
{
if (getTableDesc()->getNATable()->isHbaseRowTable()) //rowwiseHbaseFormat())
hbasescan_tdb->setRowwiseFormat(TRUE);
}
if (returnRow)
hbasescan_tdb->setReturnRow(TRUE);
if (rowsAffected() != GenericUpdate::DO_NOT_COMPUTE_ROWSAFFECTED)
hbasescan_tdb->setComputeRowsAffected(TRUE);
if (stt == ComTdbDp2Oper::KEY_SEQ_WITH_SYSKEY_)
hbasescan_tdb->setAddSyskeyTS(TRUE);
if(!generator->explainDisabled()) {
generator->setExplainTuple(
addExplainInfo(hbasescan_tdb, 0, 0, generator));
}
if ((generator->computeStats()) &&
(generator->collectStatsType() == ComTdb::PERTABLE_STATS
|| generator->collectStatsType() == ComTdb::OPERATOR_STATS))
{
hbasescan_tdb->setPertableStatsTdbId((UInt16)generator->
getPertableStatsTdbId());
}
if (generator->isTransactionNeeded())
setTransactionRequired(generator);
else if (noDTMxn())
hbasescan_tdb->setUseHbaseXn(TRUE);
generator->setFoundAnUpdate(TRUE);
generator->setCriDesc(givenDesc, Generator::DOWN);
generator->setCriDesc(returnedDesc, Generator::UP);
generator->setGenObj(this, hbasescan_tdb);
return 0;
}
| 1 | 6,905 | Does upsert pass this check also? | apache-trafodion | cpp |
@@ -43,6 +43,9 @@ func (r *helper) Patch(name types.NamespacedName, kind, apiVersion string, patch
WithField("stderr", ioStreams.ErrOut.(*bytes.Buffer).String()).Warn("running the patch command failed")
return err
}
+ r.logger.
+ WithField("stdout", ioStreams.Out.(*bytes.Buffer).String()).
+ WithField("stderr", ioStreams.ErrOut.(*bytes.Buffer).String()).Info("patch command successful")
return nil
}
| 1 | package resource
import (
"bytes"
"fmt"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"k8s.io/cli-runtime/pkg/genericclioptions"
kcmdpatch "k8s.io/kubectl/pkg/cmd/patch"
cmdutil "k8s.io/kubectl/pkg/cmd/util"
)
var (
patchTypes = map[string]types.PatchType{
"json": types.JSONPatchType,
"merge": types.MergePatchType,
"strategic": types.StrategicMergePatchType,
}
)
// Patch invokes the kubectl patch command with the given resource, patch and patch type
func (r *helper) Patch(name types.NamespacedName, kind, apiVersion string, patch []byte, patchType string) error {
ioStreams := genericclioptions.IOStreams{
In: &bytes.Buffer{},
Out: &bytes.Buffer{},
ErrOut: &bytes.Buffer{},
}
factory, err := r.getFactory(name.Namespace)
if err != nil {
return err
}
patchOptions, err := r.setupPatchCommand(name.Name, kind, apiVersion, patchType, factory, string(patch), ioStreams)
if err != nil {
r.logger.WithError(err).Error("failed to setup patch command")
return err
}
err = patchOptions.RunPatch()
if err != nil {
r.logger.WithError(err).
WithField("stdout", ioStreams.Out.(*bytes.Buffer).String()).
WithField("stderr", ioStreams.ErrOut.(*bytes.Buffer).String()).Warn("running the patch command failed")
return err
}
return nil
}
func (r *helper) setupPatchCommand(name, kind, apiVersion, patchType string, f cmdutil.Factory, patch string, ioStreams genericclioptions.IOStreams) (*kcmdpatch.PatchOptions, error) {
cmd := kcmdpatch.NewCmdPatch(f, ioStreams)
cmd.Flags().Parse([]string{})
gv, err := schema.ParseGroupVersion(apiVersion)
if err != nil {
r.logger.WithError(err).WithField("groupVersion", apiVersion).Error("cannot parse group version")
return nil, err
}
args := []string{fmt.Sprintf("%s.%s.%s/%s", kind, gv.Version, gv.Group, name)}
o := kcmdpatch.NewPatchOptions(ioStreams)
o.Complete(f, cmd, args)
if patchType == "" {
patchType = "strategic"
}
_, ok := patchTypes[patchType]
if !ok {
return nil, fmt.Errorf("Invalid patch type: %s. Valid patch types are 'strategic', 'merge' or 'json'", patchType)
}
o.PatchType = patchType
o.Patch = patch
return o, nil
}
| 1 | 15,594 | Apologies reviewers, I did end up pushing another change. I was starting to request SRE-P help to get the apiserver configs from some clusters before and after, when I realized I could just log the stdout from the patch command to see whether anything was changed or not. Much simpler to verify whether my work did or did not change the config during the transition. Also fixed the spacing issue you mentioned, @abhinavdahiya. Thanks all. | openshift-hive | go
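For illustration, a minimal sketch of the logging described above, pulled out into a standalone helper. The helper name is hypothetical; the logger is assumed to be a logrus FieldLogger, matching the surrounding code:
package resource
import (
	"bytes"
	log "github.com/sirupsen/logrus"
	"k8s.io/cli-runtime/pkg/genericclioptions"
)
// reportPatchResult (hypothetical) drains the in-memory buffers backing the
// IOStreams and logs them, so the operator logs show whether kubectl
// reported an actual change.
func reportPatchResult(logger log.FieldLogger, ioStreams genericclioptions.IOStreams) {
	logger.
		WithField("stdout", ioStreams.Out.(*bytes.Buffer).String()).
		WithField("stderr", ioStreams.ErrOut.(*bytes.Buffer).String()).
		Info("patch command successful")
}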
@@ -23,6 +23,7 @@ import (
const (
kbfsRepoDir = ".kbfs_git"
kbfsConfigName = "kbfs_config"
+ kbfsConfigNameTemp = "._kbfs_config"
gitSuffixToIgnore = ".git"
kbfsDeletedReposDir = ".kbfs_deleted_repos"
) | 1 | // Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libgit
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"regexp"
"strings"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/kbfs/libfs"
"github.com/keybase/kbfs/libkbfs"
"github.com/pkg/errors"
)
const (
kbfsRepoDir = ".kbfs_git"
kbfsConfigName = "kbfs_config"
gitSuffixToIgnore = ".git"
kbfsDeletedReposDir = ".kbfs_deleted_repos"
)
// This character set is what Github supports in repo names. It's
// probably to avoid any problems when cloning onto filesystems that
// have different Unicode decomposition schemes
// (https://en.wikipedia.org/wiki/Unicode_equivalence). There's no
// internal reason to be so restrictive, but it probably makes sense
// to start off more restrictive and then relax things later as we
// test.
var repoNameRE = regexp.MustCompile(`^([a-zA-Z0-9][a-zA-Z0-9_\.-]*)$`)
func checkValidRepoName(repoName string, config libkbfs.Config) bool {
return len(repoName) >= 1 &&
uint32(len(repoName)) <= config.MaxNameBytes() &&
(os.Getenv("KBFS_GIT_REPONAME_SKIP_CHECK") != "" ||
repoNameRE.MatchString(repoName))
}
// UpdateRepoMD lets the Keybase service know that a repo's MD has
// been updated.
func UpdateRepoMD(ctx context.Context, config libkbfs.Config,
tlfHandle *libkbfs.TlfHandle, fs *libfs.FS) error {
folder := tlfHandle.ToFavorite().ToKBFolder(false)
// Get the user-formatted repo name.
f, err := fs.Open(kbfsConfigName)
if err != nil {
return err
}
defer f.Close()
buf, err := ioutil.ReadAll(f)
if err != nil {
return err
}
c, err := configFromBytes(buf)
if err != nil {
return err
}
log := config.MakeLogger("")
log.CDebugf(ctx, "Putting git MD update")
err = config.KBPKI().PutGitMetadata(
ctx, folder, keybase1.RepoID(c.ID.String()),
keybase1.GitRepoName(c.Name))
if err != nil {
// Just log the put error, it shouldn't block the success of
// the overall git operation.
log.CDebugf(ctx, "Failed to put git metadata: %+v", err)
}
return nil
}
func createNewRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string, fs *libfs.FS) (ID, error) {
// TODO: take a global repo lock here to make sure only one
// client generates the repo ID.
repoID, err := makeRandomID()
if err != nil {
return NullID, err
}
config.MakeLogger("").CDebugf(ctx,
"Creating a new repo %s in %s: repoID=%s",
repoName, tlfHandle.GetCanonicalPath(), repoID)
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return NullID, err
}
c := &Config{
ID: repoID,
Name: repoName,
CreatorUID: session.UID.String(),
Ctime: config.Clock().Now().UnixNano(),
}
buf, err := c.toBytes()
if err != nil {
return NullID, err
}
f, err := fs.Create(kbfsConfigName)
if err != nil {
return NullID, err
}
defer f.Close()
_, err = f.Write(buf)
if err != nil {
return NullID, err
}
err = UpdateRepoMD(ctx, config, tlfHandle, fs)
if err != nil {
return NullID, err
}
return repoID, nil
}
func normalizeRepoName(repoName string) string {
return strings.TrimSuffix(strings.ToLower(repoName), gitSuffixToIgnore)
}
func lookupOrCreateDir(ctx context.Context, config libkbfs.Config,
n libkbfs.Node, name string) (libkbfs.Node, error) {
newNode, _, err := config.KBFSOps().Lookup(ctx, n, name)
switch errors.Cause(err).(type) {
case libkbfs.NoSuchNameError:
newNode, _, err = config.KBFSOps().CreateDir(ctx, n, name)
if err != nil {
return nil, err
}
case nil:
default:
return nil, err
}
return newNode, nil
}
func getOrCreateRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string, uniqID string, createOnly bool) (*libfs.FS, ID, error) {
if !checkValidRepoName(repoName, config) {
return nil, NullID, errors.WithStack(libkb.InvalidRepoNameError{Name: repoName})
}
rootNode, _, err := config.KBFSOps().GetOrCreateRootNode(
ctx, tlfHandle, libkbfs.MasterBranch)
if err != nil {
return nil, NullID, err
}
normalizedRepoName := normalizeRepoName(repoName)
repoDir, err := lookupOrCreateDir(ctx, config, rootNode, kbfsRepoDir)
if err != nil {
return nil, NullID, err
}
_, err = lookupOrCreateDir(ctx, config, repoDir, normalizedRepoName)
if err != nil {
return nil, NullID, err
}
fs, err := libfs.NewFS(
ctx, config, tlfHandle, path.Join(kbfsRepoDir, normalizedRepoName),
uniqID)
if err != nil {
return nil, NullID, err
}
f, err := fs.Open(kbfsConfigName)
if err != nil && !os.IsNotExist(err) {
return nil, NullID, err
} else if os.IsNotExist(err) {
// Create a new repo ID.
repoID, err := createNewRepoAndID(ctx, config, tlfHandle, repoName, fs)
if err != nil {
return nil, NullID, err
}
return fs, repoID, nil
}
defer f.Close()
buf, err := ioutil.ReadAll(f)
if err != nil {
return nil, NullID, err
}
c, err := configFromBytes(buf)
if err != nil {
return nil, NullID, err
}
if createOnly {
// If this was already created, but we were expected to create
// it, then send back an error.
return nil, NullID, libkb.RepoAlreadyExistsError{
DesiredName: repoName,
ExistingName: c.Name,
ExistingID: c.ID.String(),
}
}
fs.SetLockNamespace(c.ID.Bytes())
return fs, c.ID, nil
}
// GetOrCreateRepoAndID returns a filesystem object rooted at the
// specified repo, along with the stable repo ID. If the repo hasn't
// been created yet, it generates a new ID and creates the repo. The
// caller is responsible for syncing the FS and flushing the journal,
// if desired.
func GetOrCreateRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string, uniqID string) (*libfs.FS, ID, error) {
return getOrCreateRepoAndID(
ctx, config, tlfHandle, repoName, uniqID, false)
}
// CreateRepoAndID returns a new stable repo ID for the provided
// repoName in the given TLF. If the repo has already been created,
// it returns a `RepoAlreadyExistsError`. The caller is responsible
// for syncing the FS and flushing the journal, if desired. It
// expects the `config` object to be unique during the lifetime of
// this call.
func CreateRepoAndID(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string) (ID, error) {
// Create a unique ID using the verifying key and the `config`
// object, which should be unique to each call in practice.
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return NullID, err
}
uniqID := fmt.Sprintf("%s-%p", session.VerifyingKey.String(), config)
fs, id, err := getOrCreateRepoAndID(
ctx, config, tlfHandle, repoName, uniqID, true)
if err != nil {
return NullID, err
}
err = fs.SyncAll()
if err != nil {
return NullID, err
}
return id, err
}
// DeleteRepo "deletes" the given repo in the given TLF. Right now it
// simply moves the repo out of the way to a special directory, to
// allow any concurrent writers to finish their pushes without
// triggering conflict resolution. The caller is responsible for
// syncing the FS and flushing the journal, if desired. It expects
// the `config` object to be unique during the lifetime of this call.
func DeleteRepo(
ctx context.Context, config libkbfs.Config, tlfHandle *libkbfs.TlfHandle,
repoName string) error {
// Create a unique ID using the verifying key and the `config`
// object, which should be unique to each call in practice.
session, err := config.KBPKI().GetCurrentSession(ctx)
if err != nil {
return err
}
kbfsOps := config.KBFSOps()
rootNode, _, err := kbfsOps.GetOrCreateRootNode(
ctx, tlfHandle, libkbfs.MasterBranch)
if err != nil {
return err
}
normalizedRepoName := normalizeRepoName(repoName)
repoNode, _, err := kbfsOps.Lookup(ctx, rootNode, kbfsRepoDir)
if err != nil {
return err
}
_, _, err = kbfsOps.Lookup(ctx, repoNode, normalizedRepoName)
if err != nil {
return err
}
deletedReposNode, err := lookupOrCreateDir(
ctx, config, repoNode, kbfsDeletedReposDir)
if err != nil {
return err
}
// For now, just rename the repo out of the way, using the device
// ID and the current time in nanoseconds to make uniqueness
// probable. TODO(KBFS-2442): periodically delete old-enough
// repos from `kbfsDeletedReposDir`.
dirSuffix := fmt.Sprintf(
"%s-%d", session.VerifyingKey.String(), config.Clock().Now().UnixNano())
return kbfsOps.Rename(
ctx, repoNode, normalizedRepoName, deletedReposNode,
normalizedRepoName+dirSuffix)
}
| 1 | 17,978 | I'm not sure this naming is a good idea; that's the format for macOS xattr metadata files on unsupported filesystems. | keybase-kbfs | go |
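One way to avoid that collision, sketched below: choose a temp-file name that does not begin with "._", since macOS reserves the "._<name>" pattern for AppleDouble sidecar files carrying xattr metadata on filesystems without native xattr support. The exact constant value here is only a suggestion:
const (
	// Deliberately avoids a "._" prefix so the temp file cannot be mistaken
	// for (or clash with) a macOS AppleDouble metadata file.
	kbfsConfigNameTemp = ".kbfs_config.tmp"
)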
@@ -1,4 +1,4 @@
-class AddIndexOnSectionIdAndTeacherIdToSectionTeachers < ActiveRecord::Migration
+class AddIndexOnSectionIdAndTeacherIdToSectionTeachers < ActiveRecord::Migration[4.2]
def up
change_table :section_teachers do |t|
t.remove_index :section_id | 1 | class AddIndexOnSectionIdAndTeacherIdToSectionTeachers < ActiveRecord::Migration
def up
change_table :section_teachers do |t|
t.remove_index :section_id
t.remove_index :teacher_id
t.index [:section_id, :teacher_id], unique: true
end
end
def down
change_table :section_teachers do |t|
t.remove_index [:section_id, :teacher_id]
t.index :section_id
t.index :teacher_id
end
end
end
| 1 | 18,646 | Metrics/LineLength: Line is too long. [85/80] | thoughtbot-upcase | rb |
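One way to satisfy the 80-column limit is to break the line after the `<`; Ruby continues a line that ends in an operator, so this remains a valid class definition (a formatting sketch only, no behavior change):
class AddIndexOnSectionIdAndTeacherIdToSectionTeachers <
      ActiveRecord::Migration[4.2]
  # migration body unchanged
end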
@@ -19,12 +19,13 @@
package org.apache.iceberg.flink;
-
import java.util.List;
import org.apache.flink.table.api.SqlParserException;
import org.apache.iceberg.AssertHelpers;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.catalog.Namespace;
+import org.apache.iceberg.events.Listeners;
+import org.apache.iceberg.events.ScanEvent;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.junit.After;
import org.junit.Assert; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.flink;
import java.util.List;
import org.apache.flink.table.api.SqlParserException;
import org.apache.iceberg.AssertHelpers;
import org.apache.iceberg.FileFormat;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@RunWith(Parameterized.class)
public class TestFlinkTableSource extends FlinkCatalogTestBase {
private static final String TABLE_NAME = "test_table";
private final FileFormat format;
@Parameterized.Parameters(name = "catalogName={0}, baseNamespace={1}, format={2}")
public static Iterable<Object[]> parameters() {
List<Object[]> parameters = Lists.newArrayList();
for (FileFormat format : new FileFormat[] {FileFormat.ORC, FileFormat.AVRO, FileFormat.PARQUET}) {
for (Object[] catalogParams : FlinkCatalogTestBase.parameters()) {
String catalogName = (String) catalogParams[0];
Namespace baseNamespace = (Namespace) catalogParams[1];
parameters.add(new Object[] {catalogName, baseNamespace, format});
}
}
return parameters;
}
public TestFlinkTableSource(String catalogName, Namespace baseNamespace, FileFormat format) {
super(catalogName, baseNamespace);
this.format = format;
}
@Before
public void before() {
super.before();
sql("CREATE DATABASE %s", flinkDatabase);
sql("USE CATALOG %s", catalogName);
sql("USE %s", DATABASE);
sql("CREATE TABLE %s (id INT, data VARCHAR) WITH ('write.format.default'='%s')", TABLE_NAME, format.name());
}
@After
public void clean() {
sql("DROP TABLE IF EXISTS %s.%s", flinkDatabase, TABLE_NAME);
sql("DROP DATABASE IF EXISTS %s", flinkDatabase);
super.clean();
}
@Test
public void testLimitPushDown() {
sql("INSERT INTO %s VALUES (1,'a'),(2,'b')", TABLE_NAME);
String querySql = String.format("SELECT * FROM %s LIMIT 1", TABLE_NAME);
String explain = getTableEnv().explainSql(querySql);
String expectedExplain = "LimitPushDown : 1";
Assert.assertTrue("explain should contains LimitPushDown", explain.contains(expectedExplain));
List<Object[]> result = sql(querySql);
Assert.assertEquals("should have 1 record", 1, result.size());
Assert.assertArrayEquals("Should produce the expected records", result.get(0), new Object[] {1, "a"});
AssertHelpers.assertThrows("Invalid limit number: -1 ", SqlParserException.class,
() -> sql("SELECT * FROM %s LIMIT -1", TABLE_NAME));
Assert.assertEquals("should have 0 record", 0, sql("SELECT * FROM %s LIMIT 0", TABLE_NAME).size());
String sqlLimitExceed = String.format("SELECT * FROM %s LIMIT 3", TABLE_NAME);
List<Object[]> resultExceed = sql(sqlLimitExceed);
Assert.assertEquals("should have 2 record", 2, resultExceed.size());
List<Object[]> expectedList = Lists.newArrayList();
expectedList.add(new Object[] {1, "a"});
expectedList.add(new Object[] {2, "b"});
Assert.assertArrayEquals("Should produce the expected records", resultExceed.toArray(), expectedList.toArray());
String sqlMixed = String.format("SELECT * FROM %s WHERE id = 1 LIMIT 2", TABLE_NAME);
List<Object[]> mixedResult = sql(sqlMixed);
Assert.assertEquals("should have 1 record", 1, mixedResult.size());
Assert.assertArrayEquals("Should produce the expected records", mixedResult.get(0), new Object[] {1, "a"});
}
}
| 1 | 30,033 | Please remove these imports. The project's style is to use `Assert.assertEquals` and not import static methods in general. This also caused a lot of unnecessary changes. | apache-iceberg | java |
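To illustrate the convention the reviewer describes (a minimal sketch, not code from this patch):
import org.junit.Assert;
import org.junit.Test;
public class AssertStyleExample {
  @Test
  public void qualifiedAssertion() {
    // Project style: call assertions through the Assert class ...
    Assert.assertEquals("sizes should match", 2, 1 + 1);
    // ... rather than statically importing them, i.e. avoid:
    //   import static org.junit.Assert.assertEquals;
    //   assertEquals("sizes should match", 2, 1 + 1);
  }
}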
@@ -112,7 +112,10 @@ static int http_post(struct flb_out_http *ctx,
payload_buf, payload_size,
ctx->host, ctx->port,
ctx->proxy, 0);
-
+ if (!c) {
+ flb_plg_error(ctx->ins, "[http_client] failed to create HTTP client");
+ return FLB_RETRY;
+ }
if (c->proxy.host) {
flb_plg_debug(ctx->ins, "[http_client] proxy host: %s port: %i", | 1 | /* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/* Fluent Bit
* ==========
* Copyright (C) 2019-2021 The Fluent Bit Authors
* Copyright (C) 2015-2018 Treasure Data Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <fluent-bit/flb_output_plugin.h>
#include <fluent-bit/flb_output.h>
#include <fluent-bit/flb_http_client.h>
#include <fluent-bit/flb_pack.h>
#include <fluent-bit/flb_str.h>
#include <fluent-bit/flb_time.h>
#include <fluent-bit/flb_utils.h>
#include <fluent-bit/flb_pack.h>
#include <fluent-bit/flb_sds.h>
#include <fluent-bit/flb_gzip.h>
#include <msgpack.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <errno.h>
#include "http.h"
#include "http_conf.h"
#include <fluent-bit/flb_callback.h>
static int cb_http_init(struct flb_output_instance *ins,
struct flb_config *config, void *data)
{
struct flb_out_http *ctx = NULL;
(void) data;
ctx = flb_http_conf_create(ins, config);
if (!ctx) {
return -1;
}
/* Set the plugin context */
flb_output_set_context(ins, ctx);
/*
* This plugin instance uses the HTTP client interface, let's register
* its debugging callbacks.
*/
flb_output_set_http_debug_callbacks(ins);
return 0;
}
static int http_post(struct flb_out_http *ctx,
const void *body, size_t body_len,
const char *tag, int tag_len)
{
int ret;
int out_ret = FLB_OK;
int compressed = FLB_FALSE;
size_t b_sent;
void *payload_buf = NULL;
size_t payload_size = 0;
struct flb_upstream *u;
struct flb_upstream_conn *u_conn;
struct flb_http_client *c;
struct mk_list *head;
struct flb_config_map_val *mv;
struct flb_slist_entry *key = NULL;
struct flb_slist_entry *val = NULL;
/* Get upstream context and connection */
u = ctx->u;
u_conn = flb_upstream_conn_get(u);
if (!u_conn) {
flb_plg_error(ctx->ins, "no upstream connections available to %s:%i",
u->tcp_host, u->tcp_port);
return FLB_RETRY;
}
/* Map payload */
payload_buf = (void *) body;
payload_size = body_len;
/* Should we compress the payload ? */
if (ctx->compress_gzip == FLB_TRUE) {
ret = flb_gzip_compress((void *) body, body_len,
&payload_buf, &payload_size);
if (ret == -1) {
flb_plg_error(ctx->ins,
"cannot gzip payload, disabling compression");
}
else {
compressed = FLB_TRUE;
}
}
/* Create HTTP client context */
c = flb_http_client(u_conn, FLB_HTTP_POST, ctx->uri,
payload_buf, payload_size,
ctx->host, ctx->port,
ctx->proxy, 0);
if (c->proxy.host) {
flb_plg_debug(ctx->ins, "[http_client] proxy host: %s port: %i",
c->proxy.host, c->proxy.port);
}
/* Allow duplicated headers ? */
flb_http_allow_duplicated_headers(c, ctx->allow_dup_headers);
/*
* Direct assignment of the callback context to the HTTP client context.
* This needs to be improved through a more clean API.
*/
c->cb_ctx = ctx->ins->callback;
/* Append headers */
if ((ctx->out_format == FLB_PACK_JSON_FORMAT_JSON) ||
(ctx->out_format == FLB_PACK_JSON_FORMAT_STREAM) ||
(ctx->out_format == FLB_PACK_JSON_FORMAT_LINES) ||
(ctx->out_format == FLB_HTTP_OUT_GELF)) {
flb_http_add_header(c,
FLB_HTTP_CONTENT_TYPE,
sizeof(FLB_HTTP_CONTENT_TYPE) - 1,
FLB_HTTP_MIME_JSON,
sizeof(FLB_HTTP_MIME_JSON) - 1);
}
else {
flb_http_add_header(c,
FLB_HTTP_CONTENT_TYPE,
sizeof(FLB_HTTP_CONTENT_TYPE) - 1,
FLB_HTTP_MIME_MSGPACK,
sizeof(FLB_HTTP_MIME_MSGPACK) - 1);
}
if (ctx->header_tag) {
flb_http_add_header(c,
ctx->header_tag,
flb_sds_len(ctx->header_tag),
tag, tag_len);
}
/* Content Encoding: gzip */
if (compressed == FLB_TRUE) {
flb_http_set_content_encoding_gzip(c);
}
/* Basic Auth headers */
if (ctx->http_user && ctx->http_passwd) {
flb_http_basic_auth(c, ctx->http_user, ctx->http_passwd);
}
flb_http_add_header(c, "User-Agent", 10, "Fluent-Bit", 10);
flb_config_map_foreach(head, mv, ctx->headers) {
key = mk_list_entry_first(mv->val.list, struct flb_slist_entry, _head);
val = mk_list_entry_last(mv->val.list, struct flb_slist_entry, _head);
flb_http_add_header(c,
key->str, flb_sds_len(key->str),
val->str, flb_sds_len(val->str));
}
ret = flb_http_do(c, &b_sent);
if (ret == 0) {
/*
* Only allow the following HTTP status:
*
* - 200: OK
* - 201: Created
* - 202: Accepted
* - 203: Non-Authoritative Information
* - 204: No Content
* - 205: Reset content
*
*/
if (c->resp.status < 200 || c->resp.status > 205) {
if (ctx->log_response_payload &&
c->resp.payload && c->resp.payload_size > 0) {
flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i\n%s",
ctx->host, ctx->port,
c->resp.status, c->resp.payload);
}
else {
flb_plg_error(ctx->ins, "%s:%i, HTTP status=%i",
ctx->host, ctx->port, c->resp.status);
}
out_ret = FLB_RETRY;
}
else {
if (ctx->log_response_payload &&
c->resp.payload && c->resp.payload_size > 0) {
flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i\n%s",
ctx->host, ctx->port,
c->resp.status, c->resp.payload);
}
else {
flb_plg_info(ctx->ins, "%s:%i, HTTP status=%i",
ctx->host, ctx->port,
c->resp.status);
}
}
}
else {
flb_plg_error(ctx->ins, "could not flush records to %s:%i (http_do=%i)",
ctx->host, ctx->port, ret);
out_ret = FLB_RETRY;
}
/*
* If the payload buffer is different from the incoming record buffer in 'body',
* it means we generated a new payload that must be freed.
*/
if (payload_buf != body) {
flb_free(payload_buf);
}
/* Destroy HTTP client context */
flb_http_client_destroy(c);
/* Release the TCP connection */
flb_upstream_conn_release(u_conn);
return out_ret;
}
static int http_gelf(struct flb_out_http *ctx,
const char *data, uint64_t bytes,
const char *tag, int tag_len)
{
flb_sds_t s;
flb_sds_t tmp = NULL;
msgpack_unpacked result;
size_t off = 0;
size_t size = 0;
msgpack_object root;
msgpack_object map;
msgpack_object *obj;
struct flb_time tm;
int ret;
size = bytes * 1.5;
/* Allocate buffer for our new payload */
s = flb_sds_create_size(size);
if (!s) {
return FLB_RETRY;
}
msgpack_unpacked_init(&result);
while (msgpack_unpack_next(&result, data, bytes, &off) ==
MSGPACK_UNPACK_SUCCESS) {
if (result.data.type != MSGPACK_OBJECT_ARRAY) {
continue;
}
root = result.data;
if (root.via.array.size != 2) {
continue;
}
flb_time_pop_from_msgpack(&tm, &result, &obj);
map = root.via.array.ptr[1];
tmp = flb_msgpack_to_gelf(&s, &map, &tm, &(ctx->gelf_fields));
if (!tmp) {
flb_plg_error(ctx->ins, "error encoding to GELF");
flb_sds_destroy(s);
msgpack_unpacked_destroy(&result);
return FLB_ERROR;
}
/* Append new line */
tmp = flb_sds_cat(s, "\n", 1);
if (!tmp) {
flb_plg_error(ctx->ins, "error concatenating records");
flb_sds_destroy(s);
msgpack_unpacked_destroy(&result);
return FLB_RETRY;
}
s = tmp;
}
ret = http_post(ctx, s, flb_sds_len(s), tag, tag_len);
flb_sds_destroy(s);
msgpack_unpacked_destroy(&result);
return ret;
}
static void cb_http_flush(const void *data, size_t bytes,
const char *tag, int tag_len,
struct flb_input_instance *i_ins,
void *out_context,
struct flb_config *config)
{
int ret = FLB_ERROR;
flb_sds_t json;
struct flb_out_http *ctx = out_context;
(void) i_ins;
if ((ctx->out_format == FLB_PACK_JSON_FORMAT_JSON) ||
(ctx->out_format == FLB_PACK_JSON_FORMAT_STREAM) ||
(ctx->out_format == FLB_PACK_JSON_FORMAT_LINES)) {
json = flb_pack_msgpack_to_json_format(data, bytes,
ctx->out_format,
ctx->json_date_format,
ctx->date_key);
if (json != NULL) {
ret = http_post(ctx, json, flb_sds_len(json), tag, tag_len);
flb_sds_destroy(json);
}
}
else if (ctx->out_format == FLB_HTTP_OUT_GELF) {
ret = http_gelf(ctx, data, bytes, tag, tag_len);
}
else {
ret = http_post(ctx, data, bytes, tag, tag_len);
}
FLB_OUTPUT_RETURN(ret);
}
static int cb_http_exit(void *data, struct flb_config *config)
{
struct flb_out_http *ctx = data;
flb_http_conf_destroy(ctx);
return 0;
}
/* Configuration properties map */
static struct flb_config_map config_map[] = {
{
FLB_CONFIG_MAP_STR, "proxy", NULL,
0, FLB_FALSE, 0,
"Specify an HTTP Proxy. The expected format of this value is http://host:port. "
},
{
FLB_CONFIG_MAP_BOOL, "allow_duplicated_headers", "true",
0, FLB_TRUE, offsetof(struct flb_out_http, allow_dup_headers),
"Specify if duplicated headers are allowed or not"
},
{
FLB_CONFIG_MAP_BOOL, "log_response_payload", "true",
0, FLB_TRUE, offsetof(struct flb_out_http, log_response_payload),
"Specify if the response paylod should be logged or not"
},
{
FLB_CONFIG_MAP_STR, "http_user", NULL,
0, FLB_TRUE, offsetof(struct flb_out_http, http_user),
"Set HTTP auth user"
},
{
FLB_CONFIG_MAP_STR, "http_passwd", "",
0, FLB_TRUE, offsetof(struct flb_out_http, http_passwd),
"Set HTTP auth password"
},
{
FLB_CONFIG_MAP_STR, "header_tag", NULL,
0, FLB_TRUE, offsetof(struct flb_out_http, header_tag),
"Set a HTTP header which value is the Tag"
},
{
FLB_CONFIG_MAP_STR, "format", NULL,
0, FLB_FALSE, 0,
"Set desired payload format: json, json_stream, json_lines, gelf or msgpack"
},
{
FLB_CONFIG_MAP_STR, "json_date_format", NULL,
0, FLB_FALSE, 0,
"Specify the format of the date. Supported formats are 'double' and 'iso8601'"
},
{
FLB_CONFIG_MAP_STR, "json_date_key", "date",
0, FLB_TRUE, offsetof(struct flb_out_http, json_date_key),
"Specify the name of the date field in output"
},
{
FLB_CONFIG_MAP_STR, "compress", NULL,
0, FLB_FALSE, 0,
"Set payload compression mechanism. Option available is 'gzip'"
},
{
FLB_CONFIG_MAP_SLIST_1, "header", NULL,
FLB_CONFIG_MAP_MULT, FLB_TRUE, offsetof(struct flb_out_http, headers),
"Add a HTTP header key/value pair. Multiple headers can be set"
},
{
FLB_CONFIG_MAP_STR, "uri", NULL,
0, FLB_TRUE, offsetof(struct flb_out_http, uri),
"Specify an optional HTTP URI for the target web server, e.g: /something"
},
/* Gelf Properties */
{
FLB_CONFIG_MAP_STR, "gelf_timestamp_key", NULL,
0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.timestamp_key),
"Specify the key to use for 'timestamp' in gelf format"
},
{
FLB_CONFIG_MAP_STR, "gelf_host_key", NULL,
0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.host_key),
"Specify the key to use for the 'host' in gelf format"
},
{
FLB_CONFIG_MAP_STR, "gelf_short_message_key", NULL,
0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.short_message_key),
"Specify the key to use as the 'short' message in gelf format"
},
{
FLB_CONFIG_MAP_STR, "gelf_full_message_key", NULL,
0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.full_message_key),
"Specify the key to use for the 'full' message in gelf format"
},
{
FLB_CONFIG_MAP_STR, "gelf_level_key", NULL,
0, FLB_TRUE, offsetof(struct flb_out_http, gelf_fields.level_key),
"Specify the key to use for the 'level' in gelf format"
},
/* EOF */
{0}
};
/* Plugin reference */
struct flb_output_plugin out_http_plugin = {
.name = "http",
.description = "HTTP Output",
.cb_init = cb_http_init,
.cb_pre_run = NULL,
.cb_flush = cb_http_flush,
.cb_exit = cb_http_exit,
.config_map = config_map,
.flags = FLB_OUTPUT_NET | FLB_IO_OPT_TLS,
};
| 1 | 14,724 | thanks. Note that returning at this point might leak memory from the allocations above, so the PR will need to take care of that too. | fluent-fluent-bit | c |
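The leak this comment warns about is the classic early-return pitfall in C: any buffer allocated before FLB_OUTPUT_RETURN has to be released on every exit path. Below is a minimal, self-contained sketch of the cleanup-before-return idiom — the helper and buffer names are illustrative stand-ins, not fluent-bit API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative flush helper: a single cleanup point guarantees the
 * temporary buffer is released even when the function bails out early. */
static int flush_with_cleanup(const char *data, size_t bytes)
{
    int ret = -1;                  /* pessimistic default, like FLB_ERROR */
    char *payload = malloc(bytes);
    if (payload == NULL) {
        return -1;                 /* nothing allocated yet, safe to return */
    }
    memcpy(payload, data, bytes);

    /* ... hand `payload` to the transport here; on failure, fall through
     * so the buffer is still freed ... */
    ret = 0;

    free(payload);                 /* runs on success and failure alike */
    return ret;
}

int main(void)
{
    const char msg[] = "{\"log\":\"hello\"}";
    printf("flush returned %d\n", flush_with_cleanup(msg, sizeof(msg)));
    return 0;
}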
@@ -492,10 +492,13 @@ class MainWindow(QWidget):
@pyqtSlot(bool)
def _on_fullscreen_requested(self, on):
if on:
- self.state_before_fullscreen = self.windowState()
- self.showFullScreen()
+ self.window_state_before_fullscreen = self.windowState()
+ self.config_state_before_fullscreen = config.val.content.desktop_fullscreen
+ if config.val.content.desktop_fullscreen:
+ self.showFullScreen()
elif self.isFullScreen():
- self.setWindowState(self.state_before_fullscreen)
+ if config.val.content.desktop_fullscreen or self.config_state_before_fullscreen:
+ self.setWindowState(self.window_state_before_fullscreen)
log.misc.debug('on: {}, state before fullscreen: {}'.format(
on, debug.qflags_key(Qt, self.state_before_fullscreen)))
| 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main window of qutebrowser."""
import binascii
import base64
import itertools
import functools
from PyQt5.QtCore import pyqtSlot, QRect, QPoint, QTimer, Qt
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QApplication, QSizePolicy
from qutebrowser.commands import runners, cmdutils
from qutebrowser.config import config, configfiles
from qutebrowser.utils import (message, log, usertypes, qtutils, objreg, utils,
jinja, debug)
from qutebrowser.mainwindow import messageview, prompt
from qutebrowser.completion import completionwidget, completer
from qutebrowser.keyinput import modeman
from qutebrowser.browser import (commands, downloadview, hints,
qtnetworkdownloads, downloads)
from qutebrowser.misc import crashsignal, keyhintwidget
win_id_gen = itertools.count(0)
def get_window(via_ipc, force_window=False, force_tab=False,
force_target=None, no_raise=False):
"""Helper function for app.py to get a window id.
Args:
via_ipc: Whether the request was made via IPC.
force_window: Whether to force opening in a window.
force_tab: Whether to force opening in a tab.
force_target: Override the new_instance_open_target config
no_raise: suppress target window raising
Return:
ID of a window that was used to open URL
"""
if force_window and force_tab:
raise ValueError("force_window and force_tab are mutually exclusive!")
if not via_ipc:
# Initial main window
return 0
open_target = config.val.new_instance_open_target
# Apply any target overrides, ordered by precedence
if force_target is not None:
open_target = force_target
if force_window:
open_target = 'window'
if force_tab and open_target == 'window':
# Command sent via IPC
open_target = 'tab-silent'
window = None
should_raise = False
# Try to find the existing tab target if opening in a tab
if open_target != 'window':
window = get_target_window()
should_raise = open_target not in ['tab-silent', 'tab-bg-silent']
# Otherwise, or if no window was found, create a new one
if window is None:
window = MainWindow(private=None)
window.show()
should_raise = True
if should_raise and not no_raise:
raise_window(window)
return window.win_id
def raise_window(window):
"""Raise the given MainWindow object."""
window.setWindowState(window.windowState() & ~Qt.WindowMinimized)
window.setWindowState(window.windowState() | Qt.WindowActive)
window.raise_()
window.activateWindow()
QApplication.instance().alert(window)
def get_target_window():
"""Get the target window for new tabs, or None if none exist."""
try:
win_mode = config.val.new_instance_open_target_window
if win_mode == 'last-focused':
return objreg.last_focused_window()
elif win_mode == 'first-opened':
return objreg.window_by_index(0)
elif win_mode == 'last-opened':
return objreg.window_by_index(-1)
elif win_mode == 'last-visible':
return objreg.last_visible_window()
else:
raise ValueError("Invalid win_mode {}".format(win_mode))
except objreg.NoWindow:
return None
class MainWindow(QWidget):
"""The main window of qutebrowser.
Adds all needed components to a vbox, initializes sub-widgets and connects
signals.
Attributes:
status: The StatusBar widget.
tabbed_browser: The TabbedBrowser widget.
state_before_fullscreen: window state before activation of fullscreen.
_downloadview: The DownloadView widget.
_vbox: The main QVBoxLayout.
_commandrunner: The main CommandRunner instance.
_overlays: Widgets shown as overlay for the current webpage.
_private: Whether the window is in private browsing mode.
"""
def __init__(self, *, private, geometry=None, parent=None):
"""Create a new main window.
Args:
geometry: The geometry to load, as a bytes-object (or None).
private: Whether the window is in private browsing mode.
parent: The parent the window should get.
"""
super().__init__(parent)
# Late import to avoid a circular dependency
# - browsertab -> hints -> webelem -> mainwindow -> bar -> browsertab
from qutebrowser.mainwindow import tabbedbrowser
from qutebrowser.mainwindow.statusbar import bar
self.setAttribute(Qt.WA_DeleteOnClose)
self._commandrunner = None
self._overlays = []
self.win_id = next(win_id_gen)
self.registry = objreg.ObjectRegistry()
objreg.window_registry[self.win_id] = self
objreg.register('main-window', self, scope='window',
window=self.win_id)
tab_registry = objreg.ObjectRegistry()
objreg.register('tab-registry', tab_registry, scope='window',
window=self.win_id)
message_bridge = message.MessageBridge(self)
objreg.register('message-bridge', message_bridge, scope='window',
window=self.win_id)
self.setWindowTitle('qutebrowser')
self._vbox = QVBoxLayout(self)
self._vbox.setContentsMargins(0, 0, 0, 0)
self._vbox.setSpacing(0)
self._init_downloadmanager()
self._downloadview = downloadview.DownloadView(self.win_id)
if config.val.content.private_browsing:
# This setting always trumps what's passed in.
private = True
else:
private = bool(private)
self._private = private
self.tabbed_browser = tabbedbrowser.TabbedBrowser(win_id=self.win_id,
private=private)
objreg.register('tabbed-browser', self.tabbed_browser, scope='window',
window=self.win_id)
self._init_command_dispatcher()
# We need to set an explicit parent for StatusBar because it does some
# show/hide magic immediately which would mean it'd show up as a
# window.
self.status = bar.StatusBar(win_id=self.win_id, private=private,
parent=self)
self._add_widgets()
self._downloadview.show()
self._init_completion()
log.init.debug("Initializing modes...")
modeman.init(self.win_id, self)
self._commandrunner = runners.CommandRunner(self.win_id,
partial_match=True)
self._keyhint = keyhintwidget.KeyHintView(self.win_id, self)
self._add_overlay(self._keyhint, self._keyhint.update_geometry)
self._prompt_container = prompt.PromptContainer(self.win_id, self)
self._add_overlay(self._prompt_container,
self._prompt_container.update_geometry,
centered=True, padding=10)
objreg.register('prompt-container', self._prompt_container,
scope='window', window=self.win_id)
self._prompt_container.hide()
self._messageview = messageview.MessageView(parent=self)
self._add_overlay(self._messageview, self._messageview.update_geometry)
self._init_geometry(geometry)
self._connect_signals()
# When we're here the statusbar might not even really exist yet, so
# resizing will fail. Therefore, we use singleShot QTimers to make sure
# we defer this until everything else is initialized.
QTimer.singleShot(0, self._connect_overlay_signals)
config.instance.changed.connect(self._on_config_changed)
objreg.get("app").new_window.emit(self)
self.state_before_fullscreen = self.windowState()
def _init_geometry(self, geometry):
"""Initialize the window geometry or load it from disk."""
if geometry is not None:
self._load_geometry(geometry)
elif self.win_id == 0:
self._load_state_geometry()
else:
self._set_default_geometry()
log.init.debug("Initial main window geometry: {}".format(
self.geometry()))
def _add_overlay(self, widget, signal, *, centered=False, padding=0):
self._overlays.append((widget, signal, centered, padding))
def _update_overlay_geometries(self):
"""Update the size/position of all overlays."""
for w, _signal, centered, padding in self._overlays:
self._update_overlay_geometry(w, centered, padding)
def _update_overlay_geometry(self, widget, centered, padding):
"""Reposition/resize the given overlay."""
if not widget.isVisible():
return
size_hint = widget.sizeHint()
if widget.sizePolicy().horizontalPolicy() == QSizePolicy.Expanding:
width = self.width() - 2 * padding
left = padding
else:
width = min(size_hint.width(), self.width() - 2 * padding)
left = (self.width() - width) / 2 if centered else 0
height_padding = 20
status_position = config.val.statusbar.position
if status_position == 'bottom':
if self.status.isVisible():
status_height = self.status.height()
bottom = self.status.geometry().top()
else:
status_height = 0
bottom = self.height()
top = self.height() - status_height - size_hint.height()
top = qtutils.check_overflow(top, 'int', fatal=False)
topleft = QPoint(left, max(height_padding, top))
bottomright = QPoint(left + width, bottom)
elif status_position == 'top':
if self.status.isVisible():
status_height = self.status.height()
top = self.status.geometry().bottom()
else:
status_height = 0
top = 0
topleft = QPoint(left, top)
bottom = status_height + size_hint.height()
bottom = qtutils.check_overflow(bottom, 'int', fatal=False)
bottomright = QPoint(left + width,
min(self.height() - height_padding, bottom))
else:
raise ValueError("Invalid position {}!".format(status_position))
rect = QRect(topleft, bottomright)
log.misc.debug('new geometry for {!r}: {}'.format(widget, rect))
if rect.isValid():
widget.setGeometry(rect)
def _init_downloadmanager(self):
log.init.debug("Initializing downloads...")
qtnetwork_download_manager = qtnetworkdownloads.DownloadManager(
self.win_id, self)
objreg.register('qtnetwork-download-manager',
qtnetwork_download_manager,
scope='window', window=self.win_id)
try:
webengine_download_manager = objreg.get(
'webengine-download-manager')
except KeyError:
webengine_download_manager = None
download_model = downloads.DownloadModel(qtnetwork_download_manager,
webengine_download_manager)
objreg.register('download-model', download_model, scope='window',
window=self.win_id)
def _init_completion(self):
self._completion = completionwidget.CompletionView(self.win_id, self)
cmd = objreg.get('status-command', scope='window', window=self.win_id)
completer_obj = completer.Completer(cmd, self._completion)
self._completion.selection_changed.connect(
completer_obj.on_selection_changed)
objreg.register('completion', self._completion, scope='window',
window=self.win_id)
self._add_overlay(self._completion, self._completion.update_geometry)
def _init_command_dispatcher(self):
dispatcher = commands.CommandDispatcher(self.win_id,
self.tabbed_browser)
objreg.register('command-dispatcher', dispatcher, scope='window',
window=self.win_id)
self.tabbed_browser.destroyed.connect(
functools.partial(objreg.delete, 'command-dispatcher',
scope='window', window=self.win_id))
def __repr__(self):
return utils.get_repr(self)
@pyqtSlot(str)
def _on_config_changed(self, option):
"""Resize the completion if related config options changed."""
if option == 'statusbar.padding':
self._update_overlay_geometries()
elif option == 'downloads.position':
self._add_widgets()
elif option == 'statusbar.position':
self._add_widgets()
self._update_overlay_geometries()
def _add_widgets(self):
"""Add or readd all widgets to the VBox."""
self._vbox.removeWidget(self.tabbed_browser)
self._vbox.removeWidget(self._downloadview)
self._vbox.removeWidget(self.status)
widgets = [self.tabbed_browser]
downloads_position = config.val.downloads.position
if downloads_position == 'top':
widgets.insert(0, self._downloadview)
elif downloads_position == 'bottom':
widgets.append(self._downloadview)
else:
raise ValueError("Invalid position {}!".format(downloads_position))
status_position = config.val.statusbar.position
if status_position == 'top':
widgets.insert(0, self.status)
elif status_position == 'bottom':
widgets.append(self.status)
else:
raise ValueError("Invalid position {}!".format(status_position))
for widget in widgets:
self._vbox.addWidget(widget)
def _load_state_geometry(self):
"""Load the geometry from the state file."""
try:
data = configfiles.state['geometry']['mainwindow']
geom = base64.b64decode(data, validate=True)
except KeyError:
# First start
self._set_default_geometry()
except binascii.Error:
log.init.exception("Error while reading geometry")
self._set_default_geometry()
else:
self._load_geometry(geom)
def _save_geometry(self):
"""Save the window geometry to the state config."""
data = bytes(self.saveGeometry())
geom = base64.b64encode(data).decode('ASCII')
configfiles.state['geometry']['mainwindow'] = geom
def _load_geometry(self, geom):
"""Load geometry from a bytes object.
If loading fails, loads default geometry.
"""
log.init.debug("Loading mainwindow from {!r}".format(geom))
ok = self.restoreGeometry(geom)
if not ok:
log.init.warning("Error while loading geometry.")
self._set_default_geometry()
def _connect_overlay_signals(self):
"""Connect the resize signal and resize everything once."""
for widget, signal, centered, padding in self._overlays:
signal.connect(
functools.partial(self._update_overlay_geometry, widget,
centered, padding))
self._update_overlay_geometry(widget, centered, padding)
def _set_default_geometry(self):
"""Set some sensible default geometry."""
self.setGeometry(QRect(50, 50, 800, 600))
def _get_object(self, name):
"""Get an object for this window in the object registry."""
return objreg.get(name, scope='window', window=self.win_id)
def _connect_signals(self):
"""Connect all mainwindow signals."""
status = self._get_object('statusbar')
keyparsers = self._get_object('keyparsers')
completion_obj = self._get_object('completion')
tabs = self._get_object('tabbed-browser')
cmd = self._get_object('status-command')
message_bridge = self._get_object('message-bridge')
mode_manager = self._get_object('mode-manager')
# misc
self.tabbed_browser.close_window.connect(self.close)
mode_manager.entered.connect(hints.on_mode_entered)
# status bar
mode_manager.entered.connect(status.on_mode_entered)
mode_manager.left.connect(status.on_mode_left)
mode_manager.left.connect(cmd.on_mode_left)
mode_manager.left.connect(message.global_bridge.mode_left)
# commands
keyparsers[usertypes.KeyMode.normal].keystring_updated.connect(
status.keystring.setText)
cmd.got_cmd[str].connect(self._commandrunner.run_safely)
cmd.got_cmd[str, int].connect(self._commandrunner.run_safely)
cmd.returnPressed.connect(tabs.on_cmd_return_pressed)
# key hint popup
for mode, parser in keyparsers.items():
parser.keystring_updated.connect(functools.partial(
self._keyhint.update_keyhint, mode.name))
# messages
message.global_bridge.show_message.connect(
self._messageview.show_message)
message.global_bridge.flush()
message.global_bridge.clear_messages.connect(
self._messageview.clear_messages)
message_bridge.s_set_text.connect(status.set_text)
message_bridge.s_maybe_reset_text.connect(status.txt.maybe_reset_text)
# statusbar
tabs.current_tab_changed.connect(status.on_tab_changed)
tabs.cur_progress.connect(status.prog.setValue)
tabs.cur_load_finished.connect(status.prog.hide)
tabs.cur_load_started.connect(status.prog.on_load_started)
tabs.cur_scroll_perc_changed.connect(status.percentage.set_perc)
tabs.tab_index_changed.connect(status.tabindex.on_tab_index_changed)
tabs.cur_url_changed.connect(status.url.set_url)
tabs.cur_url_changed.connect(functools.partial(
status.backforward.on_tab_cur_url_changed, tabs=tabs))
tabs.cur_link_hovered.connect(status.url.set_hover_url)
tabs.cur_load_status_changed.connect(status.url.on_load_status_changed)
tabs.cur_fullscreen_requested.connect(self._on_fullscreen_requested)
tabs.cur_fullscreen_requested.connect(status.maybe_hide)
# command input / completion
mode_manager.left.connect(tabs.on_mode_left)
cmd.clear_completion_selection.connect(
completion_obj.on_clear_completion_selection)
cmd.hide_completion.connect(completion_obj.hide)
@pyqtSlot(bool)
def _on_fullscreen_requested(self, on):
if on:
self.state_before_fullscreen = self.windowState()
self.showFullScreen()
elif self.isFullScreen():
self.setWindowState(self.state_before_fullscreen)
log.misc.debug('on: {}, state before fullscreen: {}'.format(
on, debug.qflags_key(Qt, self.state_before_fullscreen)))
@cmdutils.register(instance='main-window', scope='window')
@pyqtSlot()
def close(self):
"""Close the current window.
//
Extend close() so we can register it as a command.
"""
super().close()
def resizeEvent(self, e):
"""Extend resizewindow's resizeEvent to adjust completion.
Args:
e: The QResizeEvent
"""
super().resizeEvent(e)
self._update_overlay_geometries()
self._downloadview.updateGeometry()
self.tabbed_browser.tabBar().refresh()
def showEvent(self, e):
"""Extend showEvent to register us as the last-visible-main-window.
Args:
e: The QShowEvent
"""
super().showEvent(e)
objreg.register('last-visible-main-window', self, update=True)
def _do_close(self):
"""Helper function for closeEvent."""
try:
last_visible = objreg.get('last-visible-main-window')
if self is last_visible:
objreg.delete('last-visible-main-window')
except KeyError:
pass
objreg.get('session-manager').save_last_window_session()
self._save_geometry()
log.destroy.debug("Closing window {}".format(self.win_id))
self.tabbed_browser.shutdown()
def closeEvent(self, e):
"""Override closeEvent to display a confirmation if needed."""
if crashsignal.is_crashing:
e.accept()
return
tab_count = self.tabbed_browser.count()
download_model = objreg.get('download-model', scope='window',
window=self.win_id)
download_count = download_model.running_downloads()
quit_texts = []
# Ask if multiple-tabs are open
if 'multiple-tabs' in config.val.confirm_quit and tab_count > 1:
quit_texts.append("{} {} open.".format(
tab_count, "tab is" if tab_count == 1 else "tabs are"))
# Ask if multiple downloads running
if 'downloads' in config.val.confirm_quit and download_count > 0:
quit_texts.append("{} {} running.".format(
download_count,
"download is" if download_count == 1 else "downloads are"))
# Process all quit messages that user must confirm
if quit_texts or 'always' in config.val.confirm_quit:
msg = jinja.environment.from_string("""
<ul>
{% for text in quit_texts %}
<li>{{text}}</li>
{% endfor %}
</ul>
""".strip()).render(quit_texts=quit_texts)
confirmed = message.ask('Really quit?', msg,
mode=usertypes.PromptMode.yesno,
default=True)
# Stop asking if the user cancels
if not confirmed:
log.destroy.debug("Cancelling closing of window {}".format(
self.win_id))
e.ignore()
return
e.accept()
self._do_close()
| 1 | 19,680 | If you really want to rename this, you'll also need to adjust the name in `__init__` and in other places it's used (`browser/commands.py`). | qutebrowser-qutebrowser | py |
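The rename the reviewer flags is incomplete: `__init__` still initializes the old attribute name, and the debug call at the bottom of the diff still reads `self.state_before_fullscreen`. A small, Qt-free sketch of the consistent version (only the two attribute names are taken from the diff; everything else is illustrative):

class FullscreenState:
    """Sketch: both renamed attributes exist from construction onward."""

    def __init__(self, window_state, desktop_fullscreen):
        # initialize under the *new* names, mirroring what __init__ must do
        self.window_state_before_fullscreen = window_state
        self.config_state_before_fullscreen = desktop_fullscreen

    def on_fullscreen_requested(self, on, window_state, desktop_fullscreen):
        if on:
            self.window_state_before_fullscreen = window_state
            self.config_state_before_fullscreen = desktop_fullscreen
        # the log line must use the new name too, or it raises AttributeError
        print('on: {}, state before fullscreen: {}'.format(
            on, self.window_state_before_fullscreen))


state = FullscreenState(window_state='normal', desktop_fullscreen=True)
state.on_fullscreen_requested(True, 'normal', True)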
@@ -53,6 +53,7 @@ public class MapLayerMetadata implements Serializable {
// ForeignKey to DataFile
//x@ManyToOne
// For now, make this unique: Each DataFile may only have one map
+ //@OneToOne(cascade = {CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST}) // TODO: Figure out why this doesn't work.
@JoinColumn(nullable=false, unique=true)
private DataFile dataFile;
| 1 | /*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package edu.harvard.iq.dataverse;
import java.io.Serializable;
import java.sql.Timestamp;
import java.util.Arrays;
import java.util.List;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.GenerationType;
import javax.persistence.Id;
import javax.persistence.Index;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.Table;
import javax.persistence.Transient;
import org.hibernate.validator.constraints.NotBlank;
/**
* File metadata: specifically WorldMap layer information for a specific DataFile
*
* @author raprasad
*/
@NamedQueries({
@NamedQuery(name = "MapLayerMetadata.findAll",
query = "SELECT mlm FROM MapLayerMetadata mlm"),})
@Entity
@Table(indexes = {@Index(columnList="dataset_id")})
public class MapLayerMetadata implements Serializable {
@Transient
public final static String dataType = "MapLayerMetadata";
@Transient
public final static List<String> MANDATORY_JSON_FIELDS = Arrays.asList("layerName", "layerLink", "embedMapLink", "worldmapUsername");
private static final long serialVersionUID = 1L;
@Id
@GeneratedValue(strategy = GenerationType.IDENTITY)
private Long id;
// ForeignKey to DataFile
//x@ManyToOne
// For now, make this unique: Each DataFile may only have one map
@JoinColumn(nullable=false, unique=true)
private DataFile dataFile;
// ForeignKey to Dataset.
// This is always reachable via the datafile.getOwner();
// However, save the Dataset itself to potentially save an extra step
@ManyToOne
@JoinColumn(nullable=false)
private Dataset dataset;
@Column(nullable=false)
@NotBlank(message = "Please specify a layer name.")
private String layerName;
@Column(nullable=false)
@NotBlank(message = "Please specify a layer link.")
private String layerLink;
@Column(nullable=false)
@NotBlank(message = "Please specify am embedded map link.")
private String embedMapLink;
@Column(nullable=true)
@NotBlank(message = "Please specify a map image link.")
private String mapImageLink;
@Column(nullable=false)
@NotBlank(message = "Please specify a WorldMap username.")
private String worldmapUsername;
/**
* Was this layer created by joining a tabular file
* to an existing file?
*/
private boolean isJoinLayer;
/**
* Description if this was created via a tabular join,
*/
@Column(columnDefinition = "TEXT")
private String joinDescription;
/**
* Links to alternative representations of the map
* in JSON format
*/
@Column(columnDefinition = "TEXT")
private String mapLayerLinks;
/**
* The HTTP Status code (200, 404, etc.) returned when you check to see if
* the map/layer exists on the WorldMap side.
*/
@Column(nullable=true)
private int lastVerifiedStatus;
/**
* The time that lastVerifiedStatus was last recorded.
*/
@Column(nullable=true)
private Timestamp lastVerifiedTime;
/**
* Get property layerName.
* @return value of property layerName.
*/
public String getLayerName() {
return this.layerName;
}
/**
* Set property layerName.
* @param layerName new value of property layerName.
*/
public void setLayerName(String layerName) {
this.layerName = layerName;
}
/**
* Get property layerLink.
* @return value of property layerLink.
*/
public String getLayerLink() {
return this.layerLink;
}
/**
* Set property layerLink.
* @param layerLink new value of property layerLink.
*/
public void setLayerLink(String layerLink) {
this.layerLink = layerLink;
}
/**
* Get property mapImageLink.
* @return value of property mapImageLink.
*/
public String getMapImageLink() {
return this.mapImageLink;
}
/**
* Set property mapImageLink.
     * @param mapImageLink new value of property mapImageLink.
*/
public void setMapImageLink(String mapImageLink) {
this.mapImageLink = mapImageLink;
}
/**
* Get property embedMapLink.
* @return value of property embedMapLink.
*/
public String getEmbedMapLink() {
return this.embedMapLink;
}
/**
* Set property embedMapLink.
* @param embedMapLink new value of property embedMapLink.
*/
public void setEmbedMapLink(String embedMapLink) {
this.embedMapLink = embedMapLink;
}
/**
* Get property worldmapUsername.
* @return value of property worldmapUsername.
*/
public String getWorldmapUsername() {
return this.worldmapUsername;
}
/**
* Set property worldmapUsername.
* @param worldmapUsername new value of property worldmapUsername.
*/
public void setWorldmapUsername(String worldmapUsername) {
this.worldmapUsername = worldmapUsername;
}
/**
* Get property isJoinLayer.
* @return value of property isJoinLayer.
*/
public boolean isJoinLayer(){
return this.isJoinLayer;
}
/**
* Set property isJoinLayer.
* @param bool new value of property isJoinLayer.
*/
public void setIsJoinLayer(boolean bool) {
this.isJoinLayer = bool;
}
/**
* Get property joinDescription.
* @return value of property joinDescription.
*/
public String getJoinDescription() {
return this.joinDescription;
}
/**
* Set property joinDescription.
* @param joinDescription new value of property joinDescription.
*/
public void setJoinDescription(String joinDescription) {
this.joinDescription = joinDescription;
}
/**
* Get property mapLayerLinks
     * @return value of property mapLayerLinks.
*/
public String getMapLayerLinks() {
return this.mapLayerLinks;
}
/**
     * Set property mapLayerLinks.
     * @param mapLayerLinks new value of property mapLayerLinks.
*/
public void setMapLayerLinks(String mapLayerLinks) {
this.mapLayerLinks = mapLayerLinks;
}
public Dataset getDataset() {
return dataset;
}
public void setDataset(Dataset dataset) {
this.dataset = dataset;
}
public DataFile getDataFile() {
return dataFile;
}
public void setDataFile(DataFile dataFile) {
this.dataFile = dataFile;
}
/**
* Getter for property id.
* @return Value of property id.
*/
public Long getId() {
return this.id;
}
/**
* Setter for property id.
* @param id New value of property id.
*/
public void setId(Long id) {
this.id = id;
}
public Timestamp getLastVerifiedTime() {
return lastVerifiedTime;
}
public void setLastVerifiedTime(Timestamp lastVerifiedTime) {
this.lastVerifiedTime = lastVerifiedTime;
}
public int getLastVerifiedStatus() {
return lastVerifiedStatus;
}
public void setLastVerifiedStatus(int lastVerifiedStatus) {
this.lastVerifiedStatus = lastVerifiedStatus;
}
@Override
public String toString() {
return "edu.harvard.iq.dataverse.MaplayerMetadata[id=" + this.id + "]";
//return "WorldMap Layer: " + this.layerName + " for DataFile: " + this.dataFile.toString();
}
}
| 1 | 37,093 | As discussed at standup, I gave up on this. Calling `DeleteMapLayerMetadataCommand` felt cleaner anyway because there might be other cleanup that needs to happen. @scolapasta and @matthew-a-dunlap plan to discuss this. | IQSS-dataverse | java |
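With the cascade annotation left commented out, deletion stays explicit. A minimal sketch of what that explicit cleanup could look like with plain JPA — illustrative only, not the actual DeleteMapLayerMetadataCommand API (whose signature is not shown here); it assumes the class sits alongside the MapLayerMetadata entity:

import javax.persistence.EntityManager;

// Illustrative only: explicit removal gives one place to hang any extra
// cleanup (e.g. remote WorldMap calls) that a JPA cascade cannot express.
public class DeleteMapLayerMetadataSketch {

    private final EntityManager em;

    public DeleteMapLayerMetadataSketch(EntityManager em) {
        this.em = em;
    }

    public void execute(long dataFileId) {
        MapLayerMetadata mlm = em.createQuery(
                "SELECT m FROM MapLayerMetadata m WHERE m.dataFile.id = :id",
                MapLayerMetadata.class)
            .setParameter("id", dataFileId)
            .getSingleResult();
        // ... any non-database cleanup would happen here, before the row goes away ...
        em.remove(mlm);
    }
}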
@@ -84,5 +84,8 @@ func (a *action) Matches(act coretesting.Action) error {
return nil
}
+ fmt.Printf("EXP:%+v\n", objExp.GetObject())
+	fmt.Printf("ACT:%+v\n", objAct.GetObject())
+
return fmt.Errorf("unexpected difference between actions: %s", pretty.Diff(objExp.GetObject(), objAct.GetObject()))
} | 1 | /*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"fmt"
"reflect"
"github.com/kr/pretty"
coretesting "k8s.io/client-go/testing"
)
type ActionMatchFn func(coretesting.Action, coretesting.Action) error
type Action interface {
Action() coretesting.Action
Matches(coretesting.Action) error
}
type customMatchAction struct {
action coretesting.Action
matchFn ActionMatchFn
}
var _ Action = &customMatchAction{}
func NewCustomMatch(a coretesting.Action, matchFn ActionMatchFn) Action {
return &customMatchAction{
action: a,
matchFn: matchFn,
}
}
func (a *customMatchAction) Action() coretesting.Action {
return a.action
}
func (a *customMatchAction) Matches(act coretesting.Action) error {
return a.matchFn(a.action, act)
}
type action struct {
action coretesting.Action
}
var _ Action = &action{}
func NewAction(a coretesting.Action) Action {
return &action{
action: a,
}
}
func (a *action) Action() coretesting.Action {
return a.action
}
func (a *action) Matches(act coretesting.Action) error {
matches := reflect.DeepEqual(a.action, act)
	if matches {
return nil
}
objAct, ok := act.(coretesting.CreateAction)
if !ok {
return nil
}
objExp, ok := a.action.(coretesting.CreateAction)
if !ok {
return nil
}
return fmt.Errorf("unexpected difference between actions: %s", pretty.Diff(objExp.GetObject(), objAct.GetObject()))
}
| 1 | 18,018 | Do we need these changes? If so, can you tidy up the messages? Looks like it might have been your testing/debugging changes | jetstack-cert-manager | go |
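One way to tidy the messages, as the review asks: drop the stdout prints and carry both objects in the returned error, so the detail surfaces wherever the test failure is reported. A sketch using only types and helpers this file already imports:

package test

import (
	"fmt"

	"github.com/kr/pretty"
	coretesting "k8s.io/client-go/testing"
)

// matchesVerbose folds the expected/actual objects into the error itself,
// replacing the ad-hoc EXP/ACT prints from the diff above.
func matchesVerbose(exp, act coretesting.Action) error {
	objExp, okExp := exp.(coretesting.CreateAction)
	objAct, okAct := act.(coretesting.CreateAction)
	if !okExp || !okAct {
		return nil
	}
	return fmt.Errorf("unexpected difference between actions:\nexp: %+v\nact: %+v\ndiff: %s",
		objExp.GetObject(), objAct.GetObject(),
		pretty.Diff(objExp.GetObject(), objAct.GetObject()))
}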
@@ -63,6 +63,9 @@ const (
// SecretTypeEnv is to show secret type being ENVIRONMENT_VARIABLE
SecretTypeEnv = "ENVIRONMENT_VARIABLE"
+
+	// SecretTargetLogDriver is to show secret target being "LOG_DRIVER"; the default target is "CONTAINER"
+ SecretTargetLogDriver = "LOG_DRIVER"
)
// DockerConfig represents additional metadata about a container to run. It's | 1 | // Copyright 2014-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package container
import (
"fmt"
"strconv"
"sync"
"time"
apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status"
apierrors "github.com/aws/amazon-ecs-agent/agent/api/errors"
"github.com/aws/amazon-ecs-agent/agent/credentials"
resourcestatus "github.com/aws/amazon-ecs-agent/agent/taskresource/status"
"github.com/aws/aws-sdk-go/aws"
"github.com/docker/docker/api/types"
)
const (
// defaultContainerSteadyStateStatus defines the container status at
// which the container is assumed to be in steady state. It is set
// to 'ContainerRunning' unless overridden
defaultContainerSteadyStateStatus = apicontainerstatus.ContainerRunning
// awslogsAuthExecutionRole is the string value passed in the task payload
// that specifies that the log driver should be authenticated using the
// execution role
awslogsAuthExecutionRole = "ExecutionRole"
// DockerHealthCheckType is the type of container health check provided by docker
DockerHealthCheckType = "docker"
// AuthTypeECR is to use image pull auth over ECR
AuthTypeECR = "ecr"
// AuthTypeASM is to use image pull auth over AWS Secrets Manager
AuthTypeASM = "asm"
// MetadataURIEnvironmentVariableName defines the name of the environment
// variable in containers' config, which can be used by the containers to access the
// v3 metadata endpoint
MetadataURIEnvironmentVariableName = "ECS_CONTAINER_METADATA_URI"
// MetadataURIFormat defines the URI format for v3 metadata endpoint
MetadataURIFormat = "http://169.254.170.2/v3/%s"
// SecretProviderSSM is to show secret provider being SSM
SecretProviderSSM = "ssm"
// SecretProviderASM is to show secret provider being ASM
SecretProviderASM = "asm"
// SecretTypeEnv is to show secret type being ENVIRONMENT_VARIABLE
SecretTypeEnv = "ENVIRONMENT_VARIABLE"
)
// DockerConfig represents additional metadata about a container to run. It's
// remodeled from the `ecsacs` api model file. Eventually it should not exist
// once this remodeling is refactored out.
type DockerConfig struct {
// Config is the configuration used to create container
Config *string `json:"config"`
// HostConfig is the configuration of container related to host resource
HostConfig *string `json:"hostConfig"`
// Version specifies the docker client API version to use
Version *string `json:"version"`
}
// HealthStatus contains the health check result returned by docker
type HealthStatus struct {
// Status is the container health status
Status apicontainerstatus.ContainerHealthStatus `json:"status,omitempty"`
// Since is the timestamp when container health status changed
Since *time.Time `json:"statusSince,omitempty"`
// ExitCode is the exitcode of health check if failed
ExitCode int `json:"exitCode,omitempty"`
// Output is the output of health check
Output string `json:"output,omitempty"`
}
// Container is the internal representation of a container in the ECS agent
type Container struct {
// Name is the name of the container specified in the task definition
Name string
// V3EndpointID is a container identifier used to construct v3 metadata endpoint; it's unique among
// all the containers managed by the agent
V3EndpointID string
// Image is the image name specified in the task definition
Image string
// ImageID is the local ID of the image used in the container
ImageID string
// Command is the command to run in the container which is specified in the task definition
Command []string
// CPU is the cpu limitation of the container which is specified in the task definition
CPU uint `json:"Cpu"`
// GPUIDs is the list of GPU ids for a container
GPUIDs []string
// Memory is the memory limitation of the container which is specified in the task definition
Memory uint
// Links contains a list of containers to link, corresponding to docker option: --link
Links []string
// VolumesFrom contains a list of container's volume to use, corresponding to docker option: --volumes-from
VolumesFrom []VolumeFrom `json:"volumesFrom"`
// MountPoints contains a list of volume mount paths
MountPoints []MountPoint `json:"mountPoints"`
// Ports contains a list of ports binding configuration
Ports []PortBinding `json:"portMappings"`
// Secrets contains a list of secret
Secrets []Secret `json:"secrets"`
// Essential denotes whether the container is essential or not
Essential bool
// EntryPoint is entrypoint of the container, corresponding to docker option: --entrypoint
EntryPoint *[]string
// Environment is the environment variable set in the container
Environment map[string]string `json:"environment"`
// Overrides contains the configuration to override of a container
Overrides ContainerOverrides `json:"overrides"`
// DockerConfig is the configuration used to create the container
DockerConfig DockerConfig `json:"dockerConfig"`
// RegistryAuthentication is the auth data used to pull image
RegistryAuthentication *RegistryAuthenticationData `json:"registryAuthentication"`
	// HealthCheckType is the mechanism to use for the container health check
// currently it only supports 'DOCKER'
HealthCheckType string `json:"healthCheckType,omitempty"`
// Health contains the health check information of container health check
Health HealthStatus `json:"-"`
// LogsAuthStrategy specifies how the logs driver for the container will be
// authenticated
LogsAuthStrategy string
// lock is used for fields that are accessed and updated concurrently
lock sync.RWMutex
// DesiredStatusUnsafe represents the state where the container should go. Generally,
// the desired status is informed by the ECS backend as a result of either
// API calls made to ECS or decisions made by the ECS service scheduler,
// though the agent may also set the DesiredStatusUnsafe if a different "essential"
// container in the task exits. The DesiredStatus is almost always either
// ContainerRunning or ContainerStopped.
// NOTE: Do not access DesiredStatusUnsafe directly. Instead, use `GetDesiredStatus`
// and `SetDesiredStatus`.
// TODO DesiredStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
DesiredStatusUnsafe apicontainerstatus.ContainerStatus `json:"desiredStatus"`
// KnownStatusUnsafe represents the state where the container is.
// NOTE: Do not access `KnownStatusUnsafe` directly. Instead, use `GetKnownStatus`
// and `SetKnownStatus`.
// TODO KnownStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON
// is handled properly so that the state storage continues to work.
KnownStatusUnsafe apicontainerstatus.ContainerStatus `json:"KnownStatus"`
// TransitionDependenciesMap is a map of the dependent container status to other
// dependencies that must be satisfied in order for this container to transition.
TransitionDependenciesMap TransitionDependenciesMap `json:"TransitionDependencySet"`
// SteadyStateDependencies is a list of containers that must be in "steady state" before
// this one is created
// Note: Current logic requires that the containers specified here are run
// before this container can even be pulled.
//
// Deprecated: Use TransitionDependencySet instead. SteadyStateDependencies is retained for compatibility with old
// state files.
SteadyStateDependencies []string `json:"RunDependencies"`
// Type specifies the container type. Except the 'Normal' type, all other types
// are not directly specified by task definitions, but created by the agent. The
// JSON tag is retained as this field's previous name 'IsInternal' for maintaining
// backwards compatibility. Please see JSON parsing hooks for this type for more
// details
Type ContainerType `json:"IsInternal"`
// AppliedStatus is the status that has been "applied" (e.g., we've called Pull,
// Create, Start, or Stop) but we don't yet know that the application was successful.
// No need to save it in the state file, as agent will synchronize the container status
// on restart and for some operation eg: pull, it has to be recalled again.
AppliedStatus apicontainerstatus.ContainerStatus `json:"-"`
// ApplyingError is an error that occurred trying to transition the container
// to its desired state. It is propagated to the backend in the form
// 'Name: ErrorString' as the 'reason' field.
ApplyingError *apierrors.DefaultNamedError
// SentStatusUnsafe represents the last KnownStatusUnsafe that was sent to the ECS
// SubmitContainerStateChange API.
// TODO SentStatusUnsafe should probably be private with appropriately written
// setter/getter. When this is done, we need to ensure that the UnmarshalJSON is
// handled properly so that the state storage continues to work.
SentStatusUnsafe apicontainerstatus.ContainerStatus `json:"SentStatus"`
// MetadataFileUpdated is set to true when we have completed updating the
// metadata file
MetadataFileUpdated bool `json:"metadataFileUpdated"`
// KnownExitCodeUnsafe specifies the exit code for the container.
// It is exposed outside of the package so that it's marshalled/unmarshalled in
// the JSON body while saving the state.
// NOTE: Do not access KnownExitCodeUnsafe directly. Instead, use `GetKnownExitCode`
// and `SetKnownExitCode`.
KnownExitCodeUnsafe *int `json:"KnownExitCode"`
// KnownPortBindingsUnsafe is an array of port bindings for the container.
KnownPortBindingsUnsafe []PortBinding `json:"KnownPortBindings"`
// VolumesUnsafe is an array of volume mounts in the container.
VolumesUnsafe []types.MountPoint `json:"-"`
// NetworkModeUnsafe is the network mode in which the container is started
NetworkModeUnsafe string `json:"-"`
// NetworksUnsafe denotes the Docker Network Settings in the container.
NetworkSettingsUnsafe *types.NetworkSettings `json:"-"`
// SteadyStateStatusUnsafe specifies the steady state status for the container
// If uninitialized, it's assumed to be set to 'ContainerRunning'. Even though
// it's not only supposed to be set when the container is being created, it's
// exposed outside of the package so that it's marshalled/unmarshalled in the
	// JSON body while saving the state
SteadyStateStatusUnsafe *apicontainerstatus.ContainerStatus `json:"SteadyStateStatus,omitempty"`
createdAt time.Time
startedAt time.Time
finishedAt time.Time
labels map[string]string
}
// DockerContainer is a mapping between containers-as-docker-knows-them and
// containers-as-we-know-them.
// This is primarily used in DockerState, but lives here such that tasks and
// containers know how to convert themselves into Docker's desired config format
type DockerContainer struct {
DockerID string `json:"DockerId"`
DockerName string // needed for linking
Container *Container
}
// MountPoint describes the in-container location of a Volume and references
// that Volume by name.
type MountPoint struct {
SourceVolume string `json:"sourceVolume"`
ContainerPath string `json:"containerPath"`
ReadOnly bool `json:"readOnly"`
}
// VolumeFrom is a volume which references another container as its source.
type VolumeFrom struct {
SourceContainer string `json:"sourceContainer"`
ReadOnly bool `json:"readOnly"`
}
// Secret contains all essential attributes needed for ECS secrets vending as environment variables/tmpfs files
type Secret struct {
Name string `json:"name"`
ValueFrom string `json:"valueFrom"`
Region string `json:"region"`
ContainerPath string `json:"containerPath"`
Type string `json:"type"`
Provider string `json:"provider"`
}
// GetSecretResourceCacheKey returns the key required to access the secret
// from the ssmsecret resource
func (s *Secret) GetSecretResourceCacheKey() string {
return s.ValueFrom + "_" + s.Region
}
// String returns a human readable string representation of DockerContainer
func (dc *DockerContainer) String() string {
if dc == nil {
return "nil"
}
return fmt.Sprintf("Id: %s, Name: %s, Container: %s", dc.DockerID, dc.DockerName, dc.Container.String())
}
// NewContainerWithSteadyState creates a new Container object with the specified
// steady state. Containers that need the non default steady state set will
// use this method instead of setting it directly
func NewContainerWithSteadyState(steadyState apicontainerstatus.ContainerStatus) *Container {
steadyStateStatus := steadyState
return &Container{
SteadyStateStatusUnsafe: &steadyStateStatus,
}
}
// KnownTerminal returns true if the container's known status is STOPPED
func (c *Container) KnownTerminal() bool {
return c.GetKnownStatus().Terminal()
}
// DesiredTerminal returns true if the container's desired status is STOPPED
func (c *Container) DesiredTerminal() bool {
return c.GetDesiredStatus().Terminal()
}
// GetKnownStatus returns the known status of the container
func (c *Container) GetKnownStatus() apicontainerstatus.ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.KnownStatusUnsafe
}
// SetKnownStatus sets the known status of the container and update the container
// applied status
func (c *Container) SetKnownStatus(status apicontainerstatus.ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.KnownStatusUnsafe = status
c.updateAppliedStatusUnsafe(status)
}
// GetDesiredStatus gets the desired status of the container
func (c *Container) GetDesiredStatus() apicontainerstatus.ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.DesiredStatusUnsafe
}
// SetDesiredStatus sets the desired status of the container
func (c *Container) SetDesiredStatus(status apicontainerstatus.ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.DesiredStatusUnsafe = status
}
// GetSentStatus safely returns the SentStatusUnsafe of the container
func (c *Container) GetSentStatus() apicontainerstatus.ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.SentStatusUnsafe
}
// SetSentStatus safely sets the SentStatusUnsafe of the container
func (c *Container) SetSentStatus(status apicontainerstatus.ContainerStatus) {
c.lock.Lock()
defer c.lock.Unlock()
c.SentStatusUnsafe = status
}
// SetKnownExitCode sets exit code field in container struct
func (c *Container) SetKnownExitCode(i *int) {
c.lock.Lock()
defer c.lock.Unlock()
c.KnownExitCodeUnsafe = i
}
// GetKnownExitCode returns the container exit code
func (c *Container) GetKnownExitCode() *int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.KnownExitCodeUnsafe
}
// SetRegistryAuthCredentials sets the credentials for pulling image from ECR
func (c *Container) SetRegistryAuthCredentials(credential credentials.IAMRoleCredentials) {
c.lock.Lock()
defer c.lock.Unlock()
c.RegistryAuthentication.ECRAuthData.SetPullCredentials(credential)
}
// ShouldPullWithExecutionRole returns whether this container has its own ECR credentials
func (c *Container) ShouldPullWithExecutionRole() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.RegistryAuthentication != nil &&
c.RegistryAuthentication.Type == AuthTypeECR &&
c.RegistryAuthentication.ECRAuthData != nil &&
c.RegistryAuthentication.ECRAuthData.UseExecutionRole
}
// String returns a human readable string representation of this object
func (c *Container) String() string {
ret := fmt.Sprintf("%s(%s) (%s->%s)", c.Name, c.Image,
c.GetKnownStatus().String(), c.GetDesiredStatus().String())
if c.GetKnownExitCode() != nil {
ret += " - Exit: " + strconv.Itoa(*c.GetKnownExitCode())
}
return ret
}
// GetSteadyStateStatus returns the steady state status for the container. If
// Container.steadyState is not initialized, the default steady state status
// defined by `defaultContainerSteadyStateStatus` is returned. The 'pause'
// container's steady state differs from that of other containers, as the
// 'pause' container can reach its steady state once networking resources
// have been provisioned for it, which is done in the `ContainerResourcesProvisioned`
// state
func (c *Container) GetSteadyStateStatus() apicontainerstatus.ContainerStatus {
if c.SteadyStateStatusUnsafe == nil {
return defaultContainerSteadyStateStatus
}
return *c.SteadyStateStatusUnsafe
}
// IsKnownSteadyState returns true if the `KnownState` of the container equals
// the `steadyState` defined for the container
func (c *Container) IsKnownSteadyState() bool {
knownStatus := c.GetKnownStatus()
return knownStatus == c.GetSteadyStateStatus()
}
// GetNextKnownStateProgression returns the state that the container should
// progress to based on its `KnownState`. The progression is
// incremental until the container reaches its steady state. From then on,
// it transitions to `ContainerStopped`.
//
// For example:
// a. if the steady state of the container is defined as `ContainerRunning`,
// the progression is:
// Container: None -> Pulled -> Created -> Running* -> Stopped -> Zombie
//
// b. if the steady state of the container is defined as `ContainerResourcesProvisioned`,
// the progression is:
// Container: None -> Pulled -> Created -> Running -> Provisioned* -> Stopped -> Zombie
//
// c. if the steady state of the container is defined as `ContainerCreated`,
// the progression is:
// Container: None -> Pulled -> Created* -> Stopped -> Zombie
func (c *Container) GetNextKnownStateProgression() apicontainerstatus.ContainerStatus {
if c.IsKnownSteadyState() {
return apicontainerstatus.ContainerStopped
}
return c.GetKnownStatus() + 1
}
// IsInternal returns true if the container type is `ContainerCNIPause`
// or `ContainerNamespacePause`. It returns false otherwise
func (c *Container) IsInternal() bool {
if c.Type == ContainerNormal {
return false
}
return true
}
// IsRunning returns true if the container's known status is either RUNNING
// or RESOURCES_PROVISIONED. It returns false otherwise
func (c *Container) IsRunning() bool {
return c.GetKnownStatus().IsRunning()
}
// IsMetadataFileUpdated returns true if the metadata file has been updated,
// i.e. the metadata file is ready and will no longer change
func (c *Container) IsMetadataFileUpdated() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.MetadataFileUpdated
}
// SetMetadataFileUpdated sets the container's MetadataFileUpdated status to true
func (c *Container) SetMetadataFileUpdated() {
c.lock.Lock()
defer c.lock.Unlock()
c.MetadataFileUpdated = true
}
// IsEssential returns whether the container is an essential container or not
func (c *Container) IsEssential() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.Essential
}
// AWSLogAuthExecutionRole returns true if the auth is by execution role
func (c *Container) AWSLogAuthExecutionRole() bool {
return c.LogsAuthStrategy == awslogsAuthExecutionRole
}
// SetCreatedAt sets the timestamp for container's creation time
func (c *Container) SetCreatedAt(createdAt time.Time) {
if createdAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.createdAt = createdAt
}
// SetStartedAt sets the timestamp for container's start time
func (c *Container) SetStartedAt(startedAt time.Time) {
if startedAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.startedAt = startedAt
}
// SetFinishedAt sets the timestamp for container's stopped time
func (c *Container) SetFinishedAt(finishedAt time.Time) {
if finishedAt.IsZero() {
return
}
c.lock.Lock()
defer c.lock.Unlock()
c.finishedAt = finishedAt
}
// GetCreatedAt returns the timestamp for container's creation time
func (c *Container) GetCreatedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.createdAt
}
// GetStartedAt returns the timestamp for container's start time
func (c *Container) GetStartedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.startedAt
}
// GetFinishedAt returns the timestamp for container's stopped time
func (c *Container) GetFinishedAt() time.Time {
c.lock.RLock()
defer c.lock.RUnlock()
return c.finishedAt
}
// SetLabels sets the labels for a container
func (c *Container) SetLabels(labels map[string]string) {
c.lock.Lock()
defer c.lock.Unlock()
c.labels = labels
}
// GetLabels gets the labels for a container
func (c *Container) GetLabels() map[string]string {
c.lock.RLock()
defer c.lock.RUnlock()
return c.labels
}
// SetKnownPortBindings sets the ports for a container
func (c *Container) SetKnownPortBindings(ports []PortBinding) {
c.lock.Lock()
defer c.lock.Unlock()
c.KnownPortBindingsUnsafe = ports
}
// GetKnownPortBindings gets the ports for a container
func (c *Container) GetKnownPortBindings() []PortBinding {
c.lock.RLock()
defer c.lock.RUnlock()
return c.KnownPortBindingsUnsafe
}
// SetVolumes sets the volumes mounted in a container
func (c *Container) SetVolumes(volumes []types.MountPoint) {
c.lock.Lock()
defer c.lock.Unlock()
c.VolumesUnsafe = volumes
}
// GetVolumes returns the volumes mounted in a container
func (c *Container) GetVolumes() []types.MountPoint {
c.lock.RLock()
defer c.lock.RUnlock()
return c.VolumesUnsafe
}
// SetNetworkSettings sets the networks field in a container
func (c *Container) SetNetworkSettings(networks *types.NetworkSettings) {
c.lock.Lock()
defer c.lock.Unlock()
c.NetworkSettingsUnsafe = networks
}
// GetNetworkSettings returns the networks field in a container
func (c *Container) GetNetworkSettings() *types.NetworkSettings {
c.lock.RLock()
defer c.lock.RUnlock()
return c.NetworkSettingsUnsafe
}
// SetNetworkMode sets the network mode of the container
func (c *Container) SetNetworkMode(networkMode string) {
c.lock.Lock()
defer c.lock.Unlock()
c.NetworkModeUnsafe = networkMode
}
// GetNetworkMode returns the network mode of the container
func (c *Container) GetNetworkMode() string {
c.lock.RLock()
defer c.lock.RUnlock()
return c.NetworkModeUnsafe
}
// HealthStatusShouldBeReported returns true if the health check is defined in
// the task definition
func (c *Container) HealthStatusShouldBeReported() bool {
return c.HealthCheckType == DockerHealthCheckType
}
// SetHealthStatus sets the container health status
func (c *Container) SetHealthStatus(health HealthStatus) {
c.lock.Lock()
defer c.lock.Unlock()
if c.Health.Status == health.Status {
return
}
c.Health.Status = health.Status
c.Health.Since = aws.Time(time.Now())
c.Health.Output = health.Output
// Set the health exit code if the health check failed
if c.Health.Status == apicontainerstatus.ContainerUnhealthy {
c.Health.ExitCode = health.ExitCode
}
}
// GetHealthStatus returns the container health information
func (c *Container) GetHealthStatus() HealthStatus {
c.lock.RLock()
defer c.lock.RUnlock()
// Copy the pointer to avoid race condition
copyHealth := c.Health
if c.Health.Since != nil {
copyHealth.Since = aws.Time(aws.TimeValue(c.Health.Since))
}
return copyHealth
}
// BuildContainerDependency adds a new dependency container and satisfied status
// to the dependent container
func (c *Container) BuildContainerDependency(contName string,
satisfiedStatus apicontainerstatus.ContainerStatus,
dependentStatus apicontainerstatus.ContainerStatus) {
contDep := ContainerDependency{
ContainerName: contName,
SatisfiedStatus: satisfiedStatus,
}
if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok {
c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{}
}
deps := c.TransitionDependenciesMap[dependentStatus]
deps.ContainerDependencies = append(deps.ContainerDependencies, contDep)
c.TransitionDependenciesMap[dependentStatus] = deps
}
// BuildResourceDependency adds a new resource dependency by taking in the required status
// of the resource that satisfies the dependency and the dependent container status,
// whose transition is dependent on the resource.
// example: if container's PULLED transition is dependent on volume resource's
// CREATED status, then RequiredStatus=VolumeCreated and dependentStatus=ContainerPulled
func (c *Container) BuildResourceDependency(resourceName string,
requiredStatus resourcestatus.ResourceStatus,
dependentStatus apicontainerstatus.ContainerStatus) {
resourceDep := ResourceDependency{
Name: resourceName,
RequiredStatus: requiredStatus,
}
if _, ok := c.TransitionDependenciesMap[dependentStatus]; !ok {
c.TransitionDependenciesMap[dependentStatus] = TransitionDependencySet{}
}
deps := c.TransitionDependenciesMap[dependentStatus]
deps.ResourceDependencies = append(deps.ResourceDependencies, resourceDep)
c.TransitionDependenciesMap[dependentStatus] = deps
}
// updateAppliedStatusUnsafe updates the container transitioning status
func (c *Container) updateAppliedStatusUnsafe(knownStatus apicontainerstatus.ContainerStatus) {
if c.AppliedStatus == apicontainerstatus.ContainerStatusNone {
return
}
// Check if the container transition has already finished
if c.AppliedStatus <= knownStatus {
c.AppliedStatus = apicontainerstatus.ContainerStatusNone
}
}
// SetAppliedStatus sets the applied status of container and returns whether
// the container is already in a transition
func (c *Container) SetAppliedStatus(status apicontainerstatus.ContainerStatus) bool {
c.lock.Lock()
defer c.lock.Unlock()
if c.AppliedStatus != apicontainerstatus.ContainerStatusNone {
// return false to indicate the set operation failed
return false
}
c.AppliedStatus = status
return true
}
// GetAppliedStatus returns the transitioning status of container
func (c *Container) GetAppliedStatus() apicontainerstatus.ContainerStatus {
c.lock.RLock()
defer c.lock.RUnlock()
return c.AppliedStatus
}
// ShouldPullWithASMAuth returns true if this container needs to retrieve
// private registry authentication data from ASM
func (c *Container) ShouldPullWithASMAuth() bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.RegistryAuthentication != nil &&
c.RegistryAuthentication.Type == AuthTypeASM &&
c.RegistryAuthentication.ASMAuthData != nil
}
// SetASMDockerAuthConfig add the docker auth config data to the
// RegistryAuthentication struct held by the container, this is then passed down
// to the docker client to pull the image
func (c *Container) SetASMDockerAuthConfig(dac types.AuthConfig) {
c.RegistryAuthentication.ASMAuthData.SetDockerAuthConfig(dac)
}
// SetV3EndpointID sets the v3 endpoint id of container
func (c *Container) SetV3EndpointID(v3EndpointID string) {
c.lock.Lock()
defer c.lock.Unlock()
c.V3EndpointID = v3EndpointID
}
// GetV3EndpointID returns the v3 endpoint id of container
func (c *Container) GetV3EndpointID() string {
c.lock.RLock()
defer c.lock.RUnlock()
return c.V3EndpointID
}
// InjectV3MetadataEndpoint injects the v3 metadata endpoint as an environment variable for a container
func (c *Container) InjectV3MetadataEndpoint() {
c.lock.Lock()
defer c.lock.Unlock()
// don't assume that the environment variable map has been initialized by others
if c.Environment == nil {
c.Environment = make(map[string]string)
}
c.Environment[MetadataURIEnvironmentVariableName] =
fmt.Sprintf(MetadataURIFormat, c.V3EndpointID)
}
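// Editor's note, not part of the original file: after this runs, the
// container's environment carries an entry shaped by the two constants
// used above, conventionally of the form
//
//	ECS_CONTAINER_METADATA_URI=http://169.254.170.2/v3/<v3EndpointID>
//
// assuming the usual values of MetadataURIEnvironmentVariableName and
// MetadataURIFormat; those constants alone define the real shape.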
// ShouldCreateWithSSMSecret returns true if this container needs to get secret
// value from SSM Parameter Store
func (c *Container) ShouldCreateWithSSMSecret() bool {
c.lock.RLock()
defer c.lock.RUnlock()
// Secrets field will be nil if there are no secrets for the container
if c.Secrets == nil {
return false
}
for _, secret := range c.Secrets {
if secret.Provider == SecretProviderSSM {
return true
}
}
return false
}
// ShouldCreateWithASMSecret returns true if this container needs to get secret
// value from AWS Secrets Manager
func (c *Container) ShouldCreateWithASMSecret() bool {
c.lock.RLock()
defer c.lock.RUnlock()
// Secrets field will be nil if there are no secrets for the container
if c.Secrets == nil {
return false
}
for _, secret := range c.Secrets {
if secret.Provider == SecretProviderASM {
return true
}
}
return false
}
// MergeEnvironmentVariables adds additional envVarName:envVarValue pairs to
// the container's environment values structure, overwriting any existing keys
func (c *Container) MergeEnvironmentVariables(envVars map[string]string) {
c.lock.Lock()
defer c.lock.Unlock()
// don't assume that the environment variable map has been initialized by others
if c.Environment == nil {
c.Environment = make(map[string]string)
}
for k, v := range envVars {
c.Environment[k] = v
}
}
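// Editor's sketch, not part of the original file: merging is
// last-writer-wins per key, so repeated merges overwrite earlier values:
//
//	c.MergeEnvironmentVariables(map[string]string{"APP_ENV": "prod"})
//	c.MergeEnvironmentVariables(map[string]string{"APP_ENV": "test"})
//	// c.Environment["APP_ENV"] is now "test"

// HasSecretAsEnv returns true if the container has at least one secret
// that should be exposed as an environment variable.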
func (c *Container) HasSecretAsEnv() bool {
c.lock.RLock()
defer c.lock.RUnlock()
// Secrets field will be nil if there are no secrets for the container
if c.Secrets == nil {
return false
}
for _, secret := range c.Secrets {
if secret.Type == SecretTypeEnv {
return true
}
}
return false
}
| 1 | 21,916 | where is the default being set? | aws-amazon-ecs-agent | go |
@@ -154,7 +154,7 @@ static void found_package_cb (const char *line,
vString *name = vStringNew ();
tagEntryInfo tag;
- vStringNCopyS (name, line + matches[1].start, matches[1].length);
+ vStringNCopyS (name, line + matches[2].start, matches[2].length);
initTagEntry (&tag, vStringValue (name), RpmSpecKinds + K_PACKAGE);
tag.extensionFields.scopeIndex = *(int *)userData;
makeTagEntry (&tag); | 1 | /*
* Copyright (c) 2016 Masatake YAMATO
* Copyright (c) 2016 Red Hat, Inc.
*
* This source code is released for free distribution under the terms of the
* GNU General Public License version 2 or (at your option) any later version.
*
* This module contains functions for generating tags for rpm spec files.
*/
/*
* TODO
*
* 1. Capturing required and provide packages as reference tags
* 2. Capturing bz numbers and package versions in %changelog section
* 3. Capturing %configure --enable-FOO --with-BAR
*/
#include "general.h" /* must always come first */
#include <ctype.h>
#include <stddef.h>
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h> /* declare off_t (not known to regex.h on FreeBSD) */
#endif
#include <regex.h>
#include <string.h>
#include "debug.h"
#include "parse.h"
#include "read.h"
#include "routines.h"
typedef enum {
K_TAG,
	K_MACRO,
K_PACKAGE,
K_GLOBAL,
} rpmSpecKind;
enum rpmSpecMacroRole {
R_MACRO_UNDEF,
};
typedef int rpmSpecMacroRole; /* to allow ROLE_INDEX_* */
static roleDesc RpmSpecMacroRoles [] = {
{ true, "undef", "undefined" },
};
static scopeSeparator RpmSpecPackageSeparators [] = {
{ 'p' , "-" },
};
static kindOption RpmSpecKinds[] = {
{ true, 't', "tag", "tags" },
{ true, 'm', "macro", "macros",
.referenceOnly = false, ATTACH_ROLES(RpmSpecMacroRoles) },
{ true, 'p', "package", "packages",
ATTACH_SEPARATORS(RpmSpecPackageSeparators) },
{ true, 'g', "global", "global macros" },
};
static bool rejecting;
struct macro_cb_data {
rpmSpecKind kindex;
rpmSpecMacroRole rindex;
};
static bool is_line_continued(const char *line)
{
size_t len = strlen (line);
Assert (len > 0);
return ((line[len - 1] == '\\')
|| ((len >= 2) && (line[len - 1] == '\n') && (line[len - 2] == '\\')))? true: false;
}
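/* Editor's note (not part of the original parser): a line counts as
 * continued whether or not the trailing newline is still present, so both
 * "%define foo \" and "%define foo \\\n" continue; found_macro_cb() below
 * relies on this to skip a multi-line macro body as one definition. */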
static void found_macro_cb (const char *line,
const regexMatch *matches,
unsigned int count,
			    void *userData)
{
	struct macro_cb_data *data = userData;
if (count > 0)
{
vString *signature = ((count > 1) && (matches[2].length > 0))? vStringNew(): NULL;
vString *name = vStringNew ();
tagEntryInfo tag;
if (signature)
vStringNCopyS (signature, line + matches[2].start, matches[2].length);
vStringNCopyS (name, line + matches[1].start, matches[1].length);
if (data->rindex == ROLE_INDEX_DEFINITION)
initTagEntry (&tag, vStringValue (name), &(RpmSpecKinds[data->kindex]));
else
initRefTagEntry (&tag, vStringValue (name), &(RpmSpecKinds[data->kindex]), data->rindex);
if (signature)
tag.extensionFields.signature = vStringValue (signature);
/* Skip the definition */
while (line && is_line_continued (line))
{
rejecting = true;
line = (const char *)readLineFromInputFile ();
}
rejecting = false;
tag.extensionFields.endLine = getInputLineNumber();
makeTagEntry (&tag);
vStringDelete (name);
if (signature)
vStringDelete (signature);
}
}
static void found_tag_cb (const char *line,
const regexMatch *matches,
unsigned int count,
void *userData)
{
if (count > 0)
{
vString *name = vStringNew ();
vStringNCopyS (name, line + matches[1].start, matches[1].length);
makeSimpleTag (name, RpmSpecKinds, K_TAG);
if (count > 1)
{
if (strcasecmp (vStringValue (name), "name") == 0)
{
vString *package = vStringNew ();
vStringNCopyS (package, line + matches[2].start, matches[2].length);
*((int *)userData) = makeSimpleTag (package, RpmSpecKinds, K_PACKAGE);
vStringDelete (package);
}
}
vStringDelete (name);
}
}
static void found_package_cb (const char *line,
const regexMatch *matches,
unsigned int count,
void *userData)
{
if (count > 0)
{
vString *name = vStringNew ();
tagEntryInfo tag;
vStringNCopyS (name, line + matches[1].start, matches[1].length);
initTagEntry (&tag, vStringValue (name), RpmSpecKinds + K_PACKAGE);
tag.extensionFields.scopeIndex = *(int *)userData;
makeTagEntry (&tag);
vStringDelete (name);
}
}
static void initializeRpmSpecParser (langType language)
{
static int package_index = CORK_NIL;
rejecting = false;
	static struct macro_cb_data macro  = {K_MACRO,  ROLE_INDEX_DEFINITION};
static struct macro_cb_data global = {K_GLOBAL, ROLE_INDEX_DEFINITION};
	static struct macro_cb_data undef  = {K_MACRO,  R_MACRO_UNDEF};
addCallbackRegex (language, "^([A-Za-z_][A-Za-z_0-9()]+)[ \t]*:[ \t]*([^ \t]*)",
"{exclusive}", found_tag_cb, &rejecting, &package_index);
addCallbackRegex (language, "^%define[ \t]+([A-Za-z_][A-Za-z_0-9]+)(\\([^)]+\\))?",
"{exclusive}", found_macro_cb, &rejecting, ¯o);
addCallbackRegex (language, "^%undef[ \t]+([A-Za-z_][A-Za-z_0-9]+)",
"{exclusive}", found_macro_cb, &rejecting, &undef);
addCallbackRegex (language, "^%global[ \t]+([A-Za-z_][A-Za-z_0-9]+)(\\([^)]+\\))?",
"{exclusive}", found_macro_cb, &rejecting, &global);
addCallbackRegex (language, "^%package[ \t]+([A-Za-z_][A-Za-z_0-9-]+)",
"{exclusive}", found_package_cb, &rejecting, &package_index);
}
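/* Editor's illustration (not part of the original parser): for a spec
 * fragment such as
 *
 *     Name: foo
 *     %define foo_version 1.0
 *     %package devel
 *
 * the regexes above hand "Name"/"foo" to found_tag_cb (emitting a tag
 * entry plus a package entry whose cork index seeds package_index),
 * "foo_version" to found_macro_cb, and "devel" to found_package_cb,
 * which scopes it under "foo" via package_index. */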
extern parserDefinition* RpmSpecParser (void)
{
static const char *const extensions [] = { "spec", NULL };
parserDefinition* const def = parserNew ("RpmSpec");
def->kinds = RpmSpecKinds;
def->kindCount = ARRAY_SIZE (RpmSpecKinds);
def->extensions = extensions;
def->initialize = initializeRpmSpecParser;
def->method = METHOD_NOT_CRAFTED|METHOD_REGEX;
def->useCork = true;
def->requestAutomaticFQTag = true;
return def;
}
| 1 | 14,937 | these changes should likely be in the next commit instead | universal-ctags-ctags | c |
@@ -26,9 +26,12 @@ import (
"github.com/stretchr/testify/assert"
)
+type Foobar struct {
+ Foo string
+}
+
// TestFormat ensures the formatter and AddonTransform work as expected.
func TestFormat(t *testing.T) {
- // TODO: Add table formatter tests after implementing table formatter
for _, tc := range []struct {
name string
singleton bool | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package antctl
import (
"bytes"
"encoding/json"
"io"
"reflect"
"strings"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/assert"
)
// TestFormat ensures the formatter and AddonTransform work as expected.
func TestFormat(t *testing.T) {
// TODO: Add table formatter tests after implementing table formatter
for _, tc := range []struct {
name string
singleton bool
single bool
transform func(reader io.Reader, single bool) (interface{}, error)
rawResponseData interface{}
responseStruct reflect.Type
expected string
formatter formatterType
}{
{
name: "StructureData-NoTransform-List",
rawResponseData: []struct{ Foo string }{{Foo: "foo"}},
responseStruct: reflect.TypeOf(struct{ Foo string }{}),
expected: "- foo: foo\n",
formatter: yamlFormatter,
},
{
name: "StructureData-NoTransform-Single",
single: true,
rawResponseData: &struct{ Foo string }{Foo: "foo"},
responseStruct: reflect.TypeOf(struct{ Foo string }{}),
expected: "foo: foo\n",
formatter: yamlFormatter,
},
{
name: "StructureData-Transform-Single",
single: true,
transform: func(reader io.Reader, single bool) (i interface{}, err error) {
foo := &struct{ Foo string }{}
err = json.NewDecoder(reader).Decode(foo)
return &struct{ Bar string }{Bar: foo.Foo}, err
},
rawResponseData: &struct{ Foo string }{Foo: "foo"},
responseStruct: reflect.TypeOf(struct{ Bar string }{}),
expected: "bar: foo\n",
formatter: yamlFormatter,
},
} {
t.Run(tc.name, func(t *testing.T) {
opt := &commandDefinition{
SingleObject: tc.singleton,
TransformedResponse: tc.responseStruct,
AddonTransform: tc.transform,
}
var responseData []byte
responseData, err := json.Marshal(tc.rawResponseData)
assert.Nil(t, err)
var outputBuf bytes.Buffer
err = opt.output(bytes.NewBuffer(responseData), &outputBuf, tc.formatter, tc.single)
assert.Nil(t, err)
assert.Equal(t, tc.expected, outputBuf.String())
})
}
}
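// Editor's sketch (not part of the original file): the review for this
// change suggests reusing the Foobar type added by the patch instead of
// repeating anonymous structs in each case above, e.g.
//
//	rawResponseData: []Foobar{{Foo: "foo"}},
//	responseStruct:  reflect.TypeOf(Foobar{}),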
// TestCommandDefinitionGenerateExample checks example strings are generated as
// expected.
func TestCommandDefinitionGenerateExample(t *testing.T) {
type fooResponse struct {
Bar string
}
type keyFooResponse struct {
Bar string `antctl:"key"`
}
for k, tc := range map[string]struct {
use string
cmdChain string
singleObject bool
expect string
responseType reflect.Type
}{
"SingleObject": {
use: "test",
cmdChain: "first second third",
singleObject: true,
responseType: reflect.TypeOf(fooResponse{}),
expect: " Get the foo\n $ first second third test\n",
},
"NoKeyList": {
use: "test",
cmdChain: "first second third",
responseType: reflect.TypeOf(fooResponse{}),
expect: " Get the list of foo\n $ first second third test\n",
},
"KeyList": {
use: "test",
cmdChain: "first second third",
responseType: reflect.TypeOf(keyFooResponse{}),
expect: " Get a keyfoo\n $ first second third test [bar]\n Get the list of keyfoo\n $ first second third test\n",
},
} {
t.Run(k, func(t *testing.T) {
cmd := new(cobra.Command)
for _, seg := range strings.Split(tc.cmdChain, " ") {
cmd.Use = seg
tmp := new(cobra.Command)
cmd.AddCommand(tmp)
cmd = tmp
}
cmd.Use = tc.use
co := &commandDefinition{
SingleObject: tc.singleObject,
TransformedResponse: tc.responseType,
}
co.applyExampleToCommand(cmd)
assert.Equal(t, tc.expect, cmd.Example)
})
}
}
| 1 | 11,285 | Since you have defined this struct, I would suggest to replace all exist literal structs by this. | antrea-io-antrea | go |
@@ -168,6 +168,7 @@ public class K9 extends Application {
private static boolean mAnimations = true;
private static boolean mConfirmDelete = false;
+ private static boolean mConfirmMenuDiscard = true;
private static boolean mConfirmDeleteStarred = false;
private static boolean mConfirmSpam = false;
private static boolean mConfirmDeleteFromNotification = true; | 1 |
package com.fsck.k9;
import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.SynchronousQueue;
import android.app.Application;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.content.pm.ApplicationInfo;
import android.content.pm.PackageManager;
import android.net.Uri;
import android.os.Debug;
import android.os.Environment;
import android.os.Handler;
import android.os.Looper;
import android.os.StrictMode;
import android.text.format.Time;
import android.util.Log;
import com.fsck.k9.Account.SortType;
import com.fsck.k9.activity.MessageCompose;
import com.fsck.k9.activity.UpgradeDatabases;
import com.fsck.k9.controller.MessagingController;
import com.fsck.k9.controller.MessagingListener;
import com.fsck.k9.mail.Address;
import com.fsck.k9.mail.K9MailLib;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.internet.BinaryTempFileBody;
import com.fsck.k9.mailstore.LocalStore;
import com.fsck.k9.provider.UnreadWidgetProvider;
import com.fsck.k9.mail.ssl.LocalKeyStore;
import com.fsck.k9.service.BootReceiver;
import com.fsck.k9.service.MailService;
import com.fsck.k9.service.ShutdownReceiver;
import com.fsck.k9.service.StorageGoneReceiver;
public class K9 extends Application {
/**
* Components that are interested in knowing when the K9 instance is
* available and ready (Android invokes Application.onCreate() after other
* components') should implement this interface and register using
* {@link K9#registerApplicationAware(ApplicationAware)}.
*/
public static interface ApplicationAware {
/**
* Called when the Application instance is available and ready.
*
* @param application
* The application instance. Never <code>null</code>.
*/
void initializeComponent(Application application);
}
public static Application app = null;
public static File tempDirectory;
public static final String LOG_TAG = "k9";
/**
* Name of the {@link SharedPreferences} file used to store the last known version of the
* accounts' databases.
*
* <p>
* See {@link UpgradeDatabases} for a detailed explanation of the database upgrade process.
* </p>
*/
private static final String DATABASE_VERSION_CACHE = "database_version_cache";
/**
* Key used to store the last known database version of the accounts' databases.
*
* @see #DATABASE_VERSION_CACHE
*/
private static final String KEY_LAST_ACCOUNT_DATABASE_VERSION = "last_account_database_version";
/**
* Components that are interested in knowing when the K9 instance is
* available and ready.
*
* @see ApplicationAware
*/
private static final List<ApplicationAware> observers = new ArrayList<ApplicationAware>();
/**
* This will be {@code true} once the initialization is complete and {@link #notifyObservers()}
* was called.
* Afterwards calls to {@link #registerApplicationAware(com.fsck.k9.K9.ApplicationAware)} will
     * immediately call {@link com.fsck.k9.K9.ApplicationAware#initializeComponent(Application)} for the
* supplied argument.
*/
private static boolean sInitialized = false;
public enum BACKGROUND_OPS {
ALWAYS, NEVER, WHEN_CHECKED_AUTO_SYNC
}
private static String language = "";
private static Theme theme = Theme.LIGHT;
private static Theme messageViewTheme = Theme.USE_GLOBAL;
private static Theme composerTheme = Theme.USE_GLOBAL;
private static boolean useFixedMessageTheme = true;
private static final FontSizes fontSizes = new FontSizes();
private static BACKGROUND_OPS backgroundOps = BACKGROUND_OPS.WHEN_CHECKED_AUTO_SYNC;
/**
* Some log messages can be sent to a file, so that the logs
* can be read using unprivileged access (eg. Terminal Emulator)
* on the phone, without adb. Set to null to disable
*/
public static final String logFile = null;
//public static final String logFile = Environment.getExternalStorageDirectory() + "/k9mail/debug.log";
/**
* If this is enabled, various development settings will be enabled
* It should NEVER be on for Market builds
* Right now, it just governs strictmode
**/
public static boolean DEVELOPER_MODE = BuildConfig.DEVELOPER_MODE;
/**
* If this is enabled there will be additional logging information sent to
* Log.d, including protocol dumps.
* Controlled by Preferences at run-time
*/
public static boolean DEBUG = false;
/**
* If this is enabled than logging that normally hides sensitive information
* like passwords will show that information.
*/
public static boolean DEBUG_SENSITIVE = false;
/**
* Can create messages containing stack traces that can be forwarded
* to the development team.
*
* Feature is enabled when DEBUG == true
*/
public static final String ERROR_FOLDER_NAME = "K9mail-errors";
/**
* A reference to the {@link SharedPreferences} used for caching the last known database
* version.
*
* @see #checkCachedDatabaseVersion()
* @see #setDatabasesUpToDate(boolean)
*/
private static SharedPreferences sDatabaseVersionCache;
/**
* {@code true} if this is a debuggable build.
*/
private static boolean sIsDebuggable;
private static boolean mAnimations = true;
private static boolean mConfirmDelete = false;
private static boolean mConfirmDeleteStarred = false;
private static boolean mConfirmSpam = false;
private static boolean mConfirmDeleteFromNotification = true;
private static NotificationHideSubject sNotificationHideSubject = NotificationHideSubject.NEVER;
/**
* Controls when to hide the subject in the notification area.
*/
public enum NotificationHideSubject {
ALWAYS,
WHEN_LOCKED,
NEVER
}
private static NotificationQuickDelete sNotificationQuickDelete = NotificationQuickDelete.NEVER;
/**
* Controls behaviour of delete button in notifications.
*/
public enum NotificationQuickDelete {
ALWAYS,
FOR_SINGLE_MSG,
NEVER
}
private static LockScreenNotificationVisibility sLockScreenNotificationVisibility =
LockScreenNotificationVisibility.MESSAGE_COUNT;
public enum LockScreenNotificationVisibility {
EVERYTHING,
SENDERS,
MESSAGE_COUNT,
APP_NAME,
NOTHING
}
/**
* Controls when to use the message list split view.
*/
public enum SplitViewMode {
ALWAYS,
NEVER,
WHEN_IN_LANDSCAPE
}
private static boolean mMessageListCheckboxes = true;
private static boolean mMessageListStars = true;
private static int mMessageListPreviewLines = 2;
private static boolean mShowCorrespondentNames = true;
private static boolean mMessageListSenderAboveSubject = false;
private static boolean mShowContactName = false;
private static boolean mChangeContactNameColor = false;
private static int mContactNameColor = 0xff00008f;
private static boolean sShowContactPicture = true;
private static boolean mMessageViewFixedWidthFont = false;
private static boolean mMessageViewReturnToList = false;
private static boolean mMessageViewShowNext = false;
private static boolean mGesturesEnabled = true;
private static boolean mUseVolumeKeysForNavigation = false;
private static boolean mUseVolumeKeysForListNavigation = false;
private static boolean mStartIntegratedInbox = false;
private static boolean mMeasureAccounts = true;
private static boolean mCountSearchMessages = true;
private static boolean mHideSpecialAccounts = false;
private static boolean mAutofitWidth;
private static boolean mQuietTimeEnabled = false;
private static String mQuietTimeStarts = null;
private static String mQuietTimeEnds = null;
private static String mAttachmentDefaultPath = "";
private static boolean mWrapFolderNames = false;
private static boolean mHideUserAgent = false;
private static boolean mHideTimeZone = false;
private static SortType mSortType;
private static Map<SortType, Boolean> mSortAscending = new HashMap<SortType, Boolean>();
private static boolean sUseBackgroundAsUnreadIndicator = true;
private static boolean sThreadedViewEnabled = true;
private static SplitViewMode sSplitViewMode = SplitViewMode.NEVER;
private static boolean sColorizeMissingContactPictures = true;
private static boolean sMessageViewArchiveActionVisible = false;
private static boolean sMessageViewDeleteActionVisible = true;
private static boolean sMessageViewMoveActionVisible = false;
private static boolean sMessageViewCopyActionVisible = false;
private static boolean sMessageViewSpamActionVisible = false;
/**
* @see #areDatabasesUpToDate()
*/
private static boolean sDatabasesUpToDate = false;
/**
* For use when displaying that no folder is selected
*/
public static final String FOLDER_NONE = "-NONE-";
public static final String LOCAL_UID_PREFIX = "K9LOCAL:";
public static final String REMOTE_UID_PREFIX = "K9REMOTE:";
public static final String IDENTITY_HEADER = K9MailLib.IDENTITY_HEADER;
/**
* Specifies how many messages will be shown in a folder by default. This number is set
* on each new folder and can be incremented with "Load more messages..." by the
* VISIBLE_LIMIT_INCREMENT
*/
public static final int DEFAULT_VISIBLE_LIMIT = 25;
/**
* The maximum size of an attachment we're willing to download (either View or Save)
* Attachments that are base64 encoded (most) will be about 1.375x their actual size
* so we should probably factor that in. A 5MB attachment will generally be around
* 6.8MB downloaded but only 5MB saved.
*/
public static final int MAX_ATTACHMENT_DOWNLOAD_SIZE = (128 * 1024 * 1024);
/* How many times should K-9 try to deliver a message before giving up
* until the app is killed and restarted
*/
public static final int MAX_SEND_ATTEMPTS = 5;
/**
* Max time (in millis) the wake lock will be held for when background sync is happening
*/
public static final int WAKE_LOCK_TIMEOUT = 600000;
public static final int MANUAL_WAKE_LOCK_TIMEOUT = 120000;
public static final int PUSH_WAKE_LOCK_TIMEOUT = K9MailLib.PUSH_WAKE_LOCK_TIMEOUT;
public static final int MAIL_SERVICE_WAKE_LOCK_TIMEOUT = 60000;
public static final int BOOT_RECEIVER_WAKE_LOCK_TIMEOUT = 60000;
/**
* Time the LED is on/off when blinking on new email notification
*/
public static final int NOTIFICATION_LED_ON_TIME = 500;
public static final int NOTIFICATION_LED_OFF_TIME = 2000;
public static final boolean NOTIFICATION_LED_WHILE_SYNCING = false;
public static final int NOTIFICATION_LED_FAST_ON_TIME = 100;
public static final int NOTIFICATION_LED_FAST_OFF_TIME = 100;
public static final int NOTIFICATION_LED_BLINK_SLOW = 0;
public static final int NOTIFICATION_LED_BLINK_FAST = 1;
public static final int NOTIFICATION_LED_FAILURE_COLOR = 0xffff0000;
// Must not conflict with an account number
public static final int FETCHING_EMAIL_NOTIFICATION = -5000;
public static final int SEND_FAILED_NOTIFICATION = -1500;
public static final int CERTIFICATE_EXCEPTION_NOTIFICATION_INCOMING = -2000;
public static final int CERTIFICATE_EXCEPTION_NOTIFICATION_OUTGOING = -2500;
public static final int CONNECTIVITY_ID = -3;
public static class Intents {
public static class EmailReceived {
public static final String ACTION_EMAIL_RECEIVED = BuildConfig.APPLICATION_ID + ".intent.action.EMAIL_RECEIVED";
public static final String ACTION_EMAIL_DELETED = BuildConfig.APPLICATION_ID + ".intent.action.EMAIL_DELETED";
public static final String ACTION_REFRESH_OBSERVER = BuildConfig.APPLICATION_ID + ".intent.action.REFRESH_OBSERVER";
public static final String EXTRA_ACCOUNT = BuildConfig.APPLICATION_ID + ".intent.extra.ACCOUNT";
public static final String EXTRA_FOLDER = BuildConfig.APPLICATION_ID + ".intent.extra.FOLDER";
public static final String EXTRA_SENT_DATE = BuildConfig.APPLICATION_ID + ".intent.extra.SENT_DATE";
public static final String EXTRA_FROM = BuildConfig.APPLICATION_ID + ".intent.extra.FROM";
public static final String EXTRA_TO = BuildConfig.APPLICATION_ID + ".intent.extra.TO";
public static final String EXTRA_CC = BuildConfig.APPLICATION_ID + ".intent.extra.CC";
public static final String EXTRA_BCC = BuildConfig.APPLICATION_ID + ".intent.extra.BCC";
public static final String EXTRA_SUBJECT = BuildConfig.APPLICATION_ID + ".intent.extra.SUBJECT";
public static final String EXTRA_FROM_SELF = BuildConfig.APPLICATION_ID + ".intent.extra.FROM_SELF";
}
public static class Share {
/*
* We don't want to use EmailReceived.EXTRA_FROM ("com.fsck.k9.intent.extra.FROM")
* because of different semantics (String array vs. string with comma separated
* email addresses)
*/
public static final String EXTRA_FROM = BuildConfig.APPLICATION_ID + ".intent.extra.SENDER";
}
}
/**
* Called throughout the application when the number of accounts has changed. This method
* enables or disables the Compose activity, the boot receiver and the service based on
* whether any accounts are configured.
*/
public static void setServicesEnabled(Context context) {
int acctLength = Preferences.getPreferences(context).getAvailableAccounts().size();
setServicesEnabled(context, acctLength > 0, null);
}
private static void setServicesEnabled(Context context, boolean enabled, Integer wakeLockId) {
PackageManager pm = context.getPackageManager();
if (!enabled && pm.getComponentEnabledSetting(new ComponentName(context, MailService.class)) ==
PackageManager.COMPONENT_ENABLED_STATE_ENABLED) {
/*
* If no accounts now exist but the service is still enabled we're about to disable it
* so we'll reschedule to kill off any existing alarms.
*/
MailService.actionReset(context, wakeLockId);
}
Class<?>[] classes = { MessageCompose.class, BootReceiver.class, MailService.class };
for (Class<?> clazz : classes) {
boolean alreadyEnabled = pm.getComponentEnabledSetting(new ComponentName(context, clazz)) ==
PackageManager.COMPONENT_ENABLED_STATE_ENABLED;
if (enabled != alreadyEnabled) {
pm.setComponentEnabledSetting(
new ComponentName(context, clazz),
enabled ? PackageManager.COMPONENT_ENABLED_STATE_ENABLED :
PackageManager.COMPONENT_ENABLED_STATE_DISABLED,
PackageManager.DONT_KILL_APP);
}
}
if (enabled && pm.getComponentEnabledSetting(new ComponentName(context, MailService.class)) ==
PackageManager.COMPONENT_ENABLED_STATE_ENABLED) {
/*
* And now if accounts do exist then we've just enabled the service and we want to
* schedule alarms for the new accounts.
*/
MailService.actionReset(context, wakeLockId);
}
}
/**
     * Register BroadcastReceivers programmatically because doing it from the manifest
* would make K-9 auto-start. We don't want auto-start because the initialization
* sequence isn't safe while some events occur (SD card unmount).
*/
protected void registerReceivers() {
final StorageGoneReceiver receiver = new StorageGoneReceiver();
final IntentFilter filter = new IntentFilter();
filter.addAction(Intent.ACTION_MEDIA_EJECT);
filter.addAction(Intent.ACTION_MEDIA_UNMOUNTED);
filter.addDataScheme("file");
final BlockingQueue<Handler> queue = new SynchronousQueue<Handler>();
// starting a new thread to handle unmount events
new Thread(new Runnable() {
@Override
public void run() {
Looper.prepare();
try {
queue.put(new Handler());
} catch (InterruptedException e) {
Log.e(K9.LOG_TAG, "", e);
}
Looper.loop();
}
}, "Unmount-thread").start();
try {
final Handler storageGoneHandler = queue.take();
registerReceiver(receiver, filter, null, storageGoneHandler);
Log.i(K9.LOG_TAG, "Registered: unmount receiver");
} catch (InterruptedException e) {
Log.e(K9.LOG_TAG, "Unable to register unmount receiver", e);
}
registerReceiver(new ShutdownReceiver(), new IntentFilter(Intent.ACTION_SHUTDOWN));
Log.i(K9.LOG_TAG, "Registered: shutdown receiver");
}
/**
* Save settings from our statics into the app database.
* <p/>
* If you're adding a preference here, odds are you'll need to add it to
* {@link com.fsck.k9.preferences.GlobalSettings}, too.
*
* @param editor Preferences to save into
*/
public static void save(SharedPreferences.Editor editor) {
editor.putBoolean("enableDebugLogging", K9.DEBUG);
editor.putBoolean("enableSensitiveLogging", K9.DEBUG_SENSITIVE);
editor.putString("backgroundOperations", K9.backgroundOps.name());
editor.putBoolean("animations", mAnimations);
editor.putBoolean("gesturesEnabled", mGesturesEnabled);
editor.putBoolean("useVolumeKeysForNavigation", mUseVolumeKeysForNavigation);
editor.putBoolean("useVolumeKeysForListNavigation", mUseVolumeKeysForListNavigation);
editor.putBoolean("autofitWidth", mAutofitWidth);
editor.putBoolean("quietTimeEnabled", mQuietTimeEnabled);
editor.putString("quietTimeStarts", mQuietTimeStarts);
editor.putString("quietTimeEnds", mQuietTimeEnds);
editor.putBoolean("startIntegratedInbox", mStartIntegratedInbox);
editor.putBoolean("measureAccounts", mMeasureAccounts);
editor.putBoolean("countSearchMessages", mCountSearchMessages);
editor.putBoolean("messageListSenderAboveSubject", mMessageListSenderAboveSubject);
editor.putBoolean("hideSpecialAccounts", mHideSpecialAccounts);
editor.putBoolean("messageListStars", mMessageListStars);
editor.putInt("messageListPreviewLines", mMessageListPreviewLines);
editor.putBoolean("messageListCheckboxes", mMessageListCheckboxes);
editor.putBoolean("showCorrespondentNames", mShowCorrespondentNames);
editor.putBoolean("showContactName", mShowContactName);
editor.putBoolean("showContactPicture", sShowContactPicture);
editor.putBoolean("changeRegisteredNameColor", mChangeContactNameColor);
editor.putInt("registeredNameColor", mContactNameColor);
editor.putBoolean("messageViewFixedWidthFont", mMessageViewFixedWidthFont);
editor.putBoolean("messageViewReturnToList", mMessageViewReturnToList);
editor.putBoolean("messageViewShowNext", mMessageViewShowNext);
editor.putBoolean("wrapFolderNames", mWrapFolderNames);
editor.putBoolean("hideUserAgent", mHideUserAgent);
editor.putBoolean("hideTimeZone", mHideTimeZone);
editor.putString("language", language);
editor.putInt("theme", theme.ordinal());
editor.putInt("messageViewTheme", messageViewTheme.ordinal());
editor.putInt("messageComposeTheme", composerTheme.ordinal());
editor.putBoolean("fixedMessageViewTheme", useFixedMessageTheme);
editor.putBoolean("confirmDelete", mConfirmDelete);
editor.putBoolean("confirmDeleteStarred", mConfirmDeleteStarred);
editor.putBoolean("confirmSpam", mConfirmSpam);
editor.putBoolean("confirmDeleteFromNotification", mConfirmDeleteFromNotification);
editor.putString("sortTypeEnum", mSortType.name());
editor.putBoolean("sortAscending", mSortAscending.get(mSortType));
editor.putString("notificationHideSubject", sNotificationHideSubject.toString());
editor.putString("notificationQuickDelete", sNotificationQuickDelete.toString());
editor.putString("lockScreenNotificationVisibility", sLockScreenNotificationVisibility.toString());
editor.putString("attachmentdefaultpath", mAttachmentDefaultPath);
editor.putBoolean("useBackgroundAsUnreadIndicator", sUseBackgroundAsUnreadIndicator);
editor.putBoolean("threadedView", sThreadedViewEnabled);
editor.putString("splitViewMode", sSplitViewMode.name());
editor.putBoolean("colorizeMissingContactPictures", sColorizeMissingContactPictures);
editor.putBoolean("messageViewArchiveActionVisible", sMessageViewArchiveActionVisible);
editor.putBoolean("messageViewDeleteActionVisible", sMessageViewDeleteActionVisible);
editor.putBoolean("messageViewMoveActionVisible", sMessageViewMoveActionVisible);
editor.putBoolean("messageViewCopyActionVisible", sMessageViewCopyActionVisible);
editor.putBoolean("messageViewSpamActionVisible", sMessageViewSpamActionVisible);
fontSizes.save(editor);
}
@Override
public void onCreate() {
if (K9.DEVELOPER_MODE) {
StrictMode.enableDefaults();
}
PRNGFixes.apply();
super.onCreate();
app = this;
sIsDebuggable = ((getApplicationInfo().flags & ApplicationInfo.FLAG_DEBUGGABLE) != 0);
K9MailLib.setDebugStatus(new K9MailLib.DebugStatus() {
@Override public boolean enabled() {
return DEBUG;
}
@Override public boolean debugSensitive() {
return DEBUG_SENSITIVE;
}
});
checkCachedDatabaseVersion();
Preferences prefs = Preferences.getPreferences(this);
loadPrefs(prefs);
/*
* We have to give MimeMessage a temp directory because File.createTempFile(String, String)
* doesn't work in Android and MimeMessage does not have access to a Context.
*/
BinaryTempFileBody.setTempDirectory(getCacheDir());
LocalKeyStore.setKeyStoreLocation(getDir("KeyStore", MODE_PRIVATE).toString());
/*
* Enable background sync of messages
*/
setServicesEnabled(this);
registerReceivers();
MessagingController.getInstance(this).addListener(new MessagingListener() {
private void broadcastIntent(String action, Account account, String folder, Message message) {
try {
Uri uri = Uri.parse("email://messages/" + account.getAccountNumber() + "/" + Uri.encode(folder) + "/" + Uri.encode(message.getUid()));
Intent intent = new Intent(action, uri);
intent.putExtra(K9.Intents.EmailReceived.EXTRA_ACCOUNT, account.getDescription());
intent.putExtra(K9.Intents.EmailReceived.EXTRA_FOLDER, folder);
intent.putExtra(K9.Intents.EmailReceived.EXTRA_SENT_DATE, message.getSentDate());
intent.putExtra(K9.Intents.EmailReceived.EXTRA_FROM, Address.toString(message.getFrom()));
intent.putExtra(K9.Intents.EmailReceived.EXTRA_TO, Address.toString(message.getRecipients(Message.RecipientType.TO)));
intent.putExtra(K9.Intents.EmailReceived.EXTRA_CC, Address.toString(message.getRecipients(Message.RecipientType.CC)));
intent.putExtra(K9.Intents.EmailReceived.EXTRA_BCC, Address.toString(message.getRecipients(Message.RecipientType.BCC)));
intent.putExtra(K9.Intents.EmailReceived.EXTRA_SUBJECT, message.getSubject());
intent.putExtra(K9.Intents.EmailReceived.EXTRA_FROM_SELF, account.isAnIdentity(message.getFrom()));
K9.this.sendBroadcast(intent);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Broadcasted: action=" + action
+ " account=" + account.getDescription()
+ " folder=" + folder
+ " message uid=" + message.getUid()
);
} catch (MessagingException e) {
Log.w(K9.LOG_TAG, "Error: action=" + action
+ " account=" + account.getDescription()
+ " folder=" + folder
+ " message uid=" + message.getUid()
);
}
}
private void updateUnreadWidget() {
try {
UnreadWidgetProvider.updateUnreadCount(K9.this);
} catch (Exception e) {
if (K9.DEBUG) {
Log.e(LOG_TAG, "Error while updating unread widget(s)", e);
}
}
}
@Override
public void synchronizeMailboxRemovedMessage(Account account, String folder, Message message) {
broadcastIntent(K9.Intents.EmailReceived.ACTION_EMAIL_DELETED, account, folder, message);
updateUnreadWidget();
}
@Override
public void messageDeleted(Account account, String folder, Message message) {
broadcastIntent(K9.Intents.EmailReceived.ACTION_EMAIL_DELETED, account, folder, message);
updateUnreadWidget();
}
@Override
public void synchronizeMailboxNewMessage(Account account, String folder, Message message) {
broadcastIntent(K9.Intents.EmailReceived.ACTION_EMAIL_RECEIVED, account, folder, message);
updateUnreadWidget();
}
@Override
public void folderStatusChanged(Account account, String folderName,
int unreadMessageCount) {
updateUnreadWidget();
// let observers know a change occurred
Intent intent = new Intent(K9.Intents.EmailReceived.ACTION_REFRESH_OBSERVER, null);
intent.putExtra(K9.Intents.EmailReceived.EXTRA_ACCOUNT, account.getDescription());
intent.putExtra(K9.Intents.EmailReceived.EXTRA_FOLDER, folderName);
K9.this.sendBroadcast(intent);
}
});
notifyObservers();
}
/**
* Loads the last known database version of the accounts' databases from a
     * {@link SharedPreferences} file.
*
* <p>
* If the stored version matches {@link LocalStore#DB_VERSION} we know that the databases are
* up to date.<br>
* Using {@code SharedPreferences} should be a lot faster than opening all SQLite databases to
* get the current database version.
* </p><p>
* See {@link UpgradeDatabases} for a detailed explanation of the database upgrade process.
* </p>
*
* @see #areDatabasesUpToDate()
*/
public void checkCachedDatabaseVersion() {
sDatabaseVersionCache = getSharedPreferences(DATABASE_VERSION_CACHE, MODE_PRIVATE);
int cachedVersion = sDatabaseVersionCache.getInt(KEY_LAST_ACCOUNT_DATABASE_VERSION, 0);
if (cachedVersion >= LocalStore.DB_VERSION) {
K9.setDatabasesUpToDate(false);
}
}
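    // Editor's note (not part of the original source): passing 'false' above
    // means the databases are remembered as up to date without rewriting the
    // cache, since the cached version is already current; see
    // setDatabasesUpToDate(boolean) further down for the meaning of the flag.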
/**
* Load preferences into our statics.
*
* If you're adding a preference here, odds are you'll need to add it to
* {@link com.fsck.k9.preferences.GlobalSettings}, too.
*
* @param prefs Preferences to load
*/
public static void loadPrefs(Preferences prefs) {
SharedPreferences sprefs = prefs.getPreferences();
DEBUG = sprefs.getBoolean("enableDebugLogging", false);
if (!DEBUG && sIsDebuggable && Debug.isDebuggerConnected()) {
// If the debugger is attached, we're probably (surprise surprise) debugging something.
DEBUG = true;
Log.i(K9.LOG_TAG, "Debugger attached; enabling debug logging.");
}
DEBUG_SENSITIVE = sprefs.getBoolean("enableSensitiveLogging", false);
mAnimations = sprefs.getBoolean("animations", true);
mGesturesEnabled = sprefs.getBoolean("gesturesEnabled", false);
mUseVolumeKeysForNavigation = sprefs.getBoolean("useVolumeKeysForNavigation", false);
mUseVolumeKeysForListNavigation = sprefs.getBoolean("useVolumeKeysForListNavigation", false);
mStartIntegratedInbox = sprefs.getBoolean("startIntegratedInbox", false);
mMeasureAccounts = sprefs.getBoolean("measureAccounts", true);
mCountSearchMessages = sprefs.getBoolean("countSearchMessages", true);
mHideSpecialAccounts = sprefs.getBoolean("hideSpecialAccounts", false);
mMessageListSenderAboveSubject = sprefs.getBoolean("messageListSenderAboveSubject", false);
mMessageListCheckboxes = sprefs.getBoolean("messageListCheckboxes", false);
mMessageListStars = sprefs.getBoolean("messageListStars", true);
mMessageListPreviewLines = sprefs.getInt("messageListPreviewLines", 2);
mAutofitWidth = sprefs.getBoolean("autofitWidth", true);
mQuietTimeEnabled = sprefs.getBoolean("quietTimeEnabled", false);
mQuietTimeStarts = sprefs.getString("quietTimeStarts", "21:00");
mQuietTimeEnds = sprefs.getString("quietTimeEnds", "7:00");
mShowCorrespondentNames = sprefs.getBoolean("showCorrespondentNames", true);
mShowContactName = sprefs.getBoolean("showContactName", false);
sShowContactPicture = sprefs.getBoolean("showContactPicture", true);
mChangeContactNameColor = sprefs.getBoolean("changeRegisteredNameColor", false);
mContactNameColor = sprefs.getInt("registeredNameColor", 0xff00008f);
mMessageViewFixedWidthFont = sprefs.getBoolean("messageViewFixedWidthFont", false);
mMessageViewReturnToList = sprefs.getBoolean("messageViewReturnToList", false);
mMessageViewShowNext = sprefs.getBoolean("messageViewShowNext", false);
mWrapFolderNames = sprefs.getBoolean("wrapFolderNames", false);
mHideUserAgent = sprefs.getBoolean("hideUserAgent", false);
mHideTimeZone = sprefs.getBoolean("hideTimeZone", false);
mConfirmDelete = sprefs.getBoolean("confirmDelete", false);
mConfirmDeleteStarred = sprefs.getBoolean("confirmDeleteStarred", false);
mConfirmSpam = sprefs.getBoolean("confirmSpam", false);
mConfirmDeleteFromNotification = sprefs.getBoolean("confirmDeleteFromNotification", true);
try {
String value = sprefs.getString("sortTypeEnum", Account.DEFAULT_SORT_TYPE.name());
mSortType = SortType.valueOf(value);
} catch (Exception e) {
mSortType = Account.DEFAULT_SORT_TYPE;
}
boolean sortAscending = sprefs.getBoolean("sortAscending", Account.DEFAULT_SORT_ASCENDING);
mSortAscending.put(mSortType, sortAscending);
String notificationHideSubject = sprefs.getString("notificationHideSubject", null);
if (notificationHideSubject == null) {
// If the "notificationHideSubject" setting couldn't be found, the app was probably
// updated. Look for the old "keyguardPrivacy" setting and map it to the new enum.
sNotificationHideSubject = (sprefs.getBoolean("keyguardPrivacy", false)) ?
NotificationHideSubject.WHEN_LOCKED : NotificationHideSubject.NEVER;
} else {
sNotificationHideSubject = NotificationHideSubject.valueOf(notificationHideSubject);
}
String notificationQuickDelete = sprefs.getString("notificationQuickDelete", null);
if (notificationQuickDelete != null) {
sNotificationQuickDelete = NotificationQuickDelete.valueOf(notificationQuickDelete);
}
String lockScreenNotificationVisibility = sprefs.getString("lockScreenNotificationVisibility", null);
if(lockScreenNotificationVisibility != null) {
sLockScreenNotificationVisibility = LockScreenNotificationVisibility.valueOf(lockScreenNotificationVisibility);
}
String splitViewMode = sprefs.getString("splitViewMode", null);
if (splitViewMode != null) {
sSplitViewMode = SplitViewMode.valueOf(splitViewMode);
}
mAttachmentDefaultPath = sprefs.getString("attachmentdefaultpath", Environment.getExternalStorageDirectory().toString());
sUseBackgroundAsUnreadIndicator = sprefs.getBoolean("useBackgroundAsUnreadIndicator", true);
sThreadedViewEnabled = sprefs.getBoolean("threadedView", true);
fontSizes.load(sprefs);
try {
setBackgroundOps(BACKGROUND_OPS.valueOf(sprefs.getString(
"backgroundOperations",
BACKGROUND_OPS.WHEN_CHECKED_AUTO_SYNC.name())));
} catch (Exception e) {
setBackgroundOps(BACKGROUND_OPS.WHEN_CHECKED_AUTO_SYNC);
}
sColorizeMissingContactPictures = sprefs.getBoolean("colorizeMissingContactPictures", true);
sMessageViewArchiveActionVisible = sprefs.getBoolean("messageViewArchiveActionVisible", false);
sMessageViewDeleteActionVisible = sprefs.getBoolean("messageViewDeleteActionVisible", true);
sMessageViewMoveActionVisible = sprefs.getBoolean("messageViewMoveActionVisible", false);
sMessageViewCopyActionVisible = sprefs.getBoolean("messageViewCopyActionVisible", false);
sMessageViewSpamActionVisible = sprefs.getBoolean("messageViewSpamActionVisible", false);
K9.setK9Language(sprefs.getString("language", ""));
int themeValue = sprefs.getInt("theme", Theme.LIGHT.ordinal());
// We used to save the resource ID of the theme. So convert that to the new format if
// necessary.
if (themeValue == Theme.DARK.ordinal() || themeValue == android.R.style.Theme) {
K9.setK9Theme(Theme.DARK);
} else {
K9.setK9Theme(Theme.LIGHT);
}
themeValue = sprefs.getInt("messageViewTheme", Theme.USE_GLOBAL.ordinal());
K9.setK9MessageViewThemeSetting(Theme.values()[themeValue]);
themeValue = sprefs.getInt("messageComposeTheme", Theme.USE_GLOBAL.ordinal());
K9.setK9ComposerThemeSetting(Theme.values()[themeValue]);
K9.setUseFixedMessageViewTheme(sprefs.getBoolean("fixedMessageViewTheme", true));
}
/**
* since Android invokes Application.onCreate() only after invoking all
* other components' onCreate(), here is a way to notify interested
     * components that the application is available and ready
*/
protected void notifyObservers() {
synchronized (observers) {
for (final ApplicationAware aware : observers) {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Initializing observer: " + aware);
}
try {
aware.initializeComponent(this);
} catch (Exception e) {
Log.w(K9.LOG_TAG, "Failure when notifying " + aware, e);
}
}
sInitialized = true;
observers.clear();
}
}
/**
* Register a component to be notified when the {@link K9} instance is ready.
*
* @param component
* Never <code>null</code>.
*/
public static void registerApplicationAware(final ApplicationAware component) {
synchronized (observers) {
if (sInitialized) {
component.initializeComponent(K9.app);
} else if (!observers.contains(component)) {
observers.add(component);
}
}
}
public static String getK9Language() {
return language;
}
public static void setK9Language(String nlanguage) {
language = nlanguage;
}
/**
* Possible values for the different theme settings.
*
* <p><strong>Important:</strong>
* Do not change the order of the items! The ordinal value (position) is used when saving the
* settings.</p>
*/
public enum Theme {
LIGHT,
DARK,
USE_GLOBAL
}
public static int getK9ThemeResourceId(Theme themeId) {
return (themeId == Theme.LIGHT) ? R.style.Theme_K9_Light : R.style.Theme_K9_Dark;
}
public static int getK9ThemeResourceId() {
return getK9ThemeResourceId(theme);
}
public static Theme getK9MessageViewTheme() {
return messageViewTheme == Theme.USE_GLOBAL ? theme : messageViewTheme;
}
public static Theme getK9MessageViewThemeSetting() {
return messageViewTheme;
}
public static Theme getK9ComposerTheme() {
return composerTheme == Theme.USE_GLOBAL ? theme : composerTheme;
}
public static Theme getK9ComposerThemeSetting() {
return composerTheme;
}
public static Theme getK9Theme() {
return theme;
}
public static void setK9Theme(Theme ntheme) {
if (ntheme != Theme.USE_GLOBAL) {
theme = ntheme;
}
}
public static void setK9MessageViewThemeSetting(Theme nMessageViewTheme) {
messageViewTheme = nMessageViewTheme;
}
public static boolean useFixedMessageViewTheme() {
return useFixedMessageTheme;
}
public static void setK9ComposerThemeSetting(Theme compTheme) {
composerTheme = compTheme;
}
public static void setUseFixedMessageViewTheme(boolean useFixed) {
useFixedMessageTheme = useFixed;
if (!useFixedMessageTheme && messageViewTheme == Theme.USE_GLOBAL) {
messageViewTheme = theme;
}
}
public static BACKGROUND_OPS getBackgroundOps() {
return backgroundOps;
}
public static boolean setBackgroundOps(BACKGROUND_OPS backgroundOps) {
BACKGROUND_OPS oldBackgroundOps = K9.backgroundOps;
K9.backgroundOps = backgroundOps;
return backgroundOps != oldBackgroundOps;
}
public static boolean setBackgroundOps(String nbackgroundOps) {
return setBackgroundOps(BACKGROUND_OPS.valueOf(nbackgroundOps));
}
public static boolean gesturesEnabled() {
return mGesturesEnabled;
}
public static void setGesturesEnabled(boolean gestures) {
mGesturesEnabled = gestures;
}
public static boolean useVolumeKeysForNavigationEnabled() {
return mUseVolumeKeysForNavigation;
}
public static void setUseVolumeKeysForNavigation(boolean volume) {
mUseVolumeKeysForNavigation = volume;
}
public static boolean useVolumeKeysForListNavigationEnabled() {
return mUseVolumeKeysForListNavigation;
}
public static void setUseVolumeKeysForListNavigation(boolean enabled) {
mUseVolumeKeysForListNavigation = enabled;
}
public static boolean autofitWidth() {
return mAutofitWidth;
}
public static void setAutofitWidth(boolean autofitWidth) {
mAutofitWidth = autofitWidth;
}
public static boolean getQuietTimeEnabled() {
return mQuietTimeEnabled;
}
public static void setQuietTimeEnabled(boolean quietTimeEnabled) {
mQuietTimeEnabled = quietTimeEnabled;
}
public static String getQuietTimeStarts() {
return mQuietTimeStarts;
}
public static void setQuietTimeStarts(String quietTimeStarts) {
mQuietTimeStarts = quietTimeStarts;
}
public static String getQuietTimeEnds() {
return mQuietTimeEnds;
}
public static void setQuietTimeEnds(String quietTimeEnds) {
mQuietTimeEnds = quietTimeEnds;
}
public static boolean isQuietTime() {
if (!mQuietTimeEnabled) {
return false;
}
Time time = new Time();
time.setToNow();
Integer startHour = Integer.parseInt(mQuietTimeStarts.split(":")[0]);
Integer startMinute = Integer.parseInt(mQuietTimeStarts.split(":")[1]);
Integer endHour = Integer.parseInt(mQuietTimeEnds.split(":")[0]);
Integer endMinute = Integer.parseInt(mQuietTimeEnds.split(":")[1]);
Integer now = (time.hour * 60) + time.minute;
Integer quietStarts = startHour * 60 + startMinute;
Integer quietEnds = endHour * 60 + endMinute;
// If start and end times are the same, we're never quiet
if (quietStarts.equals(quietEnds)) {
return false;
}
// 21:00 - 05:00 means we want to be quiet if it's after 9 or before 5
if (quietStarts > quietEnds) {
// if it's 22:00 or 03:00 but not 8:00
if (now >= quietStarts || now <= quietEnds) {
return true;
}
}
// 01:00 - 05:00
else {
            // if it's 2:00 or 4:00 but not 8:00 or 0:00
if (now >= quietStarts && now <= quietEnds) {
return true;
}
}
return false;
}
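    /*
     * Editor's illustration (not part of the original source): a worked pass
     * through the wrap-around branch above. With quietTimeStarts = "21:00"
     * and quietTimeEnds = "05:00", quietStarts (1260) > quietEnds (300), so
     * now = 1350 (22:30) satisfies now >= quietStarts and counts as quiet,
     * while now = 480 (08:00) matches neither bound and is not quiet.
     */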
public static boolean startIntegratedInbox() {
return mStartIntegratedInbox;
}
public static void setStartIntegratedInbox(boolean startIntegratedInbox) {
mStartIntegratedInbox = startIntegratedInbox;
}
public static boolean showAnimations() {
return mAnimations;
}
public static void setAnimations(boolean animations) {
mAnimations = animations;
}
public static int messageListPreviewLines() {
return mMessageListPreviewLines;
}
public static void setMessageListPreviewLines(int lines) {
mMessageListPreviewLines = lines;
}
public static boolean messageListCheckboxes() {
return mMessageListCheckboxes;
}
public static void setMessageListCheckboxes(boolean checkboxes) {
mMessageListCheckboxes = checkboxes;
}
public static boolean messageListStars() {
return mMessageListStars;
}
public static void setMessageListStars(boolean stars) {
mMessageListStars = stars;
}
public static boolean showCorrespondentNames() {
return mShowCorrespondentNames;
}
public static boolean messageListSenderAboveSubject() {
return mMessageListSenderAboveSubject;
}
public static void setMessageListSenderAboveSubject(boolean sender) {
mMessageListSenderAboveSubject = sender;
}
public static void setShowCorrespondentNames(boolean showCorrespondentNames) {
mShowCorrespondentNames = showCorrespondentNames;
}
public static boolean showContactName() {
return mShowContactName;
}
public static void setShowContactName(boolean showContactName) {
mShowContactName = showContactName;
}
public static boolean changeContactNameColor() {
return mChangeContactNameColor;
}
public static void setChangeContactNameColor(boolean changeContactNameColor) {
mChangeContactNameColor = changeContactNameColor;
}
public static int getContactNameColor() {
return mContactNameColor;
}
public static void setContactNameColor(int contactNameColor) {
mContactNameColor = contactNameColor;
}
public static boolean messageViewFixedWidthFont() {
return mMessageViewFixedWidthFont;
}
public static void setMessageViewFixedWidthFont(boolean fixed) {
mMessageViewFixedWidthFont = fixed;
}
public static boolean messageViewReturnToList() {
return mMessageViewReturnToList;
}
public static void setMessageViewReturnToList(boolean messageViewReturnToList) {
mMessageViewReturnToList = messageViewReturnToList;
}
public static boolean messageViewShowNext() {
return mMessageViewShowNext;
}
public static void setMessageViewShowNext(boolean messageViewShowNext) {
mMessageViewShowNext = messageViewShowNext;
}
public static FontSizes getFontSizes() {
return fontSizes;
}
public static boolean measureAccounts() {
return mMeasureAccounts;
}
public static void setMeasureAccounts(boolean measureAccounts) {
mMeasureAccounts = measureAccounts;
}
public static boolean countSearchMessages() {
return mCountSearchMessages;
}
public static void setCountSearchMessages(boolean countSearchMessages) {
mCountSearchMessages = countSearchMessages;
}
public static boolean isHideSpecialAccounts() {
return mHideSpecialAccounts;
}
public static void setHideSpecialAccounts(boolean hideSpecialAccounts) {
mHideSpecialAccounts = hideSpecialAccounts;
}
public static boolean confirmDelete() {
return mConfirmDelete;
}
public static void setConfirmDelete(final boolean confirm) {
mConfirmDelete = confirm;
}
public static boolean confirmDeleteStarred() {
return mConfirmDeleteStarred;
}
public static void setConfirmDeleteStarred(final boolean confirm) {
mConfirmDeleteStarred = confirm;
}
public static boolean confirmSpam() {
return mConfirmSpam;
}
public static void setConfirmSpam(final boolean confirm) {
mConfirmSpam = confirm;
}
public static boolean confirmDeleteFromNotification() {
return mConfirmDeleteFromNotification;
}
public static void setConfirmDeleteFromNotification(final boolean confirm) {
mConfirmDeleteFromNotification = confirm;
}
public static NotificationHideSubject getNotificationHideSubject() {
return sNotificationHideSubject;
}
public static void setNotificationHideSubject(final NotificationHideSubject mode) {
sNotificationHideSubject = mode;
}
public static NotificationQuickDelete getNotificationQuickDeleteBehaviour() {
return sNotificationQuickDelete;
}
public static void setNotificationQuickDeleteBehaviour(final NotificationQuickDelete mode) {
sNotificationQuickDelete = mode;
}
public static LockScreenNotificationVisibility getLockScreenNotificationVisibility() {
return sLockScreenNotificationVisibility;
}
public static void setLockScreenNotificationVisibility(final LockScreenNotificationVisibility visibility) {
sLockScreenNotificationVisibility = visibility;
}
public static boolean wrapFolderNames() {
return mWrapFolderNames;
}
public static void setWrapFolderNames(final boolean state) {
mWrapFolderNames = state;
}
public static boolean hideUserAgent() {
return mHideUserAgent;
}
public static void setHideUserAgent(final boolean state) {
mHideUserAgent = state;
}
public static boolean hideTimeZone() {
return mHideTimeZone;
}
public static void setHideTimeZone(final boolean state) {
mHideTimeZone = state;
}
public static String getAttachmentDefaultPath() {
return mAttachmentDefaultPath;
}
public static void setAttachmentDefaultPath(String attachmentDefaultPath) {
K9.mAttachmentDefaultPath = attachmentDefaultPath;
}
public static synchronized SortType getSortType() {
return mSortType;
}
public static synchronized void setSortType(SortType sortType) {
mSortType = sortType;
}
public static synchronized boolean isSortAscending(SortType sortType) {
if (mSortAscending.get(sortType) == null) {
mSortAscending.put(sortType, sortType.isDefaultAscending());
}
return mSortAscending.get(sortType);
}
public static synchronized void setSortAscending(SortType sortType, boolean sortAscending) {
mSortAscending.put(sortType, sortAscending);
}
public static synchronized boolean useBackgroundAsUnreadIndicator() {
return sUseBackgroundAsUnreadIndicator;
}
public static synchronized void setUseBackgroundAsUnreadIndicator(boolean enabled) {
sUseBackgroundAsUnreadIndicator = enabled;
}
public static synchronized boolean isThreadedViewEnabled() {
return sThreadedViewEnabled;
}
public static synchronized void setThreadedViewEnabled(boolean enable) {
sThreadedViewEnabled = enable;
}
public static synchronized SplitViewMode getSplitViewMode() {
return sSplitViewMode;
}
public static synchronized void setSplitViewMode(SplitViewMode mode) {
sSplitViewMode = mode;
}
public static boolean showContactPicture() {
return sShowContactPicture;
}
public static void setShowContactPicture(boolean show) {
sShowContactPicture = show;
}
public static boolean isColorizeMissingContactPictures() {
return sColorizeMissingContactPictures;
}
public static void setColorizeMissingContactPictures(boolean enabled) {
sColorizeMissingContactPictures = enabled;
}
public static boolean isMessageViewArchiveActionVisible() {
return sMessageViewArchiveActionVisible;
}
public static void setMessageViewArchiveActionVisible(boolean visible) {
sMessageViewArchiveActionVisible = visible;
}
public static boolean isMessageViewDeleteActionVisible() {
return sMessageViewDeleteActionVisible;
}
public static void setMessageViewDeleteActionVisible(boolean visible) {
sMessageViewDeleteActionVisible = visible;
}
public static boolean isMessageViewMoveActionVisible() {
return sMessageViewMoveActionVisible;
}
public static void setMessageViewMoveActionVisible(boolean visible) {
sMessageViewMoveActionVisible = visible;
}
public static boolean isMessageViewCopyActionVisible() {
return sMessageViewCopyActionVisible;
}
public static void setMessageViewCopyActionVisible(boolean visible) {
sMessageViewCopyActionVisible = visible;
}
public static boolean isMessageViewSpamActionVisible() {
return sMessageViewSpamActionVisible;
}
public static void setMessageViewSpamActionVisible(boolean visible) {
sMessageViewSpamActionVisible = visible;
}
/**
* Check if we already know whether all databases are using the current database schema.
*
* <p>
* This method is only used for optimizations. If it returns {@code true} we can be certain that
* getting a {@link LocalStore} instance won't trigger a schema upgrade.
* </p>
*
* @return {@code true}, if we know that all databases are using the current database schema.
* {@code false}, otherwise.
*/
public static synchronized boolean areDatabasesUpToDate() {
return sDatabasesUpToDate;
}
/**
* Remember that all account databases are using the most recent database schema.
*
* @param save
* Whether or not to write the current database version to the
* {@code SharedPreferences} {@link #DATABASE_VERSION_CACHE}.
*
* @see #areDatabasesUpToDate()
*/
public static synchronized void setDatabasesUpToDate(boolean save) {
sDatabasesUpToDate = true;
if (save) {
Editor editor = sDatabaseVersionCache.edit();
editor.putInt(KEY_LAST_ACCOUNT_DATABASE_VERSION, LocalStore.DB_VERSION);
editor.commit();
}
}
}
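// Editor's sketch (not part of the original file): the review note that
// follows suggests renaming the field introduced by the patch, e.g.
//
//     private static boolean mConfirmDiscardMessage = true;
//
// so the name states what is being confirmed (discarding a message)
// rather than the menu the action lives in.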
| 1 | 13,134 | The field name doesn't really capture what this option does. I think `mConfirmDiscardMessage` would be a better choice. | k9mail-k-9 | java |
@@ -192,6 +192,15 @@ class FileProvider extends BaseProvider
return;
}
+ // is_file will cause a fatal error if binary content is not a string
+ if (!is_string($media->getBinaryContent())) {
+ throw new \RuntimeException(sprintf(
+ 'Invalid data provided for binary content, choose among: string, %s, %s',
+ 'Symfony\Component\HttpFoundation\File\File',
+ 'Symfony\Component\HttpFoundation\Request'
+ ));
+ }
+
// if the binary content is a filename => convert to a valid File
if (!is_file($media->getBinaryContent())) {
throw new \RuntimeException('The file does not exist : '.$media->getBinaryContent()); | 1 | <?php
/*
* This file is part of the Sonata Project package.
*
* (c) Thomas Rabaix <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Sonata\MediaBundle\Provider;
use Gaufrette\Filesystem;
use Sonata\AdminBundle\Form\FormMapper;
use Sonata\CoreBundle\Model\Metadata;
use Sonata\CoreBundle\Validator\ErrorElement;
use Sonata\MediaBundle\CDN\CDNInterface;
use Sonata\MediaBundle\Extra\ApiMediaFile;
use Sonata\MediaBundle\Generator\GeneratorInterface;
use Sonata\MediaBundle\Metadata\MetadataBuilderInterface;
use Sonata\MediaBundle\Model\MediaInterface;
use Sonata\MediaBundle\Thumbnail\ThumbnailInterface;
use Symfony\Component\Form\FormBuilder;
use Symfony\Component\HttpFoundation\BinaryFileResponse;
use Symfony\Component\HttpFoundation\File\File;
use Symfony\Component\HttpFoundation\File\MimeType\ExtensionGuesser;
use Symfony\Component\HttpFoundation\File\UploadedFile;
use Symfony\Component\HttpFoundation\Request;
use Symfony\Component\HttpFoundation\StreamedResponse;
use Symfony\Component\Validator\Constraints\NotBlank;
use Symfony\Component\Validator\Constraints\NotNull;
class FileProvider extends BaseProvider
{
protected $allowedExtensions;
protected $allowedMimeTypes;
protected $metadata;
/**
* @param string $name
* @param Filesystem $filesystem
* @param CDNInterface $cdn
* @param GeneratorInterface $pathGenerator
* @param ThumbnailInterface $thumbnail
* @param array $allowedExtensions
* @param array $allowedMimeTypes
* @param MetadataBuilderInterface $metadata
*/
public function __construct($name, Filesystem $filesystem, CDNInterface $cdn, GeneratorInterface $pathGenerator, ThumbnailInterface $thumbnail, array $allowedExtensions = array(), array $allowedMimeTypes = array(), MetadataBuilderInterface $metadata = null)
{
parent::__construct($name, $filesystem, $cdn, $pathGenerator, $thumbnail);
$this->allowedExtensions = $allowedExtensions;
$this->allowedMimeTypes = $allowedMimeTypes;
$this->metadata = $metadata;
}
/**
* {@inheritdoc}
*/
public function getProviderMetadata()
{
return new Metadata($this->getName(), $this->getName().'.description', false, 'SonataMediaBundle', array('class' => 'fa fa-file-text-o'));
}
/**
* {@inheritdoc}
*/
public function getReferenceImage(MediaInterface $media)
{
return sprintf('%s/%s',
$this->generatePath($media),
$media->getProviderReference()
);
}
/**
* {@inheritdoc}
*/
public function getReferenceFile(MediaInterface $media)
{
return $this->getFilesystem()->get($this->getReferenceImage($media), true);
}
/**
* {@inheritdoc}
*/
public function buildEditForm(FormMapper $formMapper)
{
$formMapper->add('name');
$formMapper->add('enabled', null, array('required' => false));
$formMapper->add('authorName');
$formMapper->add('cdnIsFlushable');
$formMapper->add('description');
$formMapper->add('copyright');
$formMapper->add('binaryContent', 'file', array('required' => false));
}
/**
* {@inheritdoc}
*/
public function buildCreateForm(FormMapper $formMapper)
{
$formMapper->add('binaryContent', 'file', array(
'constraints' => array(
new NotBlank(),
new NotNull(),
),
));
}
/**
* {@inheritdoc}
*/
public function buildMediaType(FormBuilder $formBuilder)
{
if ($formBuilder->getOption('context') == 'api') {
$formBuilder->add('binaryContent', 'file');
$formBuilder->add('contentType');
} else {
$formBuilder->add('binaryContent', 'file', array(
'required' => false,
'label' => 'widget_label_binary_content',
));
}
}
/**
* {@inheritdoc}
*/
public function postPersist(MediaInterface $media)
{
if ($media->getBinaryContent() === null) {
return;
}
$this->setFileContents($media);
$this->generateThumbnails($media);
$media->resetBinaryContent();
}
/**
* {@inheritdoc}
*/
public function postUpdate(MediaInterface $media)
{
if (!$media->getBinaryContent() instanceof \SplFileInfo) {
return;
}
// Delete the current file from the FS
$oldMedia = clone $media;
// if no previous reference is provided, it prevents
// Filesystem from trying to remove a directory
if ($media->getPreviousProviderReference() !== null) {
$oldMedia->setProviderReference($media->getPreviousProviderReference());
$path = $this->getReferenceImage($oldMedia);
if ($this->getFilesystem()->has($path)) {
$this->getFilesystem()->delete($path);
}
}
$this->fixBinaryContent($media);
$this->setFileContents($media);
$this->generateThumbnails($media);
$media->resetBinaryContent();
}
/**
* @param MediaInterface $media
*/
protected function fixBinaryContent(MediaInterface $media)
{
if ($media->getBinaryContent() === null || $media->getBinaryContent() instanceof File) {
return;
}
if ($media->getBinaryContent() instanceof Request) {
$this->generateBinaryFromRequest($media);
$this->updateMetadata($media);
return;
}
// if the binary content is a filename => convert to a valid File
if (!is_file($media->getBinaryContent())) {
throw new \RuntimeException('The file does not exist : '.$media->getBinaryContent());
}
$binaryContent = new File($media->getBinaryContent());
$media->setBinaryContent($binaryContent);
}
/**
* @throws \RuntimeException
*
* @param MediaInterface $media
*/
protected function fixFilename(MediaInterface $media)
{
if ($media->getBinaryContent() instanceof UploadedFile) {
$media->setName($media->getName() ?: $media->getBinaryContent()->getClientOriginalName());
$media->setMetadataValue('filename', $media->getBinaryContent()->getClientOriginalName());
} elseif ($media->getBinaryContent() instanceof File) {
$media->setName($media->getName() ?: $media->getBinaryContent()->getBasename());
$media->setMetadataValue('filename', $media->getBinaryContent()->getBasename());
}
// this is the original name
if (!$media->getName()) {
throw new \RuntimeException('Please define a valid media\'s name');
}
}
/**
* {@inheritdoc}
*/
protected function doTransform(MediaInterface $media)
{
$this->fixBinaryContent($media);
$this->fixFilename($media);
// this is the name used to store the file
if (!$media->getProviderReference() ||
$media->getProviderReference() === MediaInterface::MISSING_BINARY_REFERENCE
) {
$media->setProviderReference($this->generateReferenceName($media));
}
if ($media->getBinaryContent() instanceof File) {
$media->setContentType($media->getBinaryContent()->getMimeType());
$media->setSize($media->getBinaryContent()->getSize());
}
$media->setProviderStatus(MediaInterface::STATUS_OK);
}
/**
* {@inheritdoc}
*/
public function updateMetadata(MediaInterface $media, $force = true)
{
if (!$media->getBinaryContent() instanceof \SplFileInfo) {
// this is not optimized at all!!!
$path = tempnam(sys_get_temp_dir(), 'sonata_update_metadata_');
$fileObject = new \SplFileObject($path, 'w');
$fileObject->fwrite($this->getReferenceFile($media)->getContent());
} else {
$fileObject = $media->getBinaryContent();
}
$media->setSize($fileObject->getSize());
}
/**
* {@inheritdoc}
*/
public function generatePublicUrl(MediaInterface $media, $format)
{
if ($format == 'reference') {
$path = $this->getReferenceImage($media);
} else {
// @todo: fix the asset path
$path = sprintf('sonatamedia/files/%s/file.png', $format);
}
return $this->getCdn()->getPath($path, $media->getCdnIsFlushable());
}
/**
* {@inheritdoc}
*/
public function getHelperProperties(MediaInterface $media, $format, $options = array())
{
return array_merge(array(
'title' => $media->getName(),
'thumbnail' => $this->getReferenceImage($media),
'file' => $this->getReferenceImage($media),
), $options);
}
/**
* {@inheritdoc}
*/
public function generatePrivateUrl(MediaInterface $media, $format)
{
if ($format == 'reference') {
return $this->getReferenceImage($media);
}
return false;
}
/**
* Set the file contents for an image.
*
* @param MediaInterface $media
* @param string $contents path to contents, defaults to MediaInterface BinaryContent
*/
protected function setFileContents(MediaInterface $media, $contents = null)
{
$file = $this->getFilesystem()->get(sprintf('%s/%s', $this->generatePath($media), $media->getProviderReference()), true);
$metadata = $this->metadata ? $this->metadata->get($media, $file->getName()) : array();
if ($contents) {
$file->setContent($contents, $metadata);
return;
}
if ($media->getBinaryContent() instanceof File) {
$file->setContent(file_get_contents($media->getBinaryContent()->getRealPath()), $metadata);
return;
}
}
/**
* @param MediaInterface $media
*
* @return string
*/
protected function generateReferenceName(MediaInterface $media)
{
return $this->generateMediaUniqId($media).'.'.$media->getBinaryContent()->guessExtension();
}
/**
* @param MediaInterface $media
*
* @return string
*/
protected function generateMediaUniqId(MediaInterface $media)
{
return sha1($media->getName().uniqid().rand(11111, 99999));
}
/**
* {@inheritdoc}
*/
public function getDownloadResponse(MediaInterface $media, $format, $mode, array $headers = array())
{
// build the default headers
$headers = array_merge(array(
'Content-Type' => $media->getContentType(),
'Content-Disposition' => sprintf('attachment; filename="%s"', $media->getMetadataValue('filename')),
), $headers);
if (!in_array($mode, array('http', 'X-Sendfile', 'X-Accel-Redirect'))) {
throw new \RuntimeException('Invalid mode provided');
}
if ($mode == 'http') {
if ($format == 'reference') {
$file = $this->getReferenceFile($media);
} else {
$file = $this->getFilesystem()->get($this->generatePrivateUrl($media, $format));
}
return new StreamedResponse(function () use ($file) {
echo $file->getContent();
}, 200, $headers);
}
if (!$this->getFilesystem()->getAdapter() instanceof \Sonata\MediaBundle\Filesystem\Local) {
throw new \RuntimeException('Cannot use X-Sendfile or X-Accel-Redirect with non \Sonata\MediaBundle\Filesystem\Local');
}
$filename = sprintf('%s/%s',
$this->getFilesystem()->getAdapter()->getDirectory(),
$this->generatePrivateUrl($media, $format)
);
return new BinaryFileResponse($filename, 200, $headers);
}
/**
* {@inheritdoc}
*/
public function validate(ErrorElement $errorElement, MediaInterface $media)
{
if (!$media->getBinaryContent() instanceof \SplFileInfo) {
return;
}
if ($media->getBinaryContent() instanceof UploadedFile) {
$fileName = $media->getBinaryContent()->getClientOriginalName();
} elseif ($media->getBinaryContent() instanceof File) {
$fileName = $media->getBinaryContent()->getFilename();
} else {
throw new \RuntimeException(sprintf('Invalid binary content type: %s', get_class($media->getBinaryContent())));
}
if (!in_array(strtolower(pathinfo($fileName, PATHINFO_EXTENSION)), $this->allowedExtensions)) {
$errorElement
->with('binaryContent')
->addViolation('Invalid extensions')
->end();
}
if (!in_array($media->getBinaryContent()->getMimeType(), $this->allowedMimeTypes)) {
$errorElement
->with('binaryContent')
->addViolation('Invalid mime type : %type%', array('%type%' => $media->getBinaryContent()->getMimeType()))
->end();
}
}
/**
* Set media binary content according to request content.
*
* @param MediaInterface $media
*/
protected function generateBinaryFromRequest(MediaInterface $media)
{
if (php_sapi_name() === 'cli') {
throw new \RuntimeException('The current process cannot be executed in cli environment');
}
if (!$media->getContentType()) {
throw new \RuntimeException(
'You must provide the content type value for your media before setting the binary content'
);
}
$request = $media->getBinaryContent();
if (!$request instanceof Request) {
throw new \RuntimeException('Expected Request in binary content');
}
$content = $request->getContent();
// create unique id for media reference
$guesser = ExtensionGuesser::getInstance();
$extension = $guesser->guess($media->getContentType());
if (!$extension) {
throw new \RuntimeException(
sprintf('Unable to guess extension for content type %s', $media->getContentType())
);
}
$handle = tmpfile();
fwrite($handle, $content);
$file = new ApiMediaFile($handle);
$file->setExtension($extension);
$file->setMimetype($media->getContentType());
$media->setBinaryContent($file);
}
}
| 1 | 7,442 | what if `$media->getBinaryContent()` is an instance of `Symfony\Component\HttpFoundation\File\File`? Does `is_string()` return `true` then? ping @greg0ire | sonata-project-SonataMediaBundle | php |
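As a standalone sketch of the reviewer's question (not part of the bundle, and assuming the Symfony HttpFoundation component is installed): `is_string()` returns `false` for every object, including `File` instances, and `fixBinaryContent()` already returns early when the binary content is a `File`, so a `File` never reaches the new guard:

    <?php
    use Symfony\Component\HttpFoundation\File\File;

    $file = new File(__FILE__);          // any existing path works here
    var_dump(is_string($file));          // bool(false): objects are not strings
    var_dump(is_string('/tmp/a.txt'));   // bool(true): a plain filename string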
@@ -1065,11 +1065,9 @@ void GenStruct(StructDef &struct_def, std::string *code_ptr) {
}
}
// generate object accessors if is nested_flatbuffer
+ if (field.nested_flatbuffer) {
auto nested = field.attributes.Lookup("nested_flatbuffer");
- if (nested) {
- auto nested_qualified_name =
- parser_.namespaces_.back()->GetFullyQualifiedName(nested->constant);
- auto nested_type = parser_.structs_.Lookup(nested_qualified_name);
+ auto nested_type = nested->type.struct_def;
auto nested_type_name = WrapInNameSpace(*nested_type);
auto nestedMethodName = MakeCamel(field.name, lang_.first_camel_upper)
+ "As" + nested_type_name; | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// independent from idl_parser, since this code is not needed for most clients
#include "flatbuffers/flatbuffers.h"
#include "flatbuffers/idl.h"
#include "flatbuffers/util.h"
#include "flatbuffers/code_generators.h"
#if defined(FLATBUFFERS_CPP98_STL)
#include <cctype>
#endif // defined(FLATBUFFERS_CPP98_STL)
namespace flatbuffers {
// Convert an underscore_based_indentifier in to camelCase.
// Also uppercases the first character if first is true.
std::string MakeCamel(const std::string &in, bool first) {
std::string s;
for (size_t i = 0; i < in.length(); i++) {
if (!i && first)
s += static_cast<char>(toupper(in[0]));
else if (in[i] == '_' && i + 1 < in.length())
s += static_cast<char>(toupper(in[++i]));
else
s += in[i];
}
return s;
}
// These arrays need to correspond to the IDLOptions::k enum.
struct LanguageParameters {
IDLOptions::Language language;
// Whether function names in the language typically start with uppercase.
bool first_camel_upper;
std::string file_extension;
std::string string_type;
std::string bool_type;
std::string open_curly;
std::string accessor_type;
std::string const_decl;
std::string unsubclassable_decl;
std::string enum_decl;
std::string enum_separator;
std::string getter_prefix;
std::string getter_suffix;
std::string inheritance_marker;
std::string namespace_ident;
std::string namespace_begin;
std::string namespace_end;
std::string set_bb_byteorder;
std::string get_bb_position;
std::string get_fbb_offset;
std::string accessor_prefix;
std::string accessor_prefix_static;
std::string optional_suffix;
std::string includes;
CommentConfig comment_config;
};
const LanguageParameters& GetLangParams(IDLOptions::Language lang) {
static LanguageParameters language_parameters[] = {
{
IDLOptions::kJava,
false,
".java",
"String",
"boolean ",
" {\n",
"class ",
" final ",
"final ",
"final class ",
";\n",
"()",
"",
" extends ",
"package ",
";",
"",
"_bb.order(ByteOrder.LITTLE_ENDIAN); ",
"position()",
"offset()",
"",
"",
"",
"import java.nio.*;\nimport java.lang.*;\nimport java.util.*;\n"
"import com.google.flatbuffers.*;\n\n@SuppressWarnings(\"unused\")\n",
{
"/**",
" *",
" */",
},
},
{
IDLOptions::kCSharp,
true,
".cs",
"string",
"bool ",
"\n{\n",
"struct ",
" readonly ",
"",
"enum ",
",\n",
" { get",
"} ",
" : ",
"namespace ",
"\n{",
"\n}\n",
"",
"Position",
"Offset",
"__p.",
"Table.",
"?",
"using global::System;\nusing global::FlatBuffers;\n\n",
{
nullptr,
"///",
nullptr,
},
},
};
if (lang == IDLOptions::kJava) {
return language_parameters[0];
} else {
assert(lang == IDLOptions::kCSharp);
return language_parameters[1];
}
}
namespace general {
class GeneralGenerator : public BaseGenerator {
public:
GeneralGenerator(const Parser &parser, const std::string &path,
const std::string &file_name)
: BaseGenerator(parser, path, file_name, "", "."),
lang_(GetLangParams(parser_.opts.lang)),
cur_name_space_( nullptr ) {
}
GeneralGenerator &operator=(const GeneralGenerator &);
bool generate() {
std::string one_file_code;
cur_name_space_ = parser_.namespaces_.back();
for (auto it = parser_.enums_.vec.begin(); it != parser_.enums_.vec.end();
++it) {
std::string enumcode;
auto &enum_def = **it;
if (!parser_.opts.one_file)
cur_name_space_ = enum_def.defined_namespace;
GenEnum(enum_def, &enumcode);
if (parser_.opts.one_file) {
one_file_code += enumcode;
} else {
if (!SaveType(enum_def.name, *enum_def.defined_namespace,
enumcode, false)) return false;
}
}
for (auto it = parser_.structs_.vec.begin();
it != parser_.structs_.vec.end(); ++it) {
std::string declcode;
auto &struct_def = **it;
if (!parser_.opts.one_file)
cur_name_space_ = struct_def.defined_namespace;
GenStruct(struct_def, &declcode);
if (parser_.opts.one_file) {
one_file_code += declcode;
} else {
if (!SaveType(struct_def.name, *struct_def.defined_namespace,
declcode, true)) return false;
}
}
if (parser_.opts.one_file) {
return SaveType(file_name_, *parser_.namespaces_.back(),
one_file_code, true);
}
return true;
}
// Save out the generated code for a single class while adding
// declaration boilerplate.
bool SaveType(const std::string &defname, const Namespace &ns,
const std::string &classcode, bool needs_includes) {
if (!classcode.length()) return true;
std::string code;
if (lang_.language == IDLOptions::kCSharp) {
code = "// <auto-generated>\n"
"// " + std::string(FlatBuffersGeneratedWarning()) + "\n"
"// </auto-generated>\n\n";
} else {
code = "// " + std::string(FlatBuffersGeneratedWarning()) + "\n\n";
}
std::string namespace_name = FullNamespace(".", ns);
if (!namespace_name.empty()) {
code += lang_.namespace_ident + namespace_name + lang_.namespace_begin;
code += "\n\n";
}
if (needs_includes) code += lang_.includes;
code += classcode;
if (!namespace_name.empty()) code += lang_.namespace_end;
auto filename = NamespaceDir(ns) + defname + lang_.file_extension;
return SaveFile(filename.c_str(), code, false);
}
const Namespace *CurrentNameSpace() const { return cur_name_space_; }
std::string FunctionStart(char upper) {
return std::string() + (lang_.language == IDLOptions::kJava
? static_cast<char>(tolower(upper))
: upper);
}
static bool IsEnum(const Type& type) {
return type.enum_def != nullptr && IsInteger(type.base_type);
}
std::string GenTypeBasic(const Type &type, bool enableLangOverrides) {
static const char *java_typename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#JTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
static const char *csharp_typename[] = {
#define FLATBUFFERS_TD(ENUM, IDLTYPE, CTYPE, JTYPE, GTYPE, NTYPE, PTYPE) \
#NTYPE,
FLATBUFFERS_GEN_TYPES(FLATBUFFERS_TD)
#undef FLATBUFFERS_TD
};
if (enableLangOverrides) {
if (lang_.language == IDLOptions::kCSharp) {
if (IsEnum(type)) return WrapInNameSpace(*type.enum_def);
if (type.base_type == BASE_TYPE_STRUCT) {
return "Offset<" + WrapInNameSpace(*type.struct_def) + ">";
}
}
}
if (lang_.language == IDLOptions::kJava) {
return java_typename[type.base_type];
} else {
assert(lang_.language == IDLOptions::kCSharp);
return csharp_typename[type.base_type];
}
}
std::string GenTypeBasic(const Type &type) {
return GenTypeBasic(type, true);
}
std::string GenTypePointer(const Type &type) {
switch (type.base_type) {
case BASE_TYPE_STRING:
return lang_.string_type;
case BASE_TYPE_VECTOR:
return GenTypeGet(type.VectorType());
case BASE_TYPE_STRUCT:
return WrapInNameSpace(*type.struct_def);
case BASE_TYPE_UNION:
// Unions in C# use a generic Table-derived type for better type safety
if (lang_.language == IDLOptions::kCSharp) return "TTable";
// fall through
default:
return "Table";
}
}
std::string GenTypeGet(const Type &type) {
return IsScalar(type.base_type)
? GenTypeBasic(type)
: GenTypePointer(type);
}
// Find the destination type the user wants to receive the value in (e.g.
// one size higher signed types for unsigned serialized values in Java).
Type DestinationType(const Type &type, bool vectorelem) {
if (lang_.language != IDLOptions::kJava) return type;
switch (type.base_type) {
// We use int for both uchar/ushort, since that generally means less casting
// than using short for uchar.
case BASE_TYPE_UCHAR: return Type(BASE_TYPE_INT);
case BASE_TYPE_USHORT: return Type(BASE_TYPE_INT);
case BASE_TYPE_UINT: return Type(BASE_TYPE_LONG);
case BASE_TYPE_VECTOR:
if (vectorelem)
return DestinationType(type.VectorType(), vectorelem);
// else fall thru
default: return type;
}
}
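// Illustration: a serialized ubyte surfaces as a Java int and a uint as a
// Java long, so the full unsigned range fits in the signed destination type.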
std::string GenOffsetType(const StructDef &struct_def) {
if(lang_.language == IDLOptions::kCSharp) {
return "Offset<" + WrapInNameSpace(struct_def) + ">";
} else {
return "int";
}
}
std::string GenOffsetConstruct(const StructDef &struct_def,
const std::string &variable_name)
{
if(lang_.language == IDLOptions::kCSharp) {
return "new Offset<" + WrapInNameSpace(struct_def) + ">(" + variable_name +
")";
}
return variable_name;
}
std::string GenVectorOffsetType() {
if(lang_.language == IDLOptions::kCSharp) {
return "VectorOffset";
} else {
return "int";
}
}
// Generate destination type name
std::string GenTypeNameDest(const Type &type)
{
return GenTypeGet(DestinationType(type, true));
}
// Mask to turn serialized value into destination type value.
std::string DestinationMask(const Type &type, bool vectorelem) {
if (lang_.language != IDLOptions::kJava) return "";
switch (type.base_type) {
case BASE_TYPE_UCHAR: return " & 0xFF";
case BASE_TYPE_USHORT: return " & 0xFFFF";
case BASE_TYPE_UINT: return " & 0xFFFFFFFFL";
case BASE_TYPE_VECTOR:
if (vectorelem)
return DestinationMask(type.VectorType(), vectorelem);
// else fall thru
default: return "";
}
}
// Casts necessary to correctly read serialized data
std::string DestinationCast(const Type &type) {
if (type.base_type == BASE_TYPE_VECTOR) {
return DestinationCast(type.VectorType());
} else {
switch (lang_.language) {
case IDLOptions::kJava:
// Cast necessary to correctly read serialized unsigned values.
if (type.base_type == BASE_TYPE_UINT) return "(long)";
break;
case IDLOptions::kCSharp:
// Cast from raw integral types to enum.
if (IsEnum(type)) return "(" + WrapInNameSpace(*type.enum_def) + ")";
break;
default:
break;
}
}
return "";
}
// Cast statements for mutator method parameters.
// In Java, parameters representing unsigned numbers need to be cast down to
// their respective type. For example, a long holding an unsigned int value
// would be cast down to int before being put onto the buffer. In C#, one can
// directly cast an Enum to its underlying type, which is essential before
// putting it onto the buffer.
std::string SourceCast(const Type &type, bool castFromDest) {
if (type.base_type == BASE_TYPE_VECTOR) {
return SourceCast(type.VectorType(), castFromDest);
} else {
switch (lang_.language) {
case IDLOptions::kJava:
if (castFromDest) {
if (type.base_type == BASE_TYPE_UINT) return "(int)";
else if (type.base_type == BASE_TYPE_USHORT) return "(short)";
else if (type.base_type == BASE_TYPE_UCHAR) return "(byte)";
}
break;
case IDLOptions::kCSharp:
if (IsEnum(type)) return "(" + GenTypeBasic(type, false) + ")";
break;
default:
break;
}
}
return "";
}
std::string SourceCast(const Type &type) {
return SourceCast(type, true);
}
std::string SourceCastBasic(const Type &type, bool castFromDest) {
return IsScalar(type.base_type) ? SourceCast(type, castFromDest) : "";
}
std::string SourceCastBasic(const Type &type) {
return SourceCastBasic(type, true);
}
std::string GenEnumDefaultValue(const Value &value) {
auto enum_def = value.type.enum_def;
auto vec = enum_def->vals.vec;
auto default_value = StringToInt(value.constant.c_str());
auto result = value.constant;
for (auto it = vec.begin(); it != vec.end(); ++it) {
auto enum_val = **it;
if (enum_val.value == default_value) {
result = WrapInNameSpace(*enum_def) + "." + enum_val.name;
break;
}
}
return result;
}
std::string GenDefaultValue(const Value &value, bool enableLangOverrides) {
if (enableLangOverrides) {
// handles both enum case and vector of enum case
if (lang_.language == IDLOptions::kCSharp &&
value.type.enum_def != nullptr &&
value.type.base_type != BASE_TYPE_UNION) {
return GenEnumDefaultValue(value);
}
}
auto longSuffix = lang_.language == IDLOptions::kJava ? "L" : "";
switch (value.type.base_type) {
case BASE_TYPE_FLOAT: return value.constant + "f";
case BASE_TYPE_BOOL: return value.constant == "0" ? "false" : "true";
case BASE_TYPE_ULONG:
{
if (lang_.language != IDLOptions::kJava)
return value.constant;
// Converts the ulong into its bits signed equivalent
uint64_t defaultValue = StringToUInt(value.constant.c_str());
return NumToString(static_cast<int64_t>(defaultValue)) + longSuffix;
}
case BASE_TYPE_UINT:
case BASE_TYPE_LONG: return value.constant + longSuffix;
default: return value.constant;
}
}
std::string GenDefaultValue(const Value &value) {
return GenDefaultValue(value, true);
}
std::string GenDefaultValueBasic(const Value &value, bool enableLangOverrides) {
if (!IsScalar(value.type.base_type)) {
if (enableLangOverrides) {
if (lang_.language == IDLOptions::kCSharp) {
switch (value.type.base_type) {
case BASE_TYPE_STRING:
return "default(StringOffset)";
case BASE_TYPE_STRUCT:
return "default(Offset<" + WrapInNameSpace(*value.type.struct_def) +
">)";
case BASE_TYPE_VECTOR:
return "default(VectorOffset)";
default:
break;
}
}
}
return "0";
}
return GenDefaultValue(value, enableLangOverrides);
}
std::string GenDefaultValueBasic(const Value &value) {
return GenDefaultValueBasic(value, true);
}
void GenEnum(EnumDef &enum_def, std::string *code_ptr) {
std::string &code = *code_ptr;
if (enum_def.generated) return;
// Generate enum definitions of the form:
// public static (final) int name = value;
// In Java, we use ints rather than the Enum feature, because we want them
// to map directly to how they're used in C/C++ and file formats.
// That, and Java Enums are expensive, and not universally liked.
GenComment(enum_def.doc_comment, code_ptr, &lang_.comment_config);
code += std::string("public ") + lang_.enum_decl + enum_def.name;
if (lang_.language == IDLOptions::kCSharp) {
code += lang_.inheritance_marker +
GenTypeBasic(enum_def.underlying_type, false);
}
code += lang_.open_curly;
if (lang_.language == IDLOptions::kJava) {
code += " private " + enum_def.name + "() { }\n";
}
for (auto it = enum_def.vals.vec.begin();
it != enum_def.vals.vec.end();
++it) {
auto &ev = **it;
GenComment(ev.doc_comment, code_ptr, &lang_.comment_config, " ");
if (lang_.language != IDLOptions::kCSharp) {
code += " public static";
code += lang_.const_decl;
code += GenTypeBasic(enum_def.underlying_type, false);
}
code += " " + ev.name + " = ";
code += NumToString(ev.value);
code += lang_.enum_separator;
}
// Generate a string table for enum values.
// We do not do that for C# where this functionality is native.
if (lang_.language != IDLOptions::kCSharp) {
// The problem is, if values are very sparse, that could generate really big
// tables. Ideally in that case we generate a map lookup instead, but for
// the moment we simply don't output a table at all.
auto range = enum_def.vals.vec.back()->value -
enum_def.vals.vec.front()->value + 1;
// Average distance between values above which we consider a table
// "too sparse". Change at will.
static const int kMaxSparseness = 5;
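// Worked example: for values {0, 2, 100}, range = 101 and size = 3, so
// 101 / 3 = 33 >= kMaxSparseness and no table is emitted; for {0, 1, 2, 5},
// range = 6 and 6 / 4 = 1 < 5, so the names table is generated.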
if (range / static_cast<int64_t>(enum_def.vals.vec.size()) < kMaxSparseness) {
code += "\n public static";
code += lang_.const_decl;
code += lang_.string_type;
code += "[] names = { ";
auto val = enum_def.vals.vec.front()->value;
for (auto it = enum_def.vals.vec.begin();
it != enum_def.vals.vec.end();
++it) {
while (val++ != (*it)->value) code += "\"\", ";
code += "\"" + (*it)->name + "\", ";
}
code += "};\n\n";
code += " public static ";
code += lang_.string_type;
code += " " + MakeCamel("name", lang_.first_camel_upper);
code += "(int e) { return names[e";
if (enum_def.vals.vec.front()->value)
code += " - " + enum_def.vals.vec.front()->name;
code += "]; }\n";
}
}
// Close the class
code += "}";
// Java does not need the closing semi-colon on class definitions.
code += (lang_.language != IDLOptions::kJava) ? ";" : "";
code += "\n\n";
}
// Returns the function name that is able to read a value of the given type.
std::string GenGetter(const Type &type) {
switch (type.base_type) {
case BASE_TYPE_STRING: return lang_.accessor_prefix + "__string";
case BASE_TYPE_STRUCT: return lang_.accessor_prefix + "__struct";
case BASE_TYPE_UNION: return lang_.accessor_prefix + "__union";
case BASE_TYPE_VECTOR: return GenGetter(type.VectorType());
default: {
std::string getter =
lang_.accessor_prefix + "bb." + FunctionStart('G') + "et";
if (type.base_type == BASE_TYPE_BOOL) {
getter = "0!=" + getter;
} else if (GenTypeBasic(type, false) != "byte") {
getter += MakeCamel(GenTypeBasic(type, false));
}
return getter;
}
}
}
// Returns the expression that reads a key field's value for lookup-by-key.
std::string GenGetterForLookupByKey(flatbuffers::FieldDef *key_field,
const std::string &data_buffer,
const char *num = nullptr) {
auto type = key_field->value.type;
auto dest_mask = DestinationMask(type, true);
auto dest_cast = DestinationCast(type);
auto getter = data_buffer + "." + FunctionStart('G') + "et";
if (GenTypeBasic(type, false) != "byte") {
getter += MakeCamel(GenTypeBasic(type, false));
}
getter = dest_cast + getter + "(" + GenOffsetGetter(key_field, num) + ")"
+ dest_mask;
return getter;
}
// Direct mutation is only allowed for scalar fields.
// Hence a setter method will only be generated for such fields.
std::string GenSetter(const Type &type) {
if (IsScalar(type.base_type)) {
std::string setter =
lang_.accessor_prefix + "bb." + FunctionStart('P') + "ut";
if (GenTypeBasic(type, false) != "byte" &&
type.base_type != BASE_TYPE_BOOL) {
setter += MakeCamel(GenTypeBasic(type, false));
}
return setter;
} else {
return "";
}
}
// Returns the method name for use with add/put calls.
std::string GenMethod(const Type &type) {
return IsScalar(type.base_type)
? MakeCamel(GenTypeBasic(type, false))
: (IsStruct(type) ? "Struct" : "Offset");
}
// Recursively generate arguments for a constructor, to deal with nested
// structs.
void GenStructArgs(const StructDef &struct_def, std::string *code_ptr,
const char *nameprefix) {
std::string &code = *code_ptr;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end();
++it) {
auto &field = **it;
if (IsStruct(field.value.type)) {
// Generate arguments for a struct inside a struct. To ensure names
// don't clash, and to make it obvious these arguments are constructing
// a nested struct, prefix the name with the field name.
GenStructArgs(*field.value.type.struct_def, code_ptr,
(nameprefix + (field.name + "_")).c_str());
} else {
code += ", ";
code += GenTypeBasic(DestinationType(field.value.type, false));
code += " ";
code += nameprefix;
code += MakeCamel(field.name, lang_.first_camel_upper);
}
}
}
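// Illustration of the recursion above: for schema structs
// `struct Vec2 { x: float; y: float; }` and `struct Line { a: Vec2; b: Vec2; }`,
// CreateLine gains the flattened parameters a_x, a_y, b_x, b_y (camel-cased
// per target language), so nested field names cannot clash.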
// Recursively generate struct construction statements of the form:
// builder.putType(name);
// and insert manual padding.
void GenStructBody(const StructDef &struct_def, std::string *code_ptr,
const char *nameprefix) {
std::string &code = *code_ptr;
code += " builder." + FunctionStart('P') + "rep(";
code += NumToString(struct_def.minalign) + ", ";
code += NumToString(struct_def.bytesize) + ");\n";
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
auto &field = **it;
if (field.padding) {
code += " builder." + FunctionStart('P') + "ad(";
code += NumToString(field.padding) + ");\n";
}
if (IsStruct(field.value.type)) {
GenStructBody(*field.value.type.struct_def, code_ptr,
(nameprefix + (field.name + "_")).c_str());
} else {
code += " builder." + FunctionStart('P') + "ut";
code += GenMethod(field.value.type) + "(";
code += SourceCast(field.value.type);
auto argname = nameprefix + MakeCamel(field.name, lang_.first_camel_upper);
code += argname;
code += ");\n";
}
}
}
std::string GenByteBufferLength(const char *bb_name) {
std::string bb_len = bb_name;
if (lang_.language == IDLOptions::kCSharp) bb_len += ".Length";
else bb_len += ".capacity()";
return bb_len;
}
std::string GenOffsetGetter(flatbuffers::FieldDef *key_field,
const char *num = nullptr) {
std::string key_offset = "";
key_offset += lang_.accessor_prefix_static + "__offset(" +
NumToString(key_field->value.offset) + ", ";
if (num) {
key_offset += num;
key_offset += (lang_.language == IDLOptions::kCSharp ?
".Value, builder.DataBuffer)" : ", _bb)");
} else {
key_offset += GenByteBufferLength("bb");
key_offset += " - tableOffset, bb)";
}
return key_offset;
}
std::string GenLookupKeyGetter(flatbuffers::FieldDef *key_field) {
std::string key_getter = " ";
key_getter += "int tableOffset = " + lang_.accessor_prefix_static;
key_getter += "__indirect(vectorLocation + 4 * (start + middle)";
key_getter += ", bb);\n ";
if (key_field->value.type.base_type == BASE_TYPE_STRING) {
key_getter += "int comp = " + lang_.accessor_prefix_static;
key_getter += FunctionStart('C') + "ompareStrings(";
key_getter += GenOffsetGetter(key_field);
key_getter += ", byteKey, bb);\n";
} else {
auto get_val = GenGetterForLookupByKey(key_field, "bb");
if (lang_.language == IDLOptions::kCSharp) {
key_getter += "int comp = " + get_val + ".CompareTo(key);\n";
} else {
key_getter += GenTypeNameDest(key_field->value.type) + " val = ";
key_getter += get_val + ";\n";
key_getter += " int comp = val > key ? 1 : val < key ? -1 : 0;\n";
}
}
return key_getter;
}
std::string GenKeyGetter(flatbuffers::FieldDef *key_field) {
std::string key_getter = "";
auto data_buffer = (lang_.language == IDLOptions::kCSharp) ?
"builder.DataBuffer" : "_bb";
if (key_field->value.type.base_type == BASE_TYPE_STRING) {
if (lang_.language == IDLOptions::kJava)
key_getter += " return ";
key_getter += lang_.accessor_prefix_static;
key_getter += FunctionStart('C') + "ompareStrings(";
key_getter += GenOffsetGetter(key_field, "o1") + ", ";
key_getter += GenOffsetGetter(key_field, "o2") + ", " + data_buffer + ")";
if (lang_.language == IDLOptions::kJava)
key_getter += ";";
}
else {
auto field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o1");
if (lang_.language == IDLOptions::kCSharp) {
key_getter += field_getter;
field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o2");
key_getter += ".CompareTo(" + field_getter + ")";
}
else {
key_getter += "\n " + GenTypeNameDest(key_field->value.type) + " val_1 = ";
key_getter += field_getter + ";\n " + GenTypeNameDest(key_field->value.type);
key_getter += " val_2 = ";
field_getter = GenGetterForLookupByKey(key_field, data_buffer, "o2");
key_getter += field_getter + ";\n";
key_getter += " return val_1 > val_2 ? 1 : val_1 < val_2 ? -1 : 0;\n ";
}
}
return key_getter;
}
void GenStruct(StructDef &struct_def, std::string *code_ptr) {
if (struct_def.generated) return;
std::string &code = *code_ptr;
// Generate a struct accessor class, with methods of the form:
// public type name() { return bb.getType(i + offset); }
// or for tables of the form:
// public type name() {
// int o = __offset(offset); return o != 0 ? bb.getType(o + i) : default;
// }
GenComment(struct_def.doc_comment, code_ptr, &lang_.comment_config);
code += "public ";
if (lang_.language == IDLOptions::kCSharp &&
struct_def.attributes.Lookup("csharp_partial")) {
// generate a partial class for this C# struct/table
code += "partial ";
} else {
code += lang_.unsubclassable_decl;
}
code += lang_.accessor_type + struct_def.name;
if (lang_.language == IDLOptions::kCSharp) {
code += " : IFlatbufferObject";
code += lang_.open_curly;
code += " private ";
code += struct_def.fixed ? "Struct" : "Table";
code += " __p;\n";
if (lang_.language == IDLOptions::kCSharp) {
code += " public ByteBuffer ByteBuffer { get { return __p.bb; } }\n";
}
} else {
code += lang_.inheritance_marker;
code += struct_def.fixed ? "Struct" : "Table";
code += lang_.open_curly;
}
if (!struct_def.fixed) {
// Generate a special accessor for the table for use when it is the root
// of a FlatBuffer
std::string method_name = FunctionStart('G') + "etRootAs" + struct_def.name;
std::string method_signature = " public static " + struct_def.name + " " +
method_name;
// create convenience method that doesn't require an existing object
code += method_signature + "(ByteBuffer _bb) ";
code += "{ return " + method_name + "(_bb, new " + struct_def.name+ "()); }\n";
// create method that allows object reuse
code += method_signature + "(ByteBuffer _bb, " + struct_def.name + " obj) { ";
code += lang_.set_bb_byteorder;
code += "return (obj.__assign(_bb." + FunctionStart('G') + "etInt(_bb.";
code += lang_.get_bb_position;
code += ") + _bb.";
code += lang_.get_bb_position;
code += ", _bb)); }\n";
if (parser_.root_struct_def_ == &struct_def) {
if (parser_.file_identifier_.length()) {
// Check if a buffer has the identifier.
code += " public static ";
code += lang_.bool_type + struct_def.name;
code += "BufferHasIdentifier(ByteBuffer _bb) { return ";
code += lang_.accessor_prefix_static + "__has_identifier(_bb, \"";
code += parser_.file_identifier_;
code += "\"); }\n";
}
}
}
// Generate the __init method that sets the field in a pre-existing
// accessor object. This is to allow object reuse.
code += " public void __init(int _i, ByteBuffer _bb) ";
code += "{ " + lang_.accessor_prefix + "bb_pos = _i; ";
code += lang_.accessor_prefix + "bb = _bb; }\n";
code += " public " + struct_def.name + " __assign(int _i, ByteBuffer _bb) ";
code += "{ __init(_i, _bb); return this; }\n\n";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end();
++it) {
auto &field = **it;
if (field.deprecated) continue;
GenComment(field.doc_comment, code_ptr, &lang_.comment_config, " ");
std::string type_name = GenTypeGet(field.value.type);
std::string type_name_dest = GenTypeNameDest(field.value.type);
std::string conditional_cast = "";
std::string optional = "";
if (lang_.language == IDLOptions::kCSharp &&
!struct_def.fixed &&
(field.value.type.base_type == BASE_TYPE_STRUCT ||
field.value.type.base_type == BASE_TYPE_UNION ||
(field.value.type.base_type == BASE_TYPE_VECTOR &&
field.value.type.element == BASE_TYPE_STRUCT))) {
optional = lang_.optional_suffix;
conditional_cast = "(" + type_name_dest + optional + ")";
}
std::string dest_mask = DestinationMask(field.value.type, true);
std::string dest_cast = DestinationCast(field.value.type);
std::string src_cast = SourceCast(field.value.type);
std::string method_start = " public " + type_name_dest + optional + " " +
MakeCamel(field.name, lang_.first_camel_upper);
std::string obj = lang_.language == IDLOptions::kCSharp
? "(new " + type_name + "())"
: "obj";
// Most field accessors need to retrieve and test the field offset first,
// this is the prefix code for that:
auto offset_prefix = " { int o = " + lang_.accessor_prefix + "__offset(" +
NumToString(field.value.offset) +
"); return o != 0 ? ";
// Generate the accessors that don't do object reuse.
if (field.value.type.base_type == BASE_TYPE_STRUCT) {
// Calls the accessor that takes an accessor object with a new object.
if (lang_.language != IDLOptions::kCSharp) {
code += method_start + "() { return ";
code += MakeCamel(field.name, lang_.first_camel_upper);
code += "(new ";
code += type_name + "()); }\n";
}
} else if (field.value.type.base_type == BASE_TYPE_VECTOR &&
field.value.type.element == BASE_TYPE_STRUCT) {
// Accessors for vectors of structs also take accessor objects, this
// generates a variant without that argument.
if (lang_.language != IDLOptions::kCSharp) {
code += method_start + "(int j) { return ";
code += MakeCamel(field.name, lang_.first_camel_upper);
code += "(new " + type_name + "(), j); }\n";
}
} else if (field.value.type.base_type == BASE_TYPE_UNION) {
if (lang_.language == IDLOptions::kCSharp) {
// Union types in C# use generic Table-derived type for better type
// safety.
method_start += "<TTable>";
type_name = type_name_dest;
}
}
std::string getter = dest_cast + GenGetter(field.value.type);
code += method_start;
std::string default_cast = "";
// only create default casts for c# scalars or vectors of scalars
if (lang_.language == IDLOptions::kCSharp &&
(IsScalar(field.value.type.base_type) ||
(field.value.type.base_type == BASE_TYPE_VECTOR &&
IsScalar(field.value.type.element)))) {
// For scalars, default value will be returned by GetDefaultValue().
// If the scalar is an enum, GetDefaultValue() returns an actual c# enum
// that doesn't need to be casted. However, default values for enum
// elements of vectors are integer literals ("0") and are still casted
// for clarity.
if (field.value.type.enum_def == nullptr ||
field.value.type.base_type == BASE_TYPE_VECTOR) {
default_cast = "(" + type_name_dest + ")";
}
}
std::string member_suffix = "; ";
if (IsScalar(field.value.type.base_type)) {
code += lang_.getter_prefix;
member_suffix += lang_.getter_suffix;
if (struct_def.fixed) {
code += " { return " + getter;
code += "(" + lang_.accessor_prefix + "bb_pos + ";
code += NumToString(field.value.offset) + ")";
code += dest_mask;
} else {
code += offset_prefix + getter;
code += "(o + " + lang_.accessor_prefix + "bb_pos)" + dest_mask;
code += " : " + default_cast;
code += GenDefaultValue(field.value);
}
} else {
switch (field.value.type.base_type) {
case BASE_TYPE_STRUCT:
if (lang_.language != IDLOptions::kCSharp) {
code += "(" + type_name + " obj" + ")";
} else {
code += lang_.getter_prefix;
member_suffix += lang_.getter_suffix;
}
if (struct_def.fixed) {
code += " { return " + obj + ".__assign(" + lang_.accessor_prefix;
code += "bb_pos + " + NumToString(field.value.offset) + ", ";
code += lang_.accessor_prefix + "bb)";
} else {
code += offset_prefix + conditional_cast;
code += obj + ".__assign(";
code += field.value.type.struct_def->fixed
? "o + " + lang_.accessor_prefix + "bb_pos"
: lang_.accessor_prefix + "__indirect(o + " +
lang_.accessor_prefix + "bb_pos)";
code += ", " + lang_.accessor_prefix + "bb) : null";
}
break;
case BASE_TYPE_STRING:
code += lang_.getter_prefix;
member_suffix += lang_.getter_suffix;
code += offset_prefix + getter + "(o + " + lang_.accessor_prefix;
code += "bb_pos) : null";
break;
case BASE_TYPE_VECTOR: {
auto vectortype = field.value.type.VectorType();
code += "(";
if (vectortype.base_type == BASE_TYPE_STRUCT) {
if (lang_.language != IDLOptions::kCSharp)
code += type_name + " obj, ";
getter = obj + ".__assign";
}
code += "int j)" + offset_prefix + conditional_cast + getter +"(";
auto index = lang_.accessor_prefix + "__vector(o) + j * " +
NumToString(InlineSize(vectortype));
if (vectortype.base_type == BASE_TYPE_STRUCT) {
code += vectortype.struct_def->fixed
? index
: lang_.accessor_prefix + "__indirect(" + index + ")";
code += ", " + lang_.accessor_prefix + "bb";
} else {
code += index;
}
code += ")" + dest_mask + " : ";
code += field.value.type.element == BASE_TYPE_BOOL ? "false" :
(IsScalar(field.value.type.element) ? default_cast + "0" : "null");
break;
}
case BASE_TYPE_UNION:
if (lang_.language == IDLOptions::kCSharp) {
code += "() where TTable : struct, IFlatbufferObject";
code += offset_prefix + "(TTable?)" + getter;
code += "<TTable>(o) : null";
} else {
code += "(" + type_name + " obj)" + offset_prefix + getter;
code += "(obj, o) : null";
}
break;
default:
assert(0);
}
}
code += member_suffix;
code += "}\n";
if (field.value.type.base_type == BASE_TYPE_VECTOR) {
code += " public int " + MakeCamel(field.name, lang_.first_camel_upper);
code += "Length";
code += lang_.getter_prefix;
code += offset_prefix;
code += lang_.accessor_prefix + "__vector_len(o) : 0; ";
code += lang_.getter_suffix;
code += "}\n";
// See if we should generate a by-key accessor.
if (field.value.type.element == BASE_TYPE_STRUCT &&
!field.value.type.struct_def->fixed) {
auto &sd = *field.value.type.struct_def;
auto &fields = sd.fields.vec;
for (auto kit = fields.begin(); kit != fields.end(); ++kit) {
auto &key_field = **kit;
if (key_field.key) {
code += " public " + sd.name + lang_.optional_suffix + " ";
code += MakeCamel(field.name, lang_.first_camel_upper) + "ByKey(";
code += GenTypeNameDest(key_field.value.type) + " key)";
code += offset_prefix;
code += sd.name + ".__lookup_by_key(";
code += lang_.accessor_prefix + "__vector(o), key, ";
code += lang_.accessor_prefix + "bb) : null; ";
code += "}\n";
break;
}
}
}
}
// Generate a ByteBuffer accessor for strings & vectors of scalars.
if ((field.value.type.base_type == BASE_TYPE_VECTOR &&
IsScalar(field.value.type.VectorType().base_type)) ||
field.value.type.base_type == BASE_TYPE_STRING) {
switch (lang_.language) {
case IDLOptions::kJava:
code += " public ByteBuffer ";
code += MakeCamel(field.name, lang_.first_camel_upper);
code += "AsByteBuffer() { return ";
code += lang_.accessor_prefix + "__vector_as_bytebuffer(";
code += NumToString(field.value.offset) + ", ";
code += NumToString(field.value.type.base_type == BASE_TYPE_STRING
? 1
: InlineSize(field.value.type.VectorType()));
code += "); }\n";
break;
case IDLOptions::kCSharp:
code += " public ArraySegment<byte>? Get";
code += MakeCamel(field.name, lang_.first_camel_upper);
code += "Bytes() { return ";
code += lang_.accessor_prefix + "__vector_as_arraysegment(";
code += NumToString(field.value.offset);
code += "); }\n";
break;
default:
break;
}
}
// generate object accessors if the field is a nested_flatbuffer
auto nested = field.attributes.Lookup("nested_flatbuffer");
if (nested) {
auto nested_qualified_name =
parser_.namespaces_.back()->GetFullyQualifiedName(nested->constant);
auto nested_type = parser_.structs_.Lookup(nested_qualified_name);
auto nested_type_name = WrapInNameSpace(*nested_type);
auto nestedMethodName = MakeCamel(field.name, lang_.first_camel_upper)
+ "As" + nested_type_name;
auto getNestedMethodName = nestedMethodName;
if (lang_.language == IDLOptions::kCSharp) {
getNestedMethodName = "Get" + nestedMethodName;
conditional_cast = "(" + nested_type_name + lang_.optional_suffix + ")";
}
if (lang_.language != IDLOptions::kCSharp) {
code += " public " + nested_type_name + lang_.optional_suffix + " ";
code += nestedMethodName + "() { return ";
code += getNestedMethodName + "(new " + nested_type_name + "()); }\n";
} else {
obj = "(new " + nested_type_name + "())";
}
code += " public " + nested_type_name + lang_.optional_suffix + " ";
code += getNestedMethodName + "(";
if (lang_.language != IDLOptions::kCSharp)
code += nested_type_name + " obj";
code += ") { int o = " + lang_.accessor_prefix + "__offset(";
code += NumToString(field.value.offset) +"); ";
code += "return o != 0 ? " + conditional_cast + obj + ".__assign(";
code += lang_.accessor_prefix;
code += "__indirect(" + lang_.accessor_prefix + "__vector(o)), ";
code += lang_.accessor_prefix + "bb) : null; }\n";
}
// Generate mutators for scalar fields or vectors of scalars.
if (parser_.opts.mutable_buffer) {
auto underlying_type = field.value.type.base_type == BASE_TYPE_VECTOR
? field.value.type.VectorType()
: field.value.type;
// Boolean parameters have to be explicitly converted to byte
// representation.
auto setter_parameter = underlying_type.base_type == BASE_TYPE_BOOL
? "(byte)(" + field.name + " ? 1 : 0)"
: field.name;
auto mutator_prefix = MakeCamel("mutate", lang_.first_camel_upper);
// A vector mutator also needs the index of the vector element it should
// mutate.
auto mutator_params = (field.value.type.base_type == BASE_TYPE_VECTOR
? "(int j, "
: "(") + GenTypeNameDest(underlying_type) + " " + field.name + ") { ";
auto setter_index = field.value.type.base_type == BASE_TYPE_VECTOR
? lang_.accessor_prefix + "__vector(o) + j * " +
NumToString(InlineSize(underlying_type))
: (struct_def.fixed
? lang_.accessor_prefix + "bb_pos + " +
NumToString(field.value.offset)
: "o + " + lang_.accessor_prefix + "bb_pos");
if (IsScalar(field.value.type.base_type) ||
(field.value.type.base_type == BASE_TYPE_VECTOR &&
IsScalar(field.value.type.VectorType().base_type))) {
code += " public ";
code += struct_def.fixed ? "void " : lang_.bool_type;
code += mutator_prefix + MakeCamel(field.name, true);
code += mutator_params;
if (struct_def.fixed) {
code += GenSetter(underlying_type) + "(" + setter_index + ", ";
code += src_cast + setter_parameter + "); }\n";
} else {
code += "int o = " + lang_.accessor_prefix + "__offset(";
code += NumToString(field.value.offset) + ");";
code += " if (o != 0) { " + GenSetter(underlying_type);
code += "(" + setter_index + ", " + src_cast + setter_parameter +
"); return true; } else { return false; } }\n";
}
}
}
}
code += "\n";
flatbuffers::FieldDef *key_field = nullptr;
if (struct_def.fixed) {
// create a struct constructor function
code += " public static " + GenOffsetType(struct_def) + " ";
code += FunctionStart('C') + "reate";
code += struct_def.name + "(FlatBufferBuilder builder";
GenStructArgs(struct_def, code_ptr, "");
code += ") {\n";
GenStructBody(struct_def, code_ptr, "");
code += " return ";
code += GenOffsetConstruct(struct_def,
"builder." + std::string(lang_.get_fbb_offset));
code += ";\n }\n";
} else {
// Generate a method that creates a table in one go. This is only possible
// when the table has no struct fields, since those have to be created
// inline, and there's no way to do so in Java.
bool has_no_struct_fields = true;
int num_fields = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) continue;
if (IsStruct(field.value.type)) {
has_no_struct_fields = false;
} else {
num_fields++;
}
}
if (has_no_struct_fields && num_fields) {
// Generate a table constructor of the form:
// public static int createName(FlatBufferBuilder builder, args...)
code += " public static " + GenOffsetType(struct_def) + " ";
code += FunctionStart('C') + "reate" + struct_def.name;
code += "(FlatBufferBuilder builder";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) continue;
code += ",\n ";
code += GenTypeBasic(DestinationType(field.value.type, false));
code += " ";
code += field.name;
if (!IsScalar(field.value.type.base_type)) code += "Offset";
// Java doesn't have defaults, which means this method must always
// supply all arguments, and thus won't compile when fields are added.
if (lang_.language != IDLOptions::kJava) {
code += " = ";
code += GenDefaultValueBasic(field.value);
}
}
code += ") {\n builder.";
code += FunctionStart('S') + "tartObject(";
code += NumToString(struct_def.fields.vec.size()) + ");\n";
for (size_t size = struct_def.sortbysize ? sizeof(largest_scalar_t) : 1;
size;
size /= 2) {
for (auto it = struct_def.fields.vec.rbegin();
it != struct_def.fields.vec.rend(); ++it) {
auto &field = **it;
if (!field.deprecated &&
(!struct_def.sortbysize ||
size == SizeOf(field.value.type.base_type))) {
code += " " + struct_def.name + ".";
code += FunctionStart('A') + "dd";
code += MakeCamel(field.name) + "(builder, " + field.name;
if (!IsScalar(field.value.type.base_type)) code += "Offset";
code += ");\n";
}
}
}
code += " return " + struct_def.name + ".";
code += FunctionStart('E') + "nd" + struct_def.name;
code += "(builder);\n }\n\n";
}
// Generate a set of static methods that allow table construction,
// of the form:
// public static void addName(FlatBufferBuilder builder, short name)
// { builder.addShort(id, name, default); }
// Unlike the Create function, these always work.
code += " public static void " + FunctionStart('S') + "tart";
code += struct_def.name;
code += "(FlatBufferBuilder builder) { builder.";
code += FunctionStart('S') + "tartObject(";
code += NumToString(struct_def.fields.vec.size()) + "); }\n";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
auto &field = **it;
if (field.deprecated) continue;
if (field.key) key_field = &field;
code += " public static void " + FunctionStart('A') + "dd";
code += MakeCamel(field.name);
code += "(FlatBufferBuilder builder, ";
code += GenTypeBasic(DestinationType(field.value.type, false));
auto argname = MakeCamel(field.name, false);
if (!IsScalar(field.value.type.base_type)) argname += "Offset";
code += " " + argname + ") { builder." + FunctionStart('A') + "dd";
code += GenMethod(field.value.type) + "(";
code += NumToString(it - struct_def.fields.vec.begin()) + ", ";
code += SourceCastBasic(field.value.type);
code += argname;
if (!IsScalar(field.value.type.base_type) &&
field.value.type.base_type != BASE_TYPE_UNION &&
lang_.language == IDLOptions::kCSharp) {
code += ".Value";
}
code += ", ";
if (lang_.language == IDLOptions::kJava)
code += SourceCastBasic( field.value.type );
code += GenDefaultValue(field.value, false);
code += "); }\n";
if (field.value.type.base_type == BASE_TYPE_VECTOR) {
auto vector_type = field.value.type.VectorType();
auto alignment = InlineAlignment(vector_type);
auto elem_size = InlineSize(vector_type);
if (!IsStruct(vector_type)) {
// Generate a method to create a vector from a Java array.
code += " public static " + GenVectorOffsetType() + " ";
code += FunctionStart('C') + "reate";
code += MakeCamel(field.name);
code += "Vector(FlatBufferBuilder builder, ";
code += GenTypeBasic(vector_type) + "[] data) ";
code += "{ builder." + FunctionStart('S') + "tartVector(";
code += NumToString(elem_size);
code += ", data." + FunctionStart('L') + "ength, ";
code += NumToString(alignment);
code += "); for (int i = data.";
code += FunctionStart('L') + "ength - 1; i >= 0; i--) builder.";
code += FunctionStart('A') + "dd";
code += GenMethod(vector_type);
code += "(";
code += SourceCastBasic(vector_type, false);
code += "data[i]";
if (lang_.language == IDLOptions::kCSharp &&
(vector_type.base_type == BASE_TYPE_STRUCT ||
vector_type.base_type == BASE_TYPE_STRING))
code += ".Value";
code += "); return ";
code += "builder." + FunctionStart('E') + "ndVector(); }\n";
}
// Generate a method to start a vector, data to be added manually after.
code += " public static void " + FunctionStart('S') + "tart";
code += MakeCamel(field.name);
code += "Vector(FlatBufferBuilder builder, int numElems) ";
code += "{ builder." + FunctionStart('S') + "tartVector(";
code += NumToString(elem_size);
code += ", numElems, " + NumToString(alignment);
code += "); }\n";
}
}
code += " public static " + GenOffsetType(struct_def) + " ";
code += FunctionStart('E') + "nd" + struct_def.name;
code += "(FlatBufferBuilder builder) {\n int o = builder.";
code += FunctionStart('E') + "ndObject();\n";
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end();
++it) {
auto &field = **it;
if (!field.deprecated && field.required) {
code += " builder." + FunctionStart('R') + "equired(o, ";
code += NumToString(field.value.offset);
code += "); // " + field.name + "\n";
}
}
code += " return " + GenOffsetConstruct(struct_def, "o") + ";\n }\n";
if (parser_.root_struct_def_ == &struct_def) {
code += " public static void ";
code += FunctionStart('F') + "inish" + struct_def.name;
code += "Buffer(FlatBufferBuilder builder, " + GenOffsetType(struct_def);
code += " offset) {";
code += " builder." + FunctionStart('F') + "inish(offset";
if (lang_.language == IDLOptions::kCSharp) {
code += ".Value";
}
if (parser_.file_identifier_.length())
code += ", \"" + parser_.file_identifier_ + "\"";
code += "); }\n";
}
}
// Only generate a key compare function for tables,
// because `key_field` is not set for structs
if (struct_def.has_key && !struct_def.fixed) {
if (lang_.language == IDLOptions::kJava) {
code += "\n @Override\n protected int keysCompare(";
code += "Integer o1, Integer o2, ByteBuffer _bb) {";
code += GenKeyGetter(key_field);
code += " }\n";
}
else {
code += "\n public static VectorOffset ";
code += "CreateSortedVectorOf" + struct_def.name;
code += "(FlatBufferBuilder builder, ";
code += "Offset<" + struct_def.name + ">";
code += "[] offsets) {\n";
code += " Array.Sort(offsets, (Offset<" + struct_def.name +
"> o1, Offset<" + struct_def.name + "> o2) => " + GenKeyGetter(key_field);
code += ");\n";
code += " return builder.CreateVectorOfTables(offsets);\n }\n";
}
code += "\n public static " + struct_def.name + lang_.optional_suffix;
code += " __lookup_by_key(int vectorLocation, ";
code += GenTypeNameDest(key_field->value.type);
code += " key, ByteBuffer bb) {\n";
if (key_field->value.type.base_type == BASE_TYPE_STRING) {
code += " byte[] byteKey = ";
if (lang_.language == IDLOptions::kJava)
code += "key.getBytes(Table.UTF8_CHARSET.get());\n";
else
code += "System.Text.Encoding.UTF8.GetBytes(key);\n";
}
code += " int span = ";
code += "bb." + FunctionStart('G') + "etInt(vectorLocation - 4);\n";
code += " int start = 0;\n";
code += " while (span != 0) {\n";
code += " int middle = span / 2;\n";
code += GenLookupKeyGetter(key_field);
code += " if (comp > 0) {\n";
code += " span = middle;\n";
code += " } else if (comp < 0) {\n";
code += " middle++;\n";
code += " start += middle;\n";
code += " span -= middle;\n";
code += " } else {\n";
code += " return new " + struct_def.name;
code += "().__assign(tableOffset, bb);\n";
code += " }\n }\n";
code += " return null;\n";
code += " }\n";
}
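// Editor's note: the __lookup_by_key body emitted above is a standard binary
// search over the sorted vector -- each pass halves `span`, and depending on
// the sign of `comp` either narrows to the lower half (`span = middle`) or
// skips past the probe (`middle++; start += middle; span -= middle`),
// returning an assigned table on an exact match or null once `span` hits zero.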
code += "}";
// Java does not need the closing semi-colon on class definitions.
code += (lang_.language != IDLOptions::kJava) ? ";" : "";
code += "\n\n";
}
const LanguageParameters& lang_;
// This tracks the current namespace used to determine if a type needs to be prefixed by its namespace
const Namespace *cur_name_space_;
};
} // namespace general
bool GenerateGeneral(const Parser &parser, const std::string &path,
const std::string &file_name) {
general::GeneralGenerator generator(parser, path, file_name);
return generator.generate();
}
std::string GeneralMakeRule(const Parser &parser, const std::string &path,
const std::string &file_name) {
assert(parser.opts.lang <= IDLOptions::kMAX);
const auto &lang = GetLangParams(parser.opts.lang);
std::string make_rule;
for (auto it = parser.enums_.vec.begin(); it != parser.enums_.vec.end();
++it) {
auto &enum_def = **it;
if (make_rule != "") make_rule += " ";
std::string directory =
BaseGenerator::NamespaceDir(parser, path, *enum_def.defined_namespace);
make_rule += directory + enum_def.name + lang.file_extension;
}
for (auto it = parser.structs_.vec.begin(); it != parser.structs_.vec.end();
++it) {
auto &struct_def = **it;
if (make_rule != "") make_rule += " ";
std::string directory =
BaseGenerator::NamespaceDir(parser, path,
*struct_def.defined_namespace);
make_rule += directory + struct_def.name + lang.file_extension;
}
make_rule += ": ";
auto included_files = parser.GetIncludedFilesRecursive(file_name);
for (auto it = included_files.begin(); it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
std::string BinaryFileName(const Parser &parser,
const std::string &path,
const std::string &file_name) {
auto ext = parser.file_extension_.length() ? parser.file_extension_ : "bin";
return path + file_name + "." + ext;
}
bool GenerateBinary(const Parser &parser,
const std::string &path,
const std::string &file_name) {
return !parser.builder_.GetSize() ||
flatbuffers::SaveFile(
BinaryFileName(parser, path, file_name).c_str(),
reinterpret_cast<char *>(parser.builder_.GetBufferPointer()),
parser.builder_.GetSize(),
true);
}
std::string BinaryMakeRule(const Parser &parser,
const std::string &path,
const std::string &file_name) {
if (!parser.builder_.GetSize()) return "";
std::string filebase = flatbuffers::StripPath(
flatbuffers::StripExtension(file_name));
std::string make_rule = BinaryFileName(parser, path, filebase) + ": " +
file_name;
auto included_files = parser.GetIncludedFilesRecursive(
parser.root_struct_def_->file);
for (auto it = included_files.begin();
it != included_files.end(); ++it) {
make_rule += " " + *it;
}
return make_rule;
}
} // namespace flatbuffers
| 1 | 12,343 | maybe rather than `bool` make this field a `StructDef *` ? | google-flatbuffers | java |
@@ -209,6 +209,13 @@ func (fbm *folderBlockManager) enqueueBlocksToDelete(toDelete blocksToDelete) {
fbm.blocksToDeleteChan <- toDelete
}
+func (fbm *folderBlockManager) enqueueBlocksToDeleteAfterShortDelay(
+ toDelete blocksToDelete) {
+ fbm.blocksToDeleteWaitGroup.Add(1)
+ time.AfterFunc(10*time.Millisecond,
+ func() { fbm.blocksToDeleteChan <- toDelete })
+}
+
// enqueueBlocksToDeleteNoWait enqueues blocks to be deleted just like
// enqueueBlocksToDelete, except that when fbm.blocksToDeleteChan is full, it
// doesn't block, but instead spawns a goroutine to handle the sending. | 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"errors"
"fmt"
"sync"
"time"
"github.com/keybase/client/go/logger"
"golang.org/x/net/context"
)
type fbmHelper interface {
getMDForFBM(ctx context.Context) (*RootMetadata, error)
finalizeGCOp(ctx context.Context, gco *gcOp) error
}
const (
// How many pointers to downgrade in a single Archive/Delete call.
numPointersToDowngradePerChunk = 20
// Once the number of pointers being deleted in a single gc op
// passes this threshold, we'll stop garbage collection at the
// current revision.
numPointersPerGCThreshold = 100
// The most revisions to consider for each QR run.
numMaxRevisionsPerQR = 100
)
type blocksToDelete struct {
md *RootMetadata
blocks []BlockPointer
}
// folderBlockManager is a helper class for managing the blocks in a
// particular TLF. It archives historical blocks and reclaims quota
// usage, all in the background.
type folderBlockManager struct {
config Config
log logger.Logger
shutdownChan chan struct{}
id TlfID
// A queue of MD updates for this folder that need to have their
// unreferenced blocks archived
archiveChan chan *RootMetadata
archivePauseChan chan (<-chan struct{})
// archiveGroup tracks the outstanding archives.
archiveGroup RepeatedWaitGroup
archiveCancelLock sync.Mutex
archiveCancel context.CancelFunc
// blocksToDeleteChan is a list of blocks, for a given
// metadata revision, that may have been Put as part of a failed
// MD write. These blocks should be deleted as soon as we know
// for sure that the MD write isn't visible to others.
// TODO: Persist these to disk?
blocksToDeleteChan chan blocksToDelete
blocksToDeletePauseChan chan (<-chan struct{})
blocksToDeleteWaitGroup RepeatedWaitGroup
blocksToDeleteCancelLock sync.Mutex
blocksToDeleteCancel context.CancelFunc
// forceReclamation forces the manager to start a reclamation
// process.
forceReclamationChan chan struct{}
// reclamationGroup tracks the outstanding quota reclamations.
reclamationGroup RepeatedWaitGroup
reclamationCancelLock sync.Mutex
reclamationCancel context.CancelFunc
helper fbmHelper
// Keep track of the last reclamation time, for testing.
lastReclamationTimeLock sync.Mutex
lastReclamationTime time.Time
// Remembers what happened last time during quota reclamation;
// should only be accessed by the QR goroutine.
lastQRHeadRev MetadataRevision
lastQROldEnoughRev MetadataRevision
wasLastQRComplete bool
}
func newFolderBlockManager(config Config, fb FolderBranch,
helper fbmHelper) *folderBlockManager {
tlfStringFull := fb.Tlf.String()
log := config.MakeLogger(fmt.Sprintf("FBM %s", tlfStringFull[:8]))
fbm := &folderBlockManager{
config: config,
log: log,
shutdownChan: make(chan struct{}),
id: fb.Tlf,
archiveChan: make(chan *RootMetadata, 25),
archivePauseChan: make(chan (<-chan struct{})),
blocksToDeleteChan: make(chan blocksToDelete, 25),
blocksToDeletePauseChan: make(chan (<-chan struct{})),
forceReclamationChan: make(chan struct{}, 1),
helper: helper,
}
// Pass in the BlockOps here so that the archive goroutine
// doesn't do possibly-racy-in-tests access to
// fbm.config.BlockOps().
go fbm.archiveBlocksInBackground()
go fbm.deleteBlocksInBackground()
if fb.Branch == MasterBranch {
go fbm.reclaimQuotaInBackground()
}
return fbm
}
func (fbm *folderBlockManager) setBlocksToDeleteCancel(cancel context.CancelFunc) {
fbm.blocksToDeleteCancelLock.Lock()
defer fbm.blocksToDeleteCancelLock.Unlock()
fbm.blocksToDeleteCancel = cancel
}
func (fbm *folderBlockManager) cancelBlocksToDelete() {
blocksToDeleteCancel := func() context.CancelFunc {
fbm.blocksToDeleteCancelLock.Lock()
defer fbm.blocksToDeleteCancelLock.Unlock()
blocksToDeleteCancel := fbm.blocksToDeleteCancel
fbm.blocksToDeleteCancel = nil
return blocksToDeleteCancel
}()
if blocksToDeleteCancel != nil {
blocksToDeleteCancel()
}
}
func (fbm *folderBlockManager) setArchiveCancel(cancel context.CancelFunc) {
fbm.archiveCancelLock.Lock()
defer fbm.archiveCancelLock.Unlock()
fbm.archiveCancel = cancel
}
func (fbm *folderBlockManager) cancelArchive() {
archiveCancel := func() context.CancelFunc {
fbm.archiveCancelLock.Lock()
defer fbm.archiveCancelLock.Unlock()
archiveCancel := fbm.archiveCancel
fbm.archiveCancel = nil
return archiveCancel
}()
if archiveCancel != nil {
archiveCancel()
}
}
func (fbm *folderBlockManager) setReclamationCancel(cancel context.CancelFunc) {
fbm.reclamationCancelLock.Lock()
defer fbm.reclamationCancelLock.Unlock()
fbm.reclamationCancel = cancel
}
func (fbm *folderBlockManager) cancelReclamation() {
reclamationCancel := func() context.CancelFunc {
fbm.reclamationCancelLock.Lock()
defer fbm.reclamationCancelLock.Unlock()
reclamationCancel := fbm.reclamationCancel
fbm.reclamationCancel = nil
return reclamationCancel
}()
if reclamationCancel != nil {
reclamationCancel()
}
}
func (fbm *folderBlockManager) shutdown() {
close(fbm.shutdownChan)
fbm.cancelArchive()
fbm.cancelBlocksToDelete()
fbm.cancelReclamation()
}
// cleanUpBlockState cleans up any blocks that may have been orphaned
// by a failure during or after blocks have been sent to the
// server. This is usually used in a defer right before a call to
// fbo.doBlockPuts like so:
//
// defer func() {
// if err != nil {
// ...cleanUpBlockState(md, bps)
// }
// }()
//
// ... = ...doBlockPuts(ctx, md, *bps)
func (fbm *folderBlockManager) cleanUpBlockState(
md *RootMetadata, bps *blockPutState) {
fbm.log.CDebugf(nil, "Clean up md %d %s", md.Revision, md.MergedStatus())
toDelete := blocksToDelete{md: md}
for _, bs := range bps.blockStates {
toDelete.blocks = append(toDelete.blocks, bs.blockPtr)
}
fbm.enqueueBlocksToDelete(toDelete)
}
func (fbm *folderBlockManager) enqueueBlocksToDelete(toDelete blocksToDelete) {
fbm.blocksToDeleteWaitGroup.Add(1)
fbm.blocksToDeleteChan <- toDelete
}
// enqueueBlocksToDeleteNoWait enqueues blocks to be deleted just like
// enqueueBlocksToDelete, except that when fbm.blocksToDeleteChan is full, it
// doesn't block, but instead spawns a goroutine to handle the sending.
//
// This is necessary to prevent a situation like the following:
// 1. A delete fails when fbm.blocksToDeleteChan is full
// 2. The goroutine tries to put the failed toDelete back to
// fbm.blocksToDeleteChan
// 3. Step 2 becomes synchronous and is blocked because
// fbm.blocksToDeleteChan is already full
// 4. fbm.blocksToDeleteChan never gets drained because the goroutine that
// drains it is waiting for sending on the same channel.
// 5. Deadlock!
func (fbm *folderBlockManager) enqueueBlocksToDeleteNoWait(toDelete blocksToDelete) {
fbm.blocksToDeleteWaitGroup.Add(1)
select {
case fbm.blocksToDeleteChan <- toDelete:
return
default:
go func() { fbm.blocksToDeleteChan <- toDelete }()
}
}
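// Editor's note: a minimal sketch (all names illustrative, not part of this
// package) of the self-deadlock the comment above describes. If the drain
// loop re-enqueued failed items synchronously, it would block on the very
// channel that only it drains once the buffer is full:
//
//	for td := range blocksToDeleteChan { // the only reader
//		if err := process(td); err != nil {
//			blocksToDeleteChan <- td // full buffer => deadlock
//		}
//	}
//
// Sending from a fresh goroutine, as enqueueBlocksToDeleteNoWait does above,
// keeps the drain loop free to make progress.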
func (fbm *folderBlockManager) archiveUnrefBlocks(md *RootMetadata) {
// Don't archive for unmerged revisions, because conflict
// resolution might undo some of the unreferences.
if md.MergedStatus() != Merged {
return
}
fbm.archiveGroup.Add(1)
fbm.archiveChan <- md
}
// archiveUnrefBlocksNoWait enqueues the MD for archiving without
// blocking. By the time it returns, the archive group has been
// incremented so future waits will block on this archive. This
// method is for internal use within folderBlockManager only.
func (fbm *folderBlockManager) archiveUnrefBlocksNoWait(md *RootMetadata) {
// Don't archive for unmerged revisions, because conflict
// resolution might undo some of the unreferences.
if md.MergedStatus() != Merged {
return
}
fbm.archiveGroup.Add(1)
// Don't block if the channel is full; instead do the send in a
// background goroutine. We've already done the Add above, so the
// wait calls should all work just fine.
select {
case fbm.archiveChan <- md:
return
default:
go func() { fbm.archiveChan <- md }()
}
}
func (fbm *folderBlockManager) waitForArchives(ctx context.Context) error {
return fbm.archiveGroup.Wait(ctx)
}
func (fbm *folderBlockManager) waitForDeletingBlocks(ctx context.Context) error {
return fbm.blocksToDeleteWaitGroup.Wait(ctx)
}
func (fbm *folderBlockManager) waitForQuotaReclamations(
ctx context.Context) error {
return fbm.reclamationGroup.Wait(ctx)
}
func (fbm *folderBlockManager) forceQuotaReclamation() {
fbm.reclamationGroup.Add(1)
select {
case fbm.forceReclamationChan <- struct{}{}:
default:
fbm.reclamationGroup.Done()
}
}
// doChunkedDowngrades sends batched archive or delete messages to the
// block server for the given block pointers. For deletes, it returns
// a list of block IDs that no longer have any references.
func (fbm *folderBlockManager) doChunkedDowngrades(ctx context.Context,
md *RootMetadata, ptrs []BlockPointer, archive bool) (
[]BlockID, error) {
fbm.log.CDebugf(ctx, "Downgrading %d pointers (archive=%t)",
len(ptrs), archive)
bops := fbm.config.BlockOps()
// Round up to find the number of chunks.
numChunks := (len(ptrs) + numPointersToDowngradePerChunk - 1) /
numPointersToDowngradePerChunk
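// Editor's note (worked example): with numPointersToDowngradePerChunk = 20,
// len(ptrs) = 45 gives (45 + 20 - 1) / 20 = 3 chunks, holding 20, 20 and 5
// pointers respectively.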
numWorkers := numChunks
if numWorkers > maxParallelBlockPuts {
numWorkers = maxParallelBlockPuts
}
chunks := make(chan []BlockPointer, numChunks)
var wg sync.WaitGroup
defer wg.Wait()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
type workerResult struct {
zeroRefCounts []BlockID
err error
}
chunkResults := make(chan workerResult, numChunks)
worker := func() {
defer wg.Done()
for chunk := range chunks {
var res workerResult
fbm.log.CDebugf(ctx, "Downgrading chunk of %d pointers", len(chunk))
if archive {
res.err = bops.Archive(ctx, md, chunk)
} else {
var liveCounts map[BlockID]int
liveCounts, res.err = bops.Delete(ctx, md, chunk)
if res.err == nil {
for id, count := range liveCounts {
if count == 0 {
res.zeroRefCounts = append(res.zeroRefCounts, id)
}
}
}
}
chunkResults <- res
select {
// return early if the context has been canceled
case <-ctx.Done():
return
default:
}
}
}
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go worker()
}
for start := 0; start < len(ptrs); start += numPointersToDowngradePerChunk {
end := start + numPointersToDowngradePerChunk
if end > len(ptrs) {
end = len(ptrs)
}
chunks <- ptrs[start:end]
}
close(chunks)
var zeroRefCounts []BlockID
for i := 0; i < numChunks; i++ {
result := <-chunkResults
if result.err != nil {
// deferred cancel will stop the other workers.
return nil, result.err
}
zeroRefCounts = append(zeroRefCounts, result.zeroRefCounts...)
}
return zeroRefCounts, nil
}
// deleteBlockRefs sends batched delete messages to the block server
// for the given block pointers. It returns a list of block IDs that
// no longer have any references.
func (fbm *folderBlockManager) deleteBlockRefs(ctx context.Context,
md *RootMetadata, ptrs []BlockPointer) ([]BlockID, error) {
return fbm.doChunkedDowngrades(ctx, md, ptrs, false)
}
func (fbm *folderBlockManager) processBlocksToDelete(ctx context.Context, toDelete blocksToDelete) error {
// also attempt to delete any error references
defer fbm.blocksToDeleteWaitGroup.Done()
fbm.log.CDebugf(ctx, "Checking deleted blocks for revision %d",
toDelete.md.Revision)
// Make sure that the MD didn't actually become
// part of the folder history. (This could happen
// if the Sync was canceled while the MD put was
// outstanding.)
rmds, err := getMDRange(ctx, fbm.config, fbm.id, toDelete.md.BID,
toDelete.md.Revision, toDelete.md.Revision, toDelete.md.MergedStatus())
if err != nil || len(rmds) == 0 {
fbm.enqueueBlocksToDeleteNoWait(toDelete)
return nil
}
dirsEqual, err := CodecEqual(fbm.config.Codec(),
rmds[0].data.Dir, toDelete.md.data.Dir)
if err != nil {
fbm.log.CErrorf(ctx, "Error when comparing dirs: %v", err)
} else if dirsEqual {
// This md is part of the history of the folder,
// so we shouldn't delete the blocks.
fbm.log.CDebugf(ctx, "Not deleting blocks from revision %d",
toDelete.md.Revision)
// But, since this MD put seems to have succeeded, we
// should archive it.
fbm.log.CDebugf(ctx, "Archiving successful MD revision %d",
rmds[0].Revision)
// Don't block on archiving the MD, because that could
// lead to deadlock.
fbm.archiveUnrefBlocksNoWait(rmds[0])
return nil
}
// Otherwise something else has been written over
// this MD, so get rid of the blocks.
fbm.log.CDebugf(ctx, "Cleaning up blocks for failed revision %d",
toDelete.md.Revision)
_, err = fbm.deleteBlockRefs(ctx, toDelete.md, toDelete.blocks)
// Ignore permanent errors
_, isPermErr := err.(BServerError)
_, isNonceNonExistentErr := err.(BServerErrorNonceNonExistent)
if err != nil {
fbm.log.CWarningf(ctx, "Couldn't delete some ref in batch %v: %v", toDelete.blocks, err)
if !isPermErr && !isNonceNonExistentErr {
fbm.enqueueBlocksToDeleteNoWait(toDelete)
return nil
}
}
return nil
}
// CtxFBMTagKey is the type used for unique context tags within
// folderBlockManager
type CtxFBMTagKey int
const (
// CtxFBMIDKey is the type of the tag for unique operation IDs
// within folderBlockManager.
CtxFBMIDKey CtxFBMTagKey = iota
)
// CtxFBMOpID is the display name for the unique operation
// folderBlockManager ID tag.
const CtxFBMOpID = "FBMID"
func (fbm *folderBlockManager) ctxWithFBMID(
ctx context.Context) context.Context {
return ctxWithRandomID(ctx, CtxFBMIDKey, CtxFBMOpID, fbm.log)
}
// Run the passed function with a context that's canceled on shutdown.
func (fbm *folderBlockManager) runUnlessShutdown(
fn func(ctx context.Context) error) error {
ctx := fbm.ctxWithFBMID(context.Background())
ctx, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
errChan := make(chan error, 1)
go func() {
errChan <- fn(ctx)
}()
select {
case err := <-errChan:
return err
case <-fbm.shutdownChan:
return errors.New("shutdown received")
}
}
func (fbm *folderBlockManager) archiveBlockRefs(ctx context.Context,
md *RootMetadata, ptrs []BlockPointer) error {
_, err := fbm.doChunkedDowngrades(ctx, md, ptrs, true)
return err
}
func (fbm *folderBlockManager) archiveBlocksInBackground() {
for {
select {
case md := <-fbm.archiveChan:
var ptrs []BlockPointer
for _, op := range md.data.Changes.Ops {
ptrs = append(ptrs, op.Unrefs()...)
for _, update := range op.AllUpdates() {
// It's legal for there to be an "update" between
// two identical pointers (usually because of
// conflict resolution), so ignore that for
// archival purposes.
if update.Ref != update.Unref {
ptrs = append(ptrs, update.Unref)
}
}
}
fbm.runUnlessShutdown(func(ctx context.Context) (err error) {
defer fbm.archiveGroup.Done()
// This func doesn't take any locks, though it can
// block md writes due to the buffered channel. So
// use the long timeout to make sure things get
// unblocked eventually, but no need for a short timeout.
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
fbm.setArchiveCancel(cancel)
defer fbm.cancelArchive()
fbm.log.CDebugf(ctx, "Archiving %d block pointers as a result "+
"of revision %d", len(ptrs), md.Revision)
err = fbm.archiveBlockRefs(ctx, md, ptrs)
if err != nil {
fbm.log.CWarningf(ctx, "Couldn't archive blocks: %v", err)
return err
}
return nil
})
case unpause := <-fbm.archivePauseChan:
fbm.runUnlessShutdown(func(ctx context.Context) (err error) {
fbm.log.CInfof(ctx, "Archives paused")
// wait to be unpaused
select {
case <-unpause:
fbm.log.CInfof(ctx, "Archives unpaused")
case <-ctx.Done():
return ctx.Err()
}
return nil
})
case <-fbm.shutdownChan:
return
}
}
}
func (fbm *folderBlockManager) deleteBlocksInBackground() {
for {
select {
case toDelete := <-fbm.blocksToDeleteChan:
fbm.runUnlessShutdown(func(ctx context.Context) (err error) {
ctx, cancel := context.WithTimeout(ctx, backgroundTaskTimeout)
fbm.setBlocksToDeleteCancel(cancel)
defer fbm.cancelBlocksToDelete()
if err := fbm.processBlocksToDelete(ctx, toDelete); err != nil {
fbm.log.CDebugf(ctx, "Error deleting blocks: %v", err)
return err
}
return nil
})
case unpause := <-fbm.blocksToDeletePauseChan:
fbm.runUnlessShutdown(func(ctx context.Context) (err error) {
fbm.log.CInfof(ctx, "deleteBlocks paused")
select {
case <-unpause:
fbm.log.CInfof(ctx, "deleteBlocks unpaused")
case <-ctx.Done():
return ctx.Err()
}
return nil
})
case <-fbm.shutdownChan:
return
}
}
}
func (fbm *folderBlockManager) isOldEnough(rmd *RootMetadata) bool {
// Trust the client-provided timestamp -- it's
// possible that a writer with a bad clock could cause
// another writer to clear out quotas early. That's
// ok, there's nothing we can really do about that.
//
// TODO: rmd.data.Dir.Mtime does not necessarily reflect when the
// MD was made, since it only gets updated if the root directory
// mtime needs to be updated. As a result, some updates may be
// cleaned up earlier than desired. We need to find a more stable
// way to record MD update time (KBFS-821).
mtime := time.Unix(0, rmd.data.Dir.Mtime)
unrefAge := fbm.config.QuotaReclamationMinUnrefAge()
return mtime.Add(unrefAge).Before(fbm.config.Clock().Now())
}
// getMostRecentOldEnoughAndGCRevisions returns the most recent MD
// that's older than the unref age, as well as the latest revision
// that was scrubbed by the previous gc op.
func (fbm *folderBlockManager) getMostRecentOldEnoughAndGCRevisions(
ctx context.Context, head *RootMetadata) (
mostRecentOldEnoughRev, lastGCRev MetadataRevision, err error) {
// Walk backwards until we find one that is old enough. Also,
// look out for the previous gcOp.
currHead := head.Revision
mostRecentOldEnoughRev = MetadataRevisionUninitialized
lastGCRev = MetadataRevisionUninitialized
for {
startRev := currHead - maxMDsAtATime + 1 // (MetadataRevision is signed)
if startRev < MetadataRevisionInitial {
startRev = MetadataRevisionInitial
}
rmds, err := getMDRange(ctx, fbm.config, fbm.id, NullBranchID, startRev,
currHead, Merged)
if err != nil {
return MetadataRevisionUninitialized,
MetadataRevisionUninitialized, err
}
numNew := len(rmds)
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
if mostRecentOldEnoughRev == MetadataRevisionUninitialized &&
fbm.isOldEnough(rmd) {
fbm.log.CDebugf(ctx, "Revision %d is older than the unref "+
"age %s", rmd.Revision,
fbm.config.QuotaReclamationMinUnrefAge())
mostRecentOldEnoughRev = rmd.Revision
}
if lastGCRev == MetadataRevisionUninitialized {
for j := len(rmd.data.Changes.Ops) - 1; j >= 0; j-- {
gcOp, ok := rmd.data.Changes.Ops[j].(*gcOp)
if !ok {
continue
}
fbm.log.CDebugf(ctx, "Found last gc op: %s", gcOp)
lastGCRev = gcOp.LatestRev
break
}
}
// Once both return values are set, we are done
if mostRecentOldEnoughRev != MetadataRevisionUninitialized &&
lastGCRev != MetadataRevisionUninitialized {
return mostRecentOldEnoughRev, lastGCRev, nil
}
}
if numNew > 0 {
currHead = rmds[0].Revision - 1
}
if numNew < maxMDsAtATime || currHead < MetadataRevisionInitial {
break
}
}
return mostRecentOldEnoughRev, lastGCRev, nil
}
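// Editor's note (illustrative): the loop above pages backward through the MD
// history in windows of maxMDsAtATime revisions. If that constant were 100
// and the head sat at revision 250, the ranges fetched would be [151, 250],
// [51, 150] and finally [1, 50], clamped at MetadataRevisionInitial. The
// same windowing is reused by getUnreferencedBlocks below.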
// getUnreferencedBlocks returns a slice containing all the block pointers
// that were unreferenced after the earliestRev, up to and including
// those in latestRev. If the number of pointers is too large, it
// will shorten the range of the revisions being reclaimed, and return
// the latest revision represented in the returned slice of pointers.
func (fbm *folderBlockManager) getUnreferencedBlocks(
ctx context.Context, latestRev, earliestRev MetadataRevision) (
ptrs []BlockPointer, lastRevConsidered MetadataRevision,
complete bool, err error) {
fbm.log.CDebugf(ctx, "Getting unreferenced blocks between revisions "+
"%d and %d", earliestRev, latestRev)
defer func() {
if err == nil {
fbm.log.CDebugf(ctx, "Found %d pointers to clean between "+
"revisions %d and %d", len(ptrs), earliestRev, latestRev)
}
}()
if latestRev <= earliestRev {
// Nothing to do.
fbm.log.CDebugf(ctx, "Latest rev %d is included in the previous "+
"gc op (%d)", latestRev, earliestRev)
return nil, MetadataRevisionUninitialized, true, nil
}
// Walk backward, starting from latestRev, until just after
// earliestRev, gathering block pointers.
currHead := latestRev
revStartPositions := make(map[MetadataRevision]int)
outer:
for {
startRev := currHead - maxMDsAtATime + 1 // (MetadataRevision is signed)
if startRev < MetadataRevisionInitial {
startRev = MetadataRevisionInitial
}
rmds, err := getMDRange(ctx, fbm.config, fbm.id, NullBranchID, startRev,
currHead, Merged)
if err != nil {
return nil, MetadataRevisionUninitialized, false, err
}
numNew := len(rmds)
for i := len(rmds) - 1; i >= 0; i-- {
rmd := rmds[i]
if rmd.Revision <= earliestRev {
break outer
}
// Save the latest revision starting at this position:
revStartPositions[rmd.Revision] = len(ptrs)
for _, op := range rmd.data.Changes.Ops {
if _, ok := op.(*gcOp); ok {
continue
}
ptrs = append(ptrs, op.Unrefs()...)
for _, update := range op.AllUpdates() {
// It's legal for there to be an "update" between
// two identical pointers (usually because of
// conflict resolution), so ignore that for quota
// reclamation purposes.
if update.Ref != update.Unref {
ptrs = append(ptrs, update.Unref)
}
}
}
// TODO: when can we clean up the MD's unembedded block
// changes pointer? It's not safe until we know for sure
// that all existing clients have received the latest
// update (and also that there are no outstanding staged
// branches). Let's do that as part of the bigger issue
// KBFS-793 -- for now we have to leak those blocks.
}
if numNew > 0 {
currHead = rmds[0].Revision - 1
}
if numNew < maxMDsAtATime || currHead < MetadataRevisionInitial {
break
}
}
complete = true
if len(ptrs) > numPointersPerGCThreshold {
// Find the earliest revision to clean up that lets us send at
// least numPointersPerGCThreshold pointers. The earliest
// pointers are at the end of the list, so subtract the
// threshold from the back.
threshStart := len(ptrs) - numPointersPerGCThreshold
origLatestRev := latestRev
origPtrsLen := len(ptrs)
// TODO: optimize by keeping rev->pos mappings in sorted order.
for rev, i := range revStartPositions {
if i < threshStart && rev < latestRev {
latestRev = rev
}
}
if latestRev < origLatestRev {
ptrs = ptrs[revStartPositions[latestRev]:]
fbm.log.CDebugf(ctx, "Shortening GC range from [%d:%d] to [%d:%d],"+
" reducing pointers from %d to %d", earliestRev, origLatestRev,
earliestRev, latestRev, origPtrsLen, len(ptrs))
complete = false
}
}
return ptrs, latestRev, complete, nil
}
func (fbm *folderBlockManager) finalizeReclamation(ctx context.Context,
ptrs []BlockPointer, zeroRefCounts []BlockID,
latestRev MetadataRevision) error {
gco := newGCOp(latestRev)
for _, id := range zeroRefCounts {
gco.AddUnrefBlock(BlockPointer{ID: id})
}
fbm.log.CDebugf(ctx, "Finalizing reclamation %s with %d ptrs", gco,
len(ptrs))
// finalizeGCOp could wait indefinitely on locks, so run it in a
// goroutine.
return runUnlessCanceled(ctx,
func() error { return fbm.helper.finalizeGCOp(ctx, gco) })
}
func (fbm *folderBlockManager) isQRNecessary(head *RootMetadata) bool {
if head == nil {
return false
}
// Do QR if:
// * The head has changed since last time, OR
// * The last QR did not completely clean every available thing
if head.Revision != fbm.lastQRHeadRev || !fbm.wasLastQRComplete {
return true
}
// Do QR if the head was not reclaimable at the last QR time, but
// is old enough now.
return fbm.lastQRHeadRev > fbm.lastQROldEnoughRev && fbm.isOldEnough(head)
}
func (fbm *folderBlockManager) doReclamation(timer *time.Timer) (err error) {
ctx, cancel := context.WithCancel(fbm.ctxWithFBMID(context.Background()))
fbm.setReclamationCancel(cancel)
defer fbm.cancelReclamation()
defer timer.Reset(fbm.config.QuotaReclamationPeriod())
defer fbm.reclamationGroup.Done()
// Don't set a context deadline. For users that have written a
// lot of updates since their last QR, this might involve fetching
// a lot of MD updates in small chunks. It doesn't hold locks for
// any considerable amount of time, so it should be safe to let it
// run indefinitely.
// First get the current head, and see if we're staged or not.
head, err := fbm.helper.getMDForFBM(ctx)
if err != nil {
return err
} else if err := head.isReadableOrError(ctx, fbm.config); err != nil {
return err
} else if head.MergedStatus() != Merged {
return errors.New("Skipping quota reclamation while unstaged")
}
// Make sure we're a writer
username, uid, err := fbm.config.KBPKI().GetCurrentUserInfo(ctx)
if err != nil {
return err
}
if !head.GetTlfHandle().IsWriter(uid) {
return NewWriteAccessError(head.GetTlfHandle(), username)
}
if !fbm.isQRNecessary(head) {
// Nothing has changed since last time, so no need to do any QR.
return nil
}
var mostRecentOldEnoughRev MetadataRevision
var complete bool
defer func() {
// Remember the QR we just performed.
if err == nil && head != nil {
fbm.lastQRHeadRev = head.Revision
fbm.lastQROldEnoughRev = mostRecentOldEnoughRev
fbm.wasLastQRComplete = complete
}
}()
// Then grab the lock for this folder, so we're the only one doing
// garbage collection for a while.
locked, err := fbm.config.MDServer().TruncateLock(ctx, fbm.id)
if err != nil {
return err
}
if !locked {
fbm.log.CDebugf(ctx, "Couldn't get the truncate lock")
return fmt.Errorf("Couldn't get the truncate lock for folder %d",
fbm.id)
}
defer func() {
unlocked, unlockErr := fbm.config.MDServer().TruncateUnlock(ctx, fbm.id)
if unlockErr != nil {
fbm.log.CDebugf(ctx, "Couldn't release the truncate lock: %v",
unlockErr)
}
if !unlocked {
fbm.log.CDebugf(ctx, "Couldn't unlock the truncate lock")
}
}()
mostRecentOldEnoughRev, lastGCRev, err :=
fbm.getMostRecentOldEnoughAndGCRevisions(ctx, head)
if err != nil {
return err
}
if mostRecentOldEnoughRev == MetadataRevisionUninitialized ||
mostRecentOldEnoughRev <= lastGCRev {
// TODO: need a log level more fine-grained than Debug to
// print out that we're not doing reclamation.
complete = true
return nil
}
// Don't try to do too many at a time.
shortened := false
if mostRecentOldEnoughRev-lastGCRev > numMaxRevisionsPerQR {
mostRecentOldEnoughRev = lastGCRev + numMaxRevisionsPerQR
shortened = true
}
// Don't print these until we know for sure that we'll be
// reclaiming some quota, to avoid log pollution.
fbm.log.CDebugf(ctx, "Starting quota reclamation process")
defer func() {
fbm.log.CDebugf(ctx, "Ending quota reclamation process: %v", err)
fbm.lastReclamationTimeLock.Lock()
defer fbm.lastReclamationTimeLock.Unlock()
fbm.lastReclamationTime = fbm.config.Clock().Now()
}()
ptrs, latestRev, complete, err :=
fbm.getUnreferencedBlocks(ctx, mostRecentOldEnoughRev, lastGCRev)
if err != nil {
return err
}
if len(ptrs) == 0 && !shortened {
complete = true
return nil
}
zeroRefCounts, err := fbm.deleteBlockRefs(ctx, head, ptrs)
if err != nil {
return err
}
return fbm.finalizeReclamation(ctx, ptrs, zeroRefCounts, latestRev)
}
func (fbm *folderBlockManager) reclaimQuotaInBackground() {
timer := time.NewTimer(fbm.config.QuotaReclamationPeriod())
timerChan := timer.C
for {
// Don't let the timer fire if auto-reclamation is turned off.
if fbm.config.QuotaReclamationPeriod().Seconds() == 0 {
timer.Stop()
// Use a channel that will never fire instead.
timerChan = make(chan time.Time)
}
select {
case <-fbm.shutdownChan:
return
case <-timerChan:
fbm.reclamationGroup.Add(1)
case <-fbm.forceReclamationChan:
}
err := fbm.doReclamation(timer)
if _, ok := err.(WriteAccessError); ok {
// If we got a write access error, don't bother with the
// timer anymore. Don't completely shut down, since we
// don't want forced reclamations to hang.
timer.Stop()
timerChan = make(chan time.Time)
}
}
}
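// Editor's note: replacing timerChan with a freshly made channel that nothing
// ever writes to is a common Go idiom for making a select case dormant; it is
// used above both when the reclamation period is zero and after a
// WriteAccessError:
//
//	timerChan = make(chan time.Time) // never fires, so the case never runs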
func (fbm *folderBlockManager) getLastReclamationTime() time.Time {
fbm.lastReclamationTimeLock.Lock()
defer fbm.lastReclamationTimeLock.Unlock()
return fbm.lastReclamationTime
}
| 1 | 12,011 | Shall we maybe put this in a `const` like `backgroundTaskTimeout`? | keybase-kbfs | go |
@@ -272,6 +272,11 @@ namespace Datadog.Trace.TestHelpers
ctx.Response.OutputStream.Write(buffer, 0, buffer.Length);
ctx.Response.Close();
}
+ catch (InvalidOperationException)
+ {
+ // this can occur when setting Response.ContentLength64, with the framework claiming that the response has already been submitted
+ // for now ignore, and we'll see if this introduces downstream issues
+ }
catch (HttpListenerException)
{
// listener was stopped, | 1 | using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
using System.Collections.Immutable;
using System.Collections.Specialized;
using System.Diagnostics;
using System.Linq;
using System.Net;
using System.Net.Sockets;
using System.Text;
using System.Threading;
using Datadog.Core.Tools;
using Datadog.Trace.ExtensionMethods;
using MessagePack;
namespace Datadog.Trace.TestHelpers
{
public class MockTracerAgent : IDisposable
{
private readonly HttpListener _listener;
private readonly UdpClient _udpClient;
private readonly Thread _listenerThread;
private readonly Thread _statsdThread;
private readonly CancellationTokenSource _cancellationTokenSource;
public MockTracerAgent(int port = 8126, int retries = 5, bool useStatsd = false)
{
_cancellationTokenSource = new CancellationTokenSource();
if (useStatsd)
{
const int basePort = 11555;
var retriesLeft = retries;
while (true)
{
try
{
_udpClient = new UdpClient(basePort + retriesLeft);
}
catch (Exception) when (retriesLeft > 0)
{
retriesLeft--;
continue;
}
_statsdThread = new Thread(HandleStatsdRequests) { IsBackground = true };
_statsdThread.Start();
StatsdPort = basePort + retriesLeft;
break;
}
}
// try up to 5 consecutive ports before giving up
while (true)
{
// seems like we can't reuse a listener if it fails to start,
// so create a new listener each time we retry
var listener = new HttpListener();
listener.Prefixes.Add($"http://127.0.0.1:{port}/");
listener.Prefixes.Add($"http://localhost:{port}/");
try
{
listener.Start();
// successfully listening
Port = port;
_listener = listener;
_listenerThread = new Thread(HandleHttpRequests);
_listenerThread.Start();
return;
}
catch (HttpListenerException) when (retries > 0)
{
// only catch the exception if there are retries left
port = TcpPortProvider.GetOpenPort();
retries--;
}
// always close listener if exception is thrown,
// whether it was caught or not
listener.Close();
}
}
public event EventHandler<EventArgs<HttpListenerContext>> RequestReceived;
public event EventHandler<EventArgs<IList<IList<Span>>>> RequestDeserialized;
/// <summary>
/// Gets or sets a value indicating whether to skip serialization of traces.
/// </summary>
public bool ShouldDeserializeTraces { get; set; } = true;
/// <summary>
/// Gets the TCP port that this Agent is listening on.
/// Can be different from <see cref="MockTracerAgent(int, int, bool)"/>'s <c>port</c>
/// parameter if listening on that port fails.
/// </summary>
public int Port { get; }
/// <summary>
/// Gets the UDP port for statsd
/// </summary>
public int StatsdPort { get; }
/// <summary>
/// Gets the filters used to filter out spans we don't want to look at for a test.
/// </summary>
public List<Func<Span, bool>> SpanFilters { get; private set; } = new List<Func<Span, bool>>();
public IImmutableList<Span> Spans { get; private set; } = ImmutableList<Span>.Empty;
public IImmutableList<NameValueCollection> RequestHeaders { get; private set; } = ImmutableList<NameValueCollection>.Empty;
public ConcurrentQueue<string> StatsdRequests { get; } = new ConcurrentQueue<string>();
/// <summary>
/// Wait for the given number of spans to appear.
/// </summary>
/// <param name="count">The expected number of spans.</param>
/// <param name="timeoutInMilliseconds">The timeout</param>
/// <param name="operationName">The integration we're testing</param>
/// <param name="minDateTime">Minimum time to check for spans from</param>
/// <param name="returnAllOperations">When true, returns every span regardless of operation name</param>
/// <returns>The list of spans.</returns>
public IImmutableList<Span> WaitForSpans(
int count,
int timeoutInMilliseconds = 20000,
string operationName = null,
DateTimeOffset? minDateTime = null,
bool returnAllOperations = false)
{
var deadline = DateTime.Now.AddMilliseconds(timeoutInMilliseconds);
var minimumOffset = (minDateTime ?? DateTimeOffset.MinValue).ToUnixTimeNanoseconds();
IImmutableList<Span> relevantSpans = ImmutableList<Span>.Empty;
while (DateTime.Now < deadline)
{
relevantSpans =
Spans
.Where(s => SpanFilters.All(shouldReturn => shouldReturn(s)))
.Where(s => s.Start > minimumOffset)
.ToImmutableList();
if (relevantSpans.Count(s => operationName == null || s.Name == operationName) >= count)
{
break;
}
Thread.Sleep(500);
}
foreach (var headers in RequestHeaders)
{
// This is the place to check against headers we expect
AssertHeader(
headers,
"X-Datadog-Trace-Count",
header =>
{
if (int.TryParse(header, out int traceCount))
{
return traceCount >= 0;
}
return false;
});
}
if (!returnAllOperations)
{
relevantSpans =
relevantSpans
.Where(s => operationName == null || s.Name == operationName)
.ToImmutableList();
}
return relevantSpans;
}
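// Editor's note: illustrative usage from a test (the operation name below is
// hypothetical, not part of this helper):
//
//     using (var agent = new MockTracerAgent(port: 8126))
//     {
//         // ... exercise the instrumented code under test ...
//         var spans = agent.WaitForSpans(count: 1, operationName: "http.request");
//     }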
public void Dispose()
{
_listener?.Stop();
_cancellationTokenSource.Cancel();
_udpClient?.Close();
}
protected virtual void OnRequestReceived(HttpListenerContext context)
{
RequestReceived?.Invoke(this, new EventArgs<HttpListenerContext>(context));
}
protected virtual void OnRequestDeserialized(IList<IList<Span>> traces)
{
RequestDeserialized?.Invoke(this, new EventArgs<IList<IList<Span>>>(traces));
}
private void AssertHeader(
NameValueCollection headers,
string headerKey,
Func<string, bool> assertion)
{
var header = headers.Get(headerKey);
if (string.IsNullOrEmpty(header))
{
throw new Exception($"Every submission to the agent should have a {headerKey} header.");
}
if (!assertion(header))
{
throw new Exception($"Failed assertion for {headerKey} on {header}");
}
}
private void HandleStatsdRequests()
{
var endPoint = new IPEndPoint(IPAddress.Loopback, 0);
while (!_cancellationTokenSource.IsCancellationRequested)
{
try
{
var buffer = _udpClient.Receive(ref endPoint);
StatsdRequests.Enqueue(Encoding.UTF8.GetString(buffer));
}
catch (Exception) when (_cancellationTokenSource.IsCancellationRequested)
{
return;
}
}
}
private void HandleHttpRequests()
{
while (_listener.IsListening)
{
try
{
var ctx = _listener.GetContext();
OnRequestReceived(ctx);
if (ShouldDeserializeTraces)
{
var spans = MessagePackSerializer.Deserialize<IList<IList<Span>>>(ctx.Request.InputStream);
OnRequestDeserialized(spans);
lock (this)
{
// we only need to lock when replacing the span collection,
// not when reading it because it is immutable
Spans = Spans.AddRange(spans.SelectMany(trace => trace));
RequestHeaders = RequestHeaders.Add(new NameValueCollection(ctx.Request.Headers));
}
}
// NOTE: HttpStreamRequest doesn't support Transfer-Encoding: Chunked
// (Setting content-length avoids that)
ctx.Response.ContentType = "application/json";
var buffer = Encoding.UTF8.GetBytes("{}");
ctx.Response.ContentLength64 = buffer.LongLength;
ctx.Response.OutputStream.Write(buffer, 0, buffer.Length);
ctx.Response.Close();
}
catch (HttpListenerException)
{
// listener was stopped,
// ignore to let the loop end and the method return
}
catch (ObjectDisposedException)
{
// the response has been already disposed.
}
catch (Exception) when (!_listener.IsListening)
{
// we don't care about any exception when listener is stopped
}
}
}
[MessagePackObject]
[DebuggerDisplay("TraceId={TraceId}, SpanId={SpanId}, Service={Service}, Name={Name}, Resource={Resource}")]
public class Span
{
[Key("trace_id")]
public ulong TraceId { get; set; }
[Key("span_id")]
public ulong SpanId { get; set; }
[Key("name")]
public string Name { get; set; }
[Key("resource")]
public string Resource { get; set; }
[Key("service")]
public string Service { get; set; }
[Key("type")]
public string Type { get; set; }
[Key("start")]
public long Start { get; set; }
[Key("duration")]
public long Duration { get; set; }
[Key("parent_id")]
public ulong? ParentId { get; set; }
[Key("error")]
public byte Error { get; set; }
[Key("meta")]
public Dictionary<string, string> Tags { get; set; }
[Key("metrics")]
public Dictionary<string, double> Metrics { get; set; }
public override string ToString()
{
return $"TraceId={TraceId}, SpanId={SpanId}, Service={Service}, Name={Name}, Resource={Resource}, Type={Type}";
}
}
}
}
| 1 | 19,598 | CI is complaining about this. | DataDog-dd-trace-dotnet | .cs |
@@ -31,6 +31,14 @@ import java.util.Date;
*/
public class DefaultHistoryRemovalTimeProvider implements HistoryRemovalTimeProvider {
+ public static Date determineRemovalTime(Date initTime, Integer timeToLive) {
+ Calendar removalTime = Calendar.getInstance();
+ removalTime.setTime(initTime);
+ removalTime.add(Calendar.DATE, timeToLive);
+
+ return removalTime.getTime();
+ }
+
public Date calculateRemovalTime(HistoricProcessInstanceEventEntity historicRootProcessInstance, ProcessDefinition processDefinition) {
Integer historyTimeToLive = processDefinition.getHistoryTimeToLive(); | 1 | /*
* Copyright Camunda Services GmbH and/or licensed to Camunda Services GmbH
* under one or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information regarding copyright
* ownership. Camunda licenses this file to you under the Apache License,
* Version 2.0; you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.camunda.bpm.engine.impl.history;
import org.camunda.bpm.engine.impl.batch.history.HistoricBatchEntity;
import org.camunda.bpm.engine.impl.context.Context;
import org.camunda.bpm.engine.impl.history.event.HistoricDecisionInstanceEntity;
import org.camunda.bpm.engine.impl.history.event.HistoricProcessInstanceEventEntity;
import org.camunda.bpm.engine.repository.DecisionDefinition;
import org.camunda.bpm.engine.repository.ProcessDefinition;
import java.util.Calendar;
import java.util.Date;
/**
* @author Tassilo Weidner
*/
public class DefaultHistoryRemovalTimeProvider implements HistoryRemovalTimeProvider {
public Date calculateRemovalTime(HistoricProcessInstanceEventEntity historicRootProcessInstance, ProcessDefinition processDefinition) {
Integer historyTimeToLive = processDefinition.getHistoryTimeToLive();
if (historyTimeToLive != null) {
if (isProcessInstanceRunning(historicRootProcessInstance)) {
Date startTime = historicRootProcessInstance.getStartTime();
return determineRemovalTime(startTime, historyTimeToLive);
} else if (isProcessInstanceEnded(historicRootProcessInstance)) {
Date endTime = historicRootProcessInstance.getEndTime();
return determineRemovalTime(endTime, historyTimeToLive);
}
}
return null;
}
public Date calculateRemovalTime(HistoricDecisionInstanceEntity historicRootDecisionInstance, DecisionDefinition decisionDefinition) {
Integer historyTimeToLive = decisionDefinition.getHistoryTimeToLive();
if (historyTimeToLive != null) {
Date evaluationTime = historicRootDecisionInstance.getEvaluationTime();
return determineRemovalTime(evaluationTime, historyTimeToLive);
}
return null;
}
public Date calculateRemovalTime(HistoricBatchEntity historicBatch) {
String batchOperation = historicBatch.getType();
if (batchOperation != null) {
Integer historyTimeToLive = getTTLByBatchOperation(batchOperation);
if (historyTimeToLive != null) {
if (isBatchRunning(historicBatch)) {
Date startTime = historicBatch.getStartTime();
return determineRemovalTime(startTime, historyTimeToLive);
} else if (isBatchEnded(historicBatch)) {
Date endTime = historicBatch.getEndTime();
return determineRemovalTime(endTime, historyTimeToLive);
}
}
}
return null;
}
protected boolean isBatchRunning(HistoricBatchEntity historicBatch) {
return historicBatch.getEndTime() == null;
}
protected boolean isBatchEnded(HistoricBatchEntity historicBatch) {
return historicBatch.getEndTime() != null;
}
protected Integer getTTLByBatchOperation(String batchOperation) {
return Context.getCommandContext()
.getProcessEngineConfiguration()
.getParsedBatchOperationsForHistoryCleanup()
.get(batchOperation);
}
protected boolean isProcessInstanceRunning(HistoricProcessInstanceEventEntity historicProcessInstance) {
return historicProcessInstance.getEndTime() == null;
}
protected boolean isProcessInstanceEnded(HistoricProcessInstanceEventEntity historicProcessInstance) {
return historicProcessInstance.getEndTime() != null;
}
protected Date determineRemovalTime(Date initTime, Integer timeToLive) {
Calendar removalTime = Calendar.getInstance();
removalTime.setTime(initTime);
removalTime.add(Calendar.DATE, timeToLive);
return removalTime.getTime();
}
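// Editor's note (illustrative arithmetic): determineRemovalTime shifts the
// base timestamp by historyTimeToLive calendar days, so an initTime of
// 2019-01-10T08:00 with a timeToLive of 5 yields 2019-01-15T08:00;
// Calendar.add(Calendar.DATE, ...) rolls over month and year boundaries.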
}
| 1 | 10,726 | Let's only change a file if it is really necessary. Such a change does not bring much value and makes it harder to find the original commit in which the method was introduced. | camunda-camunda-bpm-platform | java |
@@ -210,6 +210,14 @@ class ImageExtension extends Twig_Extension
$htmlAttributes = $attributes;
unset($htmlAttributes['type'], $htmlAttributes['size']);
+ $useLazyLoading = array_key_exists('lazy', $attributes) ? (bool)$attributes['lazy'] : true;
+ $isAttributeClassExistsAndNotEmpty = array_key_exists('class', $attributes) && $attributes['class'] !== '';
+ $htmlAttributes['class'] = sprintf(
+ '%s%s',
+ $useLazyLoading ? 'lazy' : '',
+ $isAttributeClassExistsAndNotEmpty ? ' ' . $attributes['class'] : ''
+ );
+
return $this->templating->render('@ShopsysFramework/Common/image.html.twig', [
'attr' => $htmlAttributes,
'additionalImagesData' => $additionalImagesData, | 1 | <?php
declare(strict_types=1);
namespace Shopsys\FrameworkBundle\Twig;
use Shopsys\FrameworkBundle\Component\Domain\Domain;
use Shopsys\FrameworkBundle\Component\Image\ImageFacade;
use Shopsys\FrameworkBundle\Component\Image\ImageLocator;
use Shopsys\FrameworkBundle\Component\Utils\Utils;
use Symfony\Bundle\FrameworkBundle\Templating\EngineInterface;
use Twig_Extension;
use Twig_SimpleFunction;
class ImageExtension extends Twig_Extension
{
protected const NOIMAGE_FILENAME = 'noimage.png';
/**
* @var string
*/
protected $frontDesignImageUrlPrefix;
/**
* @var \Shopsys\FrameworkBundle\Component\Domain\Domain
*/
protected $domain;
/**
* @var \Shopsys\FrameworkBundle\Component\Image\ImageLocator
*/
protected $imageLocator;
/**
* @var \Shopsys\FrameworkBundle\Component\Image\ImageFacade
*/
protected $imageFacade;
/**
* @var \Symfony\Component\Templating\EngineInterface
*/
protected $templating;
/**
* @param string $frontDesignImageUrlPrefix
* @param \Shopsys\FrameworkBundle\Component\Domain\Domain $domain
* @param \Shopsys\FrameworkBundle\Component\Image\ImageLocator $imageLocator
* @param \Shopsys\FrameworkBundle\Component\Image\ImageFacade $imageFacade
* @param \Symfony\Bundle\FrameworkBundle\Templating\EngineInterface $templating
*/
public function __construct(
$frontDesignImageUrlPrefix,
Domain $domain,
ImageLocator $imageLocator,
ImageFacade $imageFacade,
EngineInterface $templating
) {
$this->frontDesignImageUrlPrefix = rtrim($frontDesignImageUrlPrefix, '/');
$this->domain = $domain;
$this->imageLocator = $imageLocator;
$this->imageFacade = $imageFacade;
$this->templating = $templating;
}
/**
* @return array
*/
public function getFunctions()
{
return [
new Twig_SimpleFunction('imageExists', [$this, 'imageExists']),
new Twig_SimpleFunction('imageUrl', [$this, 'getImageUrl']),
new Twig_SimpleFunction('image', [$this, 'getImageHtml'], ['is_safe' => ['html']]),
new Twig_SimpleFunction('noimage', [$this, 'getNoimageHtml'], ['is_safe' => ['html']]),
new Twig_SimpleFunction('getImages', [$this, 'getImages']),
];
}
/**
* @param \Shopsys\FrameworkBundle\Component\Image\Image|object $imageOrEntity
* @param string|null $type
* @return bool
*/
public function imageExists($imageOrEntity, $type = null)
{
try {
$image = $this->imageFacade->getImageByObject($imageOrEntity, $type);
} catch (\Shopsys\FrameworkBundle\Component\Image\Exception\ImageNotFoundException $e) {
return false;
}
return $this->imageLocator->imageExists($image);
}
/**
* @param \Shopsys\FrameworkBundle\Component\Image\Image|object $imageOrEntity
* @param string|null $sizeName
* @param string|null $type
* @return string
*/
public function getImageUrl($imageOrEntity, $sizeName = null, $type = null)
{
try {
return $this->imageFacade->getImageUrl($this->domain->getCurrentDomainConfig(), $imageOrEntity, $sizeName, $type);
} catch (\Shopsys\FrameworkBundle\Component\Image\Exception\ImageNotFoundException $e) {
return $this->getEmptyImageUrl();
}
}
/**
* @param object $entity
* @param string|null $type
* @return \Shopsys\FrameworkBundle\Component\Image\Image[]
*/
public function getImages($entity, $type = null)
{
return $this->imageFacade->getImagesByEntityIndexedById($entity, $type);
}
/**
* @param \Shopsys\FrameworkBundle\Component\Image\Image|object $imageOrEntity
* @param array $attributes
* @return string
*/
public function getImageHtml($imageOrEntity, array $attributes = [])
{
$this->preventDefault($attributes);
try {
$image = $this->imageFacade->getImageByObject($imageOrEntity, $attributes['type']);
$entityName = $image->getEntityName();
$attributes['src'] = $this->getImageUrl($image, $attributes['size'], $attributes['type']);
$additionalImagesData = $this->imageFacade->getAdditionalImagesData($this->domain->getCurrentDomainConfig(), $image, $attributes['size'], $attributes['type']);
return $this->getImageHtmlByEntityName($attributes, $entityName, $additionalImagesData);
} catch (\Shopsys\FrameworkBundle\Component\Image\Exception\ImageNotFoundException $e) {
return $this->getNoimageHtml($attributes);
}
}
/**
* @param array $attributes
* @return string
*/
public function getNoimageHtml(array $attributes = [])
{
$this->preventDefault($attributes);
$entityName = 'noimage';
$attributes['src'] = $this->getEmptyImageUrl();
$additionalImagesData = [];
return $this->getImageHtmlByEntityName($attributes, $entityName, $additionalImagesData);
}
/**
* @return string
*/
protected function getEmptyImageUrl(): string
{
return $this->domain->getUrl() . $this->frontDesignImageUrlPrefix . '/' . static::NOIMAGE_FILENAME;
}
/**
* @param string $entityName
* @param string|null $type
* @param string|null $sizeName
* @return string
*/
protected function getImageCssClass($entityName, $type, $sizeName)
{
$allClassParts = [
'image',
$entityName,
$type,
$sizeName,
];
$classParts = array_filter($allClassParts);
return implode('-', $classParts);
}
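// Editor's note (illustrative): getImageCssClass('product', 'gallery', 'thumbnail')
// returns 'image-product-gallery-thumbnail'; null parts are dropped by
// array_filter(), so getImageCssClass('product', null, null) returns 'image-product'.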
/**
* @return string
*/
public function getName()
{
return 'image_extension';
}
/**
* @param array $attributes
*/
protected function preventDefault(array &$attributes): void
{
Utils::setArrayDefaultValue($attributes, 'type');
Utils::setArrayDefaultValue($attributes, 'size');
Utils::setArrayDefaultValue($attributes, 'alt', '');
Utils::setArrayDefaultValue($attributes, 'title', $attributes['alt']);
}
/**
* @param array $attributes
* @param string $entityName
* @param \Shopsys\FrameworkBundle\Component\Image\AdditionalImageData[] $additionalImagesData
* @return string
*/
protected function getImageHtmlByEntityName(array $attributes, $entityName, $additionalImagesData = []): string
{
$htmlAttributes = $attributes;
unset($htmlAttributes['type'], $htmlAttributes['size']);
return $this->templating->render('@ShopsysFramework/Common/image.html.twig', [
'attr' => $htmlAttributes,
'additionalImagesData' => $additionalImagesData,
'imageCssClass' => $this->getImageCssClass($entityName, $attributes['type'], $attributes['size']),
]);
}
}
| 1 | 19,667 | I would prefer to set up space between classes in format pattern, `%s %s` and then use `trim()` to remove unnecessary whitespaces. This will also solve stripping whitespaces from the beginning and end of a string `$attributes['class']` | shopsys-shopsys | php |
@@ -22,9 +22,13 @@ AdminJobExecutor::AdminJobExecutor(Sentence *sentence,
void AdminJobExecutor::execute() {
LOG(INFO) << __func__ << " enter";
- auto opEnum = toAdminJobOp(sentence_->getType());
+ auto optOpEnum = toAdminJobOp(sentence_->getType());
+ if (!optOpEnum) {
+ LOG(ERROR) << "unknown sentence type[" << sentence_->getType() << "]";
+ }
auto paras = sentence_->getParas();
+ auto opEnum = *optOpEnum;
if (opNeedsSpace(opEnum)) {
auto status = checkIfGraphSpaceChosen();
if (!status.ok()) { | 1 | /* Copyright (c) 2019 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "gen-cpp2/meta_types.h"
#include "http/HttpClient.h"
#include "graph/AdminJobExecutor.h"
#include "process/ProcessUtils.h"
#include "webservice/Common.h"
#include <folly/executors/Async.h>
#include <folly/futures/Future.h>
namespace nebula {
namespace graph {
AdminJobExecutor::AdminJobExecutor(Sentence *sentence,
ExecutionContext *ectx) : Executor(ectx) {
sentence_ = static_cast<AdminSentence*>(sentence);
}
void AdminJobExecutor::execute() {
LOG(INFO) << __func__ << " enter";
auto opEnum = toAdminJobOp(sentence_->getType());
auto paras = sentence_->getParas();
if (opNeedsSpace(opEnum)) {
auto status = checkIfGraphSpaceChosen();
if (!status.ok()) {
DCHECK(onError_);
onError_(std::move(status));
return;
}
paras.emplace_back(ectx()->rctx()->session()->spaceName());
}
auto future = ectx()->getMetaClient()->submitJob(opEnum, paras);
auto *runner = ectx()->rctx()->runner();
auto cb = [this, opEnum] (auto &&resp) {
if (!resp.ok()) {
DCHECK(onError_);
onError_(std::forward<nebula::Status>(resp.status()));
return;
}
resp_ = std::make_unique<cpp2::ExecutionResponse>();
auto header = getHeader(opEnum);
resp_->set_column_names(std::move(header));
auto result = toRowValues(opEnum, std::move(resp.value()));
resp_->set_rows(std::move(result));
DCHECK(onFinish_);
onFinish_(Executor::ProcessControl::kNext);
};
auto error = [this] (auto &&e) {
LOG(ERROR) << "Exception caught: " << e.what();
DCHECK(onError_);
onError_(Status::Error(folly::stringPrintf("Internal error : %s",
e.what().c_str())));
return;
};
std::move(future).via(runner).thenValue(cb).thenError(error);
}
Status AdminJobExecutor::prepare() {
return Status::OK();
}
bool AdminJobExecutor::opNeedsSpace(nebula::meta::cpp2::AdminJobOp op) {
return op == nebula::meta::cpp2::AdminJobOp::ADD;
}
void AdminJobExecutor::setupResponse(cpp2::ExecutionResponse &resp) {
resp = std::move(*resp_);
}
std::vector<std::string>
AdminJobExecutor::getHeader(nebula::meta::cpp2::AdminJobOp op, bool succeed) {
if (!succeed) {
return {"Error"};
}
switch (op) {
case nebula::meta::cpp2::AdminJobOp::ADD:
return {"New Job Id"};
case nebula::meta::cpp2::AdminJobOp::SHOW_All:
return {"Job Id", "Command", "Status", "Start Time", "Stop Time"};
case nebula::meta::cpp2::AdminJobOp::SHOW:
return {"Job Id(TaskId)", "Command(Dest)", "Status", "Start Time", "Stop Time"};
case nebula::meta::cpp2::AdminJobOp::STOP:
return {"Result"};
case nebula::meta::cpp2::AdminJobOp::RECOVER:
return {"Recovered job num"};
default:
return {"Result"};
}
}
nebula::meta::cpp2::AdminJobOp
AdminJobExecutor::toAdminJobOp(const std::string& op) {
if (op == "add_job") {
return nebula::meta::cpp2::AdminJobOp::ADD;
} else if (op == "show_jobs") {
return nebula::meta::cpp2::AdminJobOp::SHOW_All;
} else if (op == "show_job") {
return nebula::meta::cpp2::AdminJobOp::SHOW;
} else if (op == "stop_job") {
return nebula::meta::cpp2::AdminJobOp::STOP;
} else if (op == "recover_job") {
return nebula::meta::cpp2::AdminJobOp::RECOVER;
}
return nebula::meta::cpp2::AdminJobOp::INVALID;
}
std::string AdminJobExecutor::time2string(int64_t t) {
std::string ret;
if (t == 0) {
return ret;
}
std::time_t tm = t;
char mbstr[50];
int len = std::strftime(mbstr, sizeof(mbstr), "%x %X", std::localtime(&tm));
if (len != 0) {
ret = std::string(&mbstr[0], len);
}
return ret;
}
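// Editor's note (illustrative): time2string(0) yields "" so unfinished jobs
// render an empty stop time; non-zero epoch seconds go through std::strftime
// with "%x %X", i.e. the locale's preferred date and time representation
// (e.g. "08/15/19 10:30:00" in an en_US locale).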
cpp2::RowValue
AdminJobExecutor::toRowValue(const nebula::meta::cpp2::JobDesc& job) {
cpp2::RowValue ret;
std::vector<cpp2::ColumnValue> row(5);
row[0].set_str(std::to_string(job.get_id()));
std::stringstream oss;
oss << job.get_cmd() << " ";
for (auto& p : job.get_paras()) {
oss << p << " ";
}
row[1].set_str(oss.str());
row[2].set_str(toString(job.get_status()));
row[3].set_str(time2string(job.get_start_time()));
row[4].set_str(time2string(job.get_stop_time()));
ret.set_columns(std::move(row));
return ret;
}
cpp2::RowValue
AdminJobExecutor::toRowValue(const nebula::meta::cpp2::TaskDesc& task) {
cpp2::RowValue ret;
std::vector<cpp2::ColumnValue> row(5);
row[0].set_str(folly::stringPrintf("%d-%d", task.get_job_id(), task.get_task_id()));
row[1].set_str(toString(task.get_host()));
row[2].set_str(toString(task.get_status()));
row[3].set_str(time2string(task.get_start_time()));
row[4].set_str(time2string(task.get_stop_time()));
ret.set_columns(std::move(row));
return ret;
}
cpp2::RowValue AdminJobExecutor::toRowValue(std::string&& msg) {
cpp2::RowValue row;
std::vector<cpp2::ColumnValue> cols(1);
cols.back().set_str(std::move(msg));
row.set_columns(std::move(cols));
return row;
}
std::vector<cpp2::RowValue>
AdminJobExecutor::toRowValues(nebula::meta::cpp2::AdminJobOp op,
nebula::meta::cpp2::AdminJobResult &&resp) {
std::vector<cpp2::RowValue> ret;
switch (op) {
case nebula::meta::cpp2::AdminJobOp::ADD:
{
ret.emplace_back(toRowValue(std::to_string(*resp.get_job_id())));
}
break;
case nebula::meta::cpp2::AdminJobOp::SHOW_All:
{
for (auto& job : *resp.get_job_desc()) {
ret.emplace_back(toRowValue(job));
}
}
break;
case nebula::meta::cpp2::AdminJobOp::SHOW:
{
for (auto& job : *resp.get_job_desc()) {
ret.emplace_back(toRowValue(job));
}
for (auto& task : *resp.get_task_desc()) {
ret.emplace_back(toRowValue(task));
}
}
break;
case nebula::meta::cpp2::AdminJobOp::STOP:
{
ret.emplace_back(toRowValue("Job stopped"));
}
break;
case nebula::meta::cpp2::AdminJobOp::RECOVER:
{
auto msg = folly::stringPrintf("recoverd job num: %d",
*resp.get_recovered_job_num());
ret.emplace_back(toRowValue(std::move(msg)));
}
break;
default:
return ret;
}
return ret;
}
std::string
AdminJobExecutor::toString(nebula::meta::cpp2::JobStatus st) {
switch (st) {
case nebula::meta::cpp2::JobStatus::QUEUE:
return "queue";
case nebula::meta::cpp2::JobStatus::RUNNING:
return "running";
case nebula::meta::cpp2::JobStatus::FINISHED:
return "finished";
case nebula::meta::cpp2::JobStatus::FAILED:
return "failed";
case nebula::meta::cpp2::JobStatus::STOPPED:
return "stopped";
case nebula::meta::cpp2::JobStatus::INVALID:
return "invalid";
}
return "invalid st";
}
std::string AdminJobExecutor::toString(nebula::cpp2::HostAddr host) {
auto ip = network::NetworkUtils::intToIPv4(host.get_ip());
auto ret = folly::stringPrintf("%s:%d", ip.c_str(), host.get_port());
return ret;
}
} // namespace graph
} // namespace nebula
| 1 | 28,167 | When the op is illegal, should it return here? | vesoft-inc-nebula | cpp
@@ -447,8 +447,6 @@ public class FlowRunnerManager implements EventListener,
} catch (IOException e) {
logger.error(e);
}
-
- installedVersions.remove(versionKey);
}
}
} | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.execapp;
import azkaban.Constants;
import azkaban.executor.Status;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.lang.Thread.State;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Future;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import azkaban.event.Event;
import azkaban.event.EventListener;
import azkaban.execapp.event.FlowWatcher;
import azkaban.execapp.event.LocalFlowWatcher;
import azkaban.execapp.event.RemoteFlowWatcher;
import azkaban.execapp.metric.NumFailedFlowMetric;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.ExecutorLoader;
import azkaban.executor.ExecutorManagerException;
import azkaban.jobtype.JobTypeManager;
import azkaban.jobtype.JobTypeManagerException;
import azkaban.metric.MetricReportManager;
import azkaban.project.ProjectLoader;
import azkaban.project.ProjectWhitelist;
import azkaban.project.ProjectWhitelist.WhitelistType;
import azkaban.utils.FileIOUtils;
import azkaban.utils.FileIOUtils.JobMetaData;
import azkaban.utils.FileIOUtils.LogData;
import azkaban.utils.JSONUtils;
import azkaban.utils.Pair;
import azkaban.utils.Props;
import azkaban.utils.ThreadPoolExecutingListener;
import azkaban.utils.TrackingThreadPool;
/**
* Execution manager for the server side execution.
*
 * When a flow is submitted to FlowRunnerManager, it is in the
 * {@link Status#PREPARING} status. When a flow is about to be executed by
 * FlowRunner, its status is updated to {@link Status#RUNNING}.
 *
 * Two main data structures are used in this class to maintain flows.
 *
 * runningFlows: this is used for bookkeeping of the flows submitted to
 * FlowRunnerManager. It has nothing to do with the executor service that is
 * used to execute the flows. This bookkeeping is used at the time of canceling
 * or killing a flow. The flows in this data structure are removed in the
 * handleEvent method.
 *
 * submittedFlows: this is used to keep track of the execution of the flows, so it
 * has the mapping between a Future<?> and an execution id. This allows us
 * to find out the execution ids of the flows that are in the Status.PREPARING
 * status. The entries in this map are removed once the flow execution is
 * completed.
*
*
*/
public class FlowRunnerManager implements EventListener,
ThreadPoolExecutingListener {
private static final Logger logger = Logger.getLogger(FlowRunnerManager.class);
private static final String EXECUTOR_USE_BOUNDED_THREADPOOL_QUEUE = "executor.use.bounded.threadpool.queue";
private static final String EXECUTOR_THREADPOOL_WORKQUEUE_SIZE = "executor.threadpool.workqueue.size";
private static final String EXECUTOR_FLOW_THREADS = "executor.flow.threads";
private static final String FLOW_NUM_JOB_THREADS = "flow.num.job.threads";
  // time to keep recently finished flows around before cleanup: 1 minute
private static final int RECENTLY_FINISHED_TIME_TO_LIVE = 60 * 1000;
private static final int DEFAULT_NUM_EXECUTING_FLOWS = 30;
private static final int DEFAULT_FLOW_NUM_JOB_TREADS = 10;
// this map is used to store the flows that have been submitted to
// the executor service. Once a flow has been submitted, it is either
// in the queue waiting to be executed or in executing state.
private final Map<Future<?>, Integer> submittedFlows = new ConcurrentHashMap<>();
private final Map<Integer, FlowRunner> runningFlows = new ConcurrentHashMap<>();
private final Map<Integer, ExecutableFlow> recentlyFinishedFlows = new ConcurrentHashMap<>();
private final Map<Pair<Integer, Integer>, ProjectVersion> installedProjects;
private final TrackingThreadPool executorService;
private final CleanerThread cleanerThread;
private final ExecutorLoader executorLoader;
private final ProjectLoader projectLoader;
private final JobTypeManager jobtypeManager;
private final FlowPreparer flowPreparer;
private final Props azkabanProps;
private final File executionDirectory;
private final File projectDirectory;
private final Object executionDirDeletionSync = new Object();
private int numThreads = DEFAULT_NUM_EXECUTING_FLOWS;
private int threadPoolQueueSize = -1;
private int numJobThreadPerFlow = DEFAULT_FLOW_NUM_JOB_TREADS;
private Props globalProps;
private long lastCleanerThreadCheckTime = -1;
private long executionDirRetention = 1 * 24 * 60 * 60 * 1000; // 1 Day
// We want to limit the log sizes to about 20 megs
private String jobLogChunkSize = "5MB";
private int jobLogNumFiles = 4;
// If true, jobs will validate proxy user against a list of valid proxy users.
private boolean validateProxyUser = false;
  // date time of the last flow submitted.
private long lastFlowSubmittedDate = 0;
// whether the current executor is active
private volatile boolean isExecutorActive = false;
public FlowRunnerManager(Props props, ExecutorLoader executorLoader,
ProjectLoader projectLoader, ClassLoader parentClassLoader) throws IOException {
azkabanProps = props;
// JobWrappingFactory.init(props, getClass().getClassLoader());
executionDirRetention = props.getLong("execution.dir.retention", executionDirRetention);
logger.info("Execution dir retention set to " + executionDirRetention + " ms");
executionDirectory = new File(props.getString("azkaban.execution.dir", "executions"));
if (!executionDirectory.exists()) {
executionDirectory.mkdirs();
}
projectDirectory = new File(props.getString("azkaban.project.dir", "projects"));
if (!projectDirectory.exists()) {
projectDirectory.mkdirs();
}
installedProjects = loadExistingProjects();
// azkaban.temp.dir
numThreads = props.getInt(EXECUTOR_FLOW_THREADS, DEFAULT_NUM_EXECUTING_FLOWS);
numJobThreadPerFlow = props.getInt(FLOW_NUM_JOB_THREADS, DEFAULT_FLOW_NUM_JOB_TREADS);
executorService = createExecutorService(numThreads);
// Create a flow preparer
flowPreparer = new FlowPreparer(projectLoader, executionDirectory, projectDirectory, installedProjects);
this.executorLoader = executorLoader;
this.projectLoader = projectLoader;
this.jobLogChunkSize = azkabanProps.getString("job.log.chunk.size", "5MB");
this.jobLogNumFiles = azkabanProps.getInt("job.log.backup.index", 4);
this.validateProxyUser = azkabanProps.getBoolean("proxy.user.lock.down", false);
cleanerThread = new CleanerThread();
cleanerThread.start();
String globalPropsPath = props.getString("executor.global.properties", null);
if (globalPropsPath != null) {
globalProps = new Props(null, globalPropsPath);
}
jobtypeManager =
new JobTypeManager(props.getString(
AzkabanExecutorServer.JOBTYPE_PLUGIN_DIR,
JobTypeManager.DEFAULT_JOBTYPEPLUGINDIR), globalProps,
parentClassLoader);
}
private TrackingThreadPool createExecutorService(int nThreads) {
boolean useNewThreadPool =
azkabanProps.getBoolean(EXECUTOR_USE_BOUNDED_THREADPOOL_QUEUE, false);
logger.info("useNewThreadPool: " + useNewThreadPool);
if (useNewThreadPool) {
threadPoolQueueSize =
azkabanProps.getInt(EXECUTOR_THREADPOOL_WORKQUEUE_SIZE, nThreads);
logger.info("workQueueSize: " + threadPoolQueueSize);
// using a bounded queue for the work queue. The default rejection policy
// {@ThreadPoolExecutor.AbortPolicy} is used
TrackingThreadPool executor =
new TrackingThreadPool(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
new LinkedBlockingQueue<Runnable>(threadPoolQueueSize), this);
return executor;
} else {
// the old way of using unbounded task queue.
// if the running tasks are taking a long time or stuck, this queue
// will be very very long.
return new TrackingThreadPool(nThreads, nThreads, 0L,
TimeUnit.MILLISECONDS, new LinkedBlockingQueue<Runnable>(), this);
}
}
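  // Scans the project directory for entries named "<projectId>.<version>" and
  // indexes the installed project versions by (projectId, version).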
private Map<Pair<Integer, Integer>, ProjectVersion> loadExistingProjects() {
Map<Pair<Integer, Integer>, ProjectVersion> allProjects =
new HashMap<Pair<Integer, Integer>, ProjectVersion>();
for (File project : projectDirectory.listFiles(new FilenameFilter() {
String pattern = "[0-9]+\\.[0-9]+";
@Override
public boolean accept(File dir, String name) {
return name.matches(pattern);
}
})) {
if (project.isDirectory()) {
try {
String fileName = new File(project.getAbsolutePath()).getName();
int projectId = Integer.parseInt(fileName.split("\\.")[0]);
int versionNum = Integer.parseInt(fileName.split("\\.")[1]);
ProjectVersion version =
new ProjectVersion(projectId, versionNum, project);
allProjects.put(new Pair<Integer, Integer>(projectId, versionNum),
version);
} catch (Exception e) {
e.printStackTrace();
}
}
}
return allProjects;
}
public void setExecutorActive(boolean isActive) {
this.isExecutorActive = isActive;
}
public long getLastFlowSubmittedTime(){
    // Note: this is not thread safe and may result in providing dirty data.
    // We will provide this data as is for now and will revisit if there
    // is a strong justification for change.
return lastFlowSubmittedDate;
}
public Props getGlobalProps() {
return globalProps;
}
public void setGlobalProps(Props globalProps) {
this.globalProps = globalProps;
}
private class CleanerThread extends Thread {
// Every hour, clean execution dir.
private static final long EXECUTION_DIR_CLEAN_INTERVAL_MS = 60 * 60 * 1000;
// Every 5 mins clean the old project dir
private static final long OLD_PROJECT_DIR_INTERVAL_MS = 5 * 60 * 1000;
// Every 2 mins clean the recently finished list
private static final long RECENTLY_FINISHED_INTERVAL_MS = 2 * 60 * 1000;
// Every 5 mins kill flows running longer than allowed max running time
private static final long LONG_RUNNING_FLOW_KILLING_INTERVAL_MS = 5 * 60 * 1000;
private boolean shutdown = false;
private long lastExecutionDirCleanTime = -1;
private long lastOldProjectCleanTime = -1;
private long lastRecentlyFinishedCleanTime = -1;
private long lastLongRunningFlowCleanTime = -1;
private final long flowMaxRunningTimeInMins = azkabanProps.getInt(Constants.ConfigurationKeys.AZKABAN_MAX_FLOW_RUNNING_MINS, 60 * 24 * 10);
public CleanerThread() {
this.setName("FlowRunnerManager-Cleaner-Thread");
setDaemon(true);
}
@SuppressWarnings("unused")
public void shutdown() {
shutdown = true;
this.interrupt();
}
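    // A flow counts as long-running if it has started, is still in a
    // non-terminal state and has been running for at least flowMaxRunningTimeInMins.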
private boolean isFlowRunningLongerThan(ExecutableFlow flow, long flowMaxRunningTimeInMins) {
Set<Status> nonFinishingStatusAfterFlowStarts = new HashSet<>(Arrays.asList(Status.RUNNING, Status.QUEUED, Status.PAUSED, Status.FAILED_FINISHING));
return nonFinishingStatusAfterFlowStarts.contains(flow.getStatus()) && flow.getStartTime() > 0 && TimeUnit.MILLISECONDS.toMinutes(System.currentTimeMillis()-flow.getStartTime()) >= flowMaxRunningTimeInMins;
}
public void run() {
while (!shutdown) {
synchronized (this) {
try {
lastCleanerThreadCheckTime = System.currentTimeMillis();
logger.info("# of executing flows: " + getNumRunningFlows());
// Cleanup old stuff.
long currentTime = System.currentTimeMillis();
if (currentTime - RECENTLY_FINISHED_INTERVAL_MS > lastRecentlyFinishedCleanTime) {
logger.info("Cleaning recently finished");
cleanRecentlyFinished();
lastRecentlyFinishedCleanTime = currentTime;
}
if (currentTime - OLD_PROJECT_DIR_INTERVAL_MS > lastOldProjectCleanTime && isExecutorActive) {
logger.info("Cleaning old projects");
cleanOlderProjects();
lastOldProjectCleanTime = currentTime;
}
if (currentTime - EXECUTION_DIR_CLEAN_INTERVAL_MS > lastExecutionDirCleanTime) {
logger.info("Cleaning old execution dirs");
cleanOlderExecutionDirs();
lastExecutionDirCleanTime = currentTime;
}
if (flowMaxRunningTimeInMins > 0 && currentTime - LONG_RUNNING_FLOW_KILLING_INTERVAL_MS > lastLongRunningFlowCleanTime) {
logger.info(String.format("Killing long jobs running longer than %s mins", flowMaxRunningTimeInMins));
for (FlowRunner flowRunner : runningFlows.values()) {
if (isFlowRunningLongerThan(flowRunner.getExecutableFlow(), flowMaxRunningTimeInMins)) {
logger.info(String.format("Killing job [id: %s, status: %s]. It has been running for %s mins", flowRunner.getExecutableFlow().getId(), flowRunner.getExecutableFlow().getStatus(), TimeUnit.MILLISECONDS.toMinutes(System.currentTimeMillis()-flowRunner.getExecutableFlow().getStartTime())));
flowRunner.kill();
}
}
lastLongRunningFlowCleanTime = currentTime;
}
wait(RECENTLY_FINISHED_TIME_TO_LIVE);
} catch (InterruptedException e) {
logger.info("Interrupted. Probably to shut down.");
} catch (Throwable t) {
logger.warn(
"Uncaught throwable, please look into why it is not caught", t);
}
}
}
}
private void cleanOlderExecutionDirs() {
File dir = executionDirectory;
final long pastTimeThreshold =
System.currentTimeMillis() - executionDirRetention;
File[] executionDirs = dir.listFiles(path -> path.isDirectory() && path.lastModified() < pastTimeThreshold);
for (File exDir : executionDirs) {
try {
int execId = Integer.valueOf(exDir.getName());
if (runningFlows.containsKey(execId)
|| recentlyFinishedFlows.containsKey(execId)) {
continue;
}
} catch (NumberFormatException e) {
logger.error("Can't delete exec dir " + exDir.getName()
+ " it is not a number");
continue;
}
synchronized (executionDirDeletionSync) {
try {
FileUtils.deleteDirectory(exDir);
} catch (IOException e) {
logger.error("Error cleaning execution dir " + exDir.getPath(), e);
}
}
}
}
private void cleanRecentlyFinished() {
long cleanupThreshold =
System.currentTimeMillis() - RECENTLY_FINISHED_TIME_TO_LIVE;
ArrayList<Integer> executionToKill = new ArrayList<Integer>();
for (ExecutableFlow flow : recentlyFinishedFlows.values()) {
if (flow.getEndTime() < cleanupThreshold) {
executionToKill.add(flow.getExecutionId());
}
}
for (Integer id : executionToKill) {
logger.info("Cleaning execution " + id
+ " from recently finished flows list.");
recentlyFinishedFlows.remove(id);
}
}
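    // Keeps only the newest installed version of each project, deleting older
    // versions that are not referenced by any currently running flow.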
private void cleanOlderProjects() {
Map<Integer, ArrayList<ProjectVersion>> projectVersions =
new HashMap<Integer, ArrayList<ProjectVersion>>();
for (ProjectVersion version : installedProjects.values()) {
ArrayList<ProjectVersion> versionList =
projectVersions.get(version.getProjectId());
if (versionList == null) {
versionList = new ArrayList<ProjectVersion>();
projectVersions.put(version.getProjectId(), versionList);
}
versionList.add(version);
}
HashSet<Pair<Integer, Integer>> activeProjectVersions =
new HashSet<Pair<Integer, Integer>>();
for (FlowRunner runner : runningFlows.values()) {
ExecutableFlow flow = runner.getExecutableFlow();
activeProjectVersions.add(new Pair<Integer, Integer>(flow
.getProjectId(), flow.getVersion()));
}
for (Map.Entry<Integer, ArrayList<ProjectVersion>> entry : projectVersions
.entrySet()) {
// Integer projectId = entry.getKey();
ArrayList<ProjectVersion> installedVersions = entry.getValue();
// Keep one version of the project around.
if (installedVersions.size() == 1) {
continue;
}
Collections.sort(installedVersions);
for (int i = 0; i < installedVersions.size() - 1; ++i) {
ProjectVersion version = installedVersions.get(i);
Pair<Integer, Integer> versionKey =
new Pair<Integer, Integer>(version.getProjectId(),
version.getVersion());
if (!activeProjectVersions.contains(versionKey)) {
try {
logger.info("Removing old unused installed project "
+ version.getProjectId() + ":" + version.getVersion());
deleteDirectory(version);
installedProjects.remove(new Pair<Integer, Integer>(version
.getProjectId(), version.getVersion()));
} catch (IOException e) {
logger.error(e);
}
installedVersions.remove(versionKey);
}
}
}
}
}
public void deleteDirectory(ProjectVersion pv) throws IOException {
synchronized (pv) {
logger.warn("Deleting project: " + pv);
final File installedDir = pv.getInstalledDir();
if (installedDir != null && installedDir.exists()) {
FileUtils.deleteDirectory(installedDir);
}
}
}
public void submitFlow(int execId) throws ExecutorManagerException {
// Load file and submit
if (runningFlows.containsKey(execId)) {
throw new ExecutorManagerException("Execution " + execId
+ " is already running.");
}
ExecutableFlow flow = null;
flow = executorLoader.fetchExecutableFlow(execId);
if (flow == null) {
throw new ExecutorManagerException("Error loading flow with exec "
+ execId);
}
// Sets up the project files and execution directory.
flowPreparer.setup(flow);
// Setup flow runner
FlowWatcher watcher = null;
ExecutionOptions options = flow.getExecutionOptions();
if (options.getPipelineExecutionId() != null) {
Integer pipelineExecId = options.getPipelineExecutionId();
FlowRunner runner = runningFlows.get(pipelineExecId);
if (runner != null) {
watcher = new LocalFlowWatcher(runner);
} else {
watcher = new RemoteFlowWatcher(pipelineExecId, executorLoader);
}
}
int numJobThreads = numJobThreadPerFlow;
if (options.getFlowParameters().containsKey(FLOW_NUM_JOB_THREADS)) {
try {
int numJobs =
Integer.valueOf(options.getFlowParameters().get(
FLOW_NUM_JOB_THREADS));
if (numJobs > 0 && (numJobs <= numJobThreads || ProjectWhitelist
.isProjectWhitelisted(flow.getProjectId(),
WhitelistType.NumJobPerFlow))) {
numJobThreads = numJobs;
}
} catch (Exception e) {
throw new ExecutorManagerException(
"Failed to set the number of job threads "
+ options.getFlowParameters().get(FLOW_NUM_JOB_THREADS)
+ " for flow " + execId, e);
}
}
FlowRunner runner =
new FlowRunner(flow, executorLoader, projectLoader, jobtypeManager, azkabanProps);
runner.setFlowWatcher(watcher)
.setJobLogSettings(jobLogChunkSize, jobLogNumFiles)
.setValidateProxyUser(validateProxyUser)
.setNumJobThreads(numJobThreads).addListener(this);
configureFlowLevelMetrics(runner);
// Check again.
if (runningFlows.containsKey(execId)) {
throw new ExecutorManagerException("Execution " + execId
+ " is already running.");
}
// Finally, queue the sucker.
runningFlows.put(execId, runner);
try {
// The executorService already has a queue.
// The submit method below actually returns an instance of FutureTask,
// which implements interface RunnableFuture, which extends both
// Runnable and Future interfaces
Future<?> future = executorService.submit(runner);
// keep track of this future
submittedFlows.put(future, runner.getExecutionId());
// update the last submitted time.
this.lastFlowSubmittedDate = System.currentTimeMillis();
} catch (RejectedExecutionException re) {
throw new ExecutorManagerException(
"Azkaban server can't execute any more flows. "
+ "The number of running flows has reached the system configured limit."
+ "Please notify Azkaban administrators");
}
}
/**
* Configure Azkaban metrics tracking for a new flowRunner instance
*
* @param flowRunner
*/
private void configureFlowLevelMetrics(FlowRunner flowRunner) {
logger.info("Configuring Azkaban metrics tracking for flow runner object");
if (MetricReportManager.isAvailable()) {
MetricReportManager metricManager = MetricReportManager.getInstance();
// Adding NumFailedFlow Metric listener
flowRunner.addListener((NumFailedFlowMetric) metricManager
.getMetricFromName(NumFailedFlowMetric.NUM_FAILED_FLOW_METRIC_NAME));
}
}
public void cancelFlow(int execId, String user)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.kill(user);
}
public void pauseFlow(int execId, String user)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.pause(user);
}
public void resumeFlow(int execId, String user)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.resume(user);
}
public void retryFailures(int execId, String user)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Execution " + execId
+ " is not running.");
}
runner.retryFailures(user);
}
public ExecutableFlow getExecutableFlow(int execId) {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
return recentlyFinishedFlows.get(execId);
}
return runner.getExecutableFlow();
}
@Override
public void handleEvent(Event event) {
if (event.getType() == Event.Type.FLOW_FINISHED) {
FlowRunner flowRunner = (FlowRunner) event.getRunner();
ExecutableFlow flow = flowRunner.getExecutableFlow();
recentlyFinishedFlows.put(flow.getExecutionId(), flow);
logger.info("Flow " + flow.getExecutionId()
+ " is finished. Adding it to recently finished flows list.");
runningFlows.remove(flow.getExecutionId());
}
}
public LogData readFlowLogs(int execId, int startByte, int length)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir file doesn't exist. Probably has beend deleted");
}
File logFile = runner.getFlowLogFile();
if (logFile != null && logFile.exists()) {
return FileIOUtils.readUtf8File(logFile, startByte, length);
} else {
throw new ExecutorManagerException("Flow log file doesn't exist.");
}
}
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
public LogData readJobLogs(int execId, String jobId, int attempt,
int startByte, int length) throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir file doesn't exist. Probably has beend deleted");
}
File logFile = runner.getJobLogFile(jobId, attempt);
if (logFile != null && logFile.exists()) {
return FileIOUtils.readUtf8File(logFile, startByte, length);
} else {
throw new ExecutorManagerException("Job log file doesn't exist.");
}
}
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
public List<Object> readJobAttachments(int execId, String jobId, int attempt)
throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
File dir = runner.getExecutionDir();
if (dir == null || !dir.exists()) {
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
try {
synchronized (executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir file doesn't exist. Probably has beend deleted");
}
File attachmentFile = runner.getJobAttachmentFile(jobId, attempt);
if (attachmentFile == null || !attachmentFile.exists()) {
return null;
}
@SuppressWarnings("unchecked")
List<Object> jobAttachments =
(ArrayList<Object>) JSONUtils.parseJSONFromFile(attachmentFile);
return jobAttachments;
}
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
public JobMetaData readJobMetaData(int execId, String jobId, int attempt,
int startByte, int length) throws ExecutorManagerException {
FlowRunner runner = runningFlows.get(execId);
if (runner == null) {
throw new ExecutorManagerException("Running flow " + execId
+ " not found.");
}
File dir = runner.getExecutionDir();
if (dir != null && dir.exists()) {
try {
synchronized (executionDirDeletionSync) {
if (!dir.exists()) {
throw new ExecutorManagerException(
"Execution dir file doesn't exist. Probably has beend deleted");
}
File metaDataFile = runner.getJobMetaDataFile(jobId, attempt);
if (metaDataFile != null && metaDataFile.exists()) {
return FileIOUtils.readUtf8MetaDataFile(metaDataFile, startByte,
length);
} else {
throw new ExecutorManagerException("Job log file doesn't exist.");
}
}
} catch (IOException e) {
throw new ExecutorManagerException(e);
}
}
throw new ExecutorManagerException(
"Error reading file. Log directory doesn't exist.");
}
public long getLastCleanerThreadCheckTime() {
return lastCleanerThreadCheckTime;
}
public boolean isCleanerThreadActive() {
return this.cleanerThread.isAlive();
}
public State getCleanerThreadState() {
return this.cleanerThread.getState();
}
public boolean isExecutorThreadPoolShutdown() {
return executorService.isShutdown();
}
public int getNumQueuedFlows() {
return executorService.getQueue().size();
}
public int getNumRunningFlows() {
return executorService.getActiveCount();
}
public String getRunningFlowIds() {
// The in progress tasks are actually of type FutureTask
Set<Runnable> inProgressTasks = executorService.getInProgressTasks();
List<Integer> runningFlowIds =
new ArrayList<Integer>(inProgressTasks.size());
for (Runnable task : inProgressTasks) {
// add casting here to ensure it matches the expected type in
// submittedFlows
Integer execId = submittedFlows.get((Future<?>) task);
if (execId != null) {
runningFlowIds.add(execId);
} else {
logger.warn("getRunningFlowIds: got null execId for task: " + task);
}
}
Collections.sort(runningFlowIds);
return runningFlowIds.toString();
}
public String getQueuedFlowIds() {
List<Integer> flowIdList =
new ArrayList<Integer>(executorService.getQueue().size());
for (Runnable task : executorService.getQueue()) {
Integer execId = submittedFlows.get(task);
if (execId != null) {
flowIdList.add(execId);
} else {
logger
.warn("getQueuedFlowIds: got null execId for queuedTask: " + task);
}
}
Collections.sort(flowIdList);
return flowIdList.toString();
}
public int getMaxNumRunningFlows() {
return numThreads;
}
public int getTheadPoolQueueSize() {
return threadPoolQueueSize;
}
public void reloadJobTypePlugins() throws JobTypeManagerException {
jobtypeManager.loadPlugins();
}
public int getTotalNumExecutedFlows() {
return executorService.getTotalTasks();
}
@Override
public void beforeExecute(Runnable r) {
}
@Override
public void afterExecute(Runnable r) {
submittedFlows.remove(r);
}
/**
* This shuts down the flow runner. The call is blocking and awaits execution of all jobs.
*/
public void shutdown() {
logger.warn("Shutting down FlowRunnerManager...");
executorService.shutdown();
boolean result = false;
while (!result) {
logger.info("Awaiting Shutdown. # of executing flows: " + getNumRunningFlows());
try {
result = executorService.awaitTermination(1, TimeUnit.MINUTES);
} catch (InterruptedException e) {
logger.error(e);
}
}
logger.warn("Shutdown FlowRunnerManager complete.");
}
/**
   * This attempts to shut down the flow runner immediately (unsafe).
* This doesn't wait for jobs to finish but interrupts all threads.
*/
public void shutdownNow() {
logger.warn("Shutting down FlowRunnerManager now...");
executorService.shutdownNow();
}
}
| 1 | 12,729 | don't we need to remove the project version from installedVersions? | azkaban-azkaban | java |
@@ -47,6 +47,12 @@ module Selenium
@bridge.send_command(cmd: cmd, params: params)
end
+ def print_page(**options)
+ options[:page_ranges] &&= Array(options[:page_ranges])
+
+ bridge.print_page(options)
+ end
+
private
def debugger_address | 1 | # frozen_string_literal: true
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Chrome
#
# Driver implementation for Chrome.
# @api private
#
class Driver < WebDriver::Driver
include DriverExtensions::HasNetworkConditions
include DriverExtensions::HasWebStorage
include DriverExtensions::HasLocation
include DriverExtensions::DownloadsFiles
include DriverExtensions::HasDevTools
include DriverExtensions::HasAuthentication
include DriverExtensions::HasLogEvents
def browser
:chrome
end
def bridge_class
Bridge
end
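        # Executes a raw Chrome DevTools Protocol command via the underlying bridge.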
def execute_cdp(cmd, **params)
@bridge.send_command(cmd: cmd, params: params)
end
private
def debugger_address
capabilities['goog:chromeOptions']['debuggerAddress']
end
end # Driver
end # Chrome
end # WebDriver
end # Selenium
| 1 | 18,367 | the bridge here isn't defined as an accessor / reader, to try to mask it better. So you need to directly call the iVar `@bridge` here. | SeleniumHQ-selenium | java
@@ -51,7 +51,10 @@ func (s *DaemonServer) FlushIPSets(ctx context.Context, req *pb.IPSetsRequest) (
ipset := ipset
s.IPSetLocker.Lock(ipset.Name)
err := flushIPSet(ctx, req.EnterNS, pid, ipset)
- s.IPSetLocker.Unlock(ipset.Name)
+ if err != nil {
+ return nil, err
+ }
+ err = s.IPSetLocker.Unlock(ipset.Name)
if err != nil {
return nil, err
} | 1 | // Copyright 2020 Chaos Mesh Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package chaosdaemon
import (
"context"
"fmt"
"strings"
"github.com/golang/protobuf/ptypes/empty"
"github.com/chaos-mesh/chaos-mesh/pkg/bpm"
pb "github.com/chaos-mesh/chaos-mesh/pkg/chaosdaemon/pb"
)
const (
ipsetExistErr = "set with the same name already exists"
ipExistErr = "it's already added"
ipsetNewNameExistErr = "a set with the new name already exists"
)
func (s *DaemonServer) FlushIPSets(ctx context.Context, req *pb.IPSetsRequest) (*empty.Empty, error) {
log.Info("flush ipset", "request", req)
pid, err := s.crClient.GetPidFromContainerID(ctx, req.ContainerId)
if err != nil {
log.Error(err, "error while getting PID")
return nil, err
}
for _, ipset := range req.Ipsets {
// All operations on the ipset with the same name should be serialized,
// because ipset is not isolated with namespace in linux < 3.12
// **Notice**: Serialization should be enough for Chaos Mesh (but no
// need to use name to simulate isolation), because the operation on
// the ipset with the same name should be same for NetworkChaos.
// It's a bad solution, only for the users who don't want to upgrade
// their linux version to 3.12 :(
ipset := ipset
s.IPSetLocker.Lock(ipset.Name)
err := flushIPSet(ctx, req.EnterNS, pid, ipset)
s.IPSetLocker.Unlock(ipset.Name)
if err != nil {
return nil, err
}
}
return &empty.Empty{}, nil
}
func flushIPSet(ctx context.Context, enterNS bool, pid uint32, set *pb.IPSet) error {
name := set.Name
	// Build everything in a temp ipset with this name first; it is renamed to
	// (or swapped with) the target name at the end.
tmpName := fmt.Sprintf("%sold", name)
	// An ipset cannot be deleted while existing iptables rules are using it,
	// so we create a temp ipset and swap it with the existing one.
if err := createIPSet(ctx, enterNS, pid, tmpName); err != nil {
return err
}
// add ips to the temp ipset
if err := addCIDRsToIPSet(ctx, enterNS, pid, tmpName, set.Cidrs); err != nil {
return err
}
	// Rename the temp ipset to the target name if the target ipset does not exist,
	// otherwise swap them with each other.
err := renameIPSet(ctx, enterNS, pid, tmpName, name)
return err
}
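// createIPSet creates a hash:net ipset with the given name (truncated to the
// 31-byte limit); if a set with that name already exists, it is flushed instead.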
func createIPSet(ctx context.Context, enterNS bool, pid uint32, name string) error {
// ipset name cannot be longer than 31 bytes
if len(name) > 31 {
name = name[:31]
}
processBuilder := bpm.DefaultProcessBuilder("ipset", "create", name, "hash:net").SetContext(ctx)
if enterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
cmd := processBuilder.Build()
log.Info("create ipset", "command", cmd.String())
out, err := cmd.CombinedOutput()
if err != nil {
output := string(out)
if !strings.Contains(output, ipsetExistErr) {
log.Error(err, "ipset create error", "command", cmd.String(), "output", output)
return encodeOutputToError(out, err)
}
processBuilder = bpm.DefaultProcessBuilder("ipset", "flush", name).SetContext(ctx)
if enterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
cmd = processBuilder.Build()
log.Info("flush ipset", "command", cmd.String())
out, err := cmd.CombinedOutput()
if err != nil {
log.Error(err, "ipset flush error", "command", cmd.String(), "output", string(out))
return encodeOutputToError(out, err)
}
}
return nil
}
func addCIDRsToIPSet(ctx context.Context, enterNS bool, pid uint32, name string, cidrs []string) error {
for _, cidr := range cidrs {
processBuilder := bpm.DefaultProcessBuilder("ipset", "add", name, cidr).SetContext(ctx)
if enterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
cmd := processBuilder.Build()
log.Info("add CIDR to ipset", "command", cmd.String())
out, err := cmd.CombinedOutput()
if err != nil {
output := string(out)
if !strings.Contains(output, ipExistErr) {
log.Error(err, "ipset add error", "command", cmd.String(), "output", output)
return encodeOutputToError(out, err)
}
}
}
return nil
}
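// renameIPSet renames oldName to newName; if newName already exists, the two
// sets are swapped so that rules referencing newName pick up the new contents.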
func renameIPSet(ctx context.Context, enterNS bool, pid uint32, oldName string, newName string) error {
processBuilder := bpm.DefaultProcessBuilder("ipset", "rename", oldName, newName).SetContext(ctx)
if enterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
cmd := processBuilder.Build()
log.Info("rename ipset", "command", cmd.String())
out, err := cmd.CombinedOutput()
if err != nil {
output := string(out)
if !strings.Contains(output, ipsetNewNameExistErr) {
log.Error(err, "rename ipset failed", "command", cmd.String(), "output", output)
return encodeOutputToError(out, err)
}
// swap the old ipset and the new ipset if the new ipset already exist.
processBuilder = bpm.DefaultProcessBuilder("ipset", "swap", oldName, newName).SetContext(ctx)
if enterNS {
processBuilder = processBuilder.SetNS(pid, bpm.NetNS)
}
cmd := processBuilder.Build()
log.Info("swap ipset", "command", cmd.String())
out, err := cmd.CombinedOutput()
if err != nil {
log.Error(err, "swap ipset failed", "command", cmd.String(), "output", string(out))
return encodeOutputToError(out, err)
}
}
return nil
}
| 1 | 21,524 | This is intended. Pls revert this. | chaos-mesh-chaos-mesh | go |
@@ -68,7 +68,7 @@ interface RedBlackTree<T> extends Iterable<T> {
static <T extends Comparable<? super T>> RedBlackTree<T> ofAll(Iterable<? extends T> values) {
Objects.requireNonNull(values, "values is null");
- return ofAll((Comparator<? super T> & Serializable) T::compareTo, values);
+ return ofAll(Comparators.naturalComparator(), values);
}
@SuppressWarnings("unchecked") | 1 | /* / \____ _ _ ____ ______ / \ ____ __ _______
* / / \/ \ / \/ \ / /\__\/ // \/ \ // /\__\ JΛVΛSLΛNG
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \ /__\ \ Copyright 2014-2016 Javaslang, http://javaslang.io
* /___/\_/ \_/\____/\_/ \_/\__\/__/\__\_/ \_// \__/\_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.*;
import javaslang.collection.RedBlackTreeModule.*;
import javaslang.control.Option;
import java.io.Serializable;
import java.util.*;
import static javaslang.collection.RedBlackTree.Color.*;
/**
* Purely functional Red/Black Tree, inspired by <a href="https://github.com/kazu-yamamoto/llrbtree/blob/master/Data/Set/RBTree.hs">Kazu Yamamoto's Haskell implementation</a>.
* <p>
* Based on
* <ul>
* <li><a href="http://www.eecs.usma.edu/webs/people/okasaki/pubs.html#jfp99">Chris Okasaki, "Red-Black Trees in a Functional Setting", Journal of Functional Programming, 9(4), pp 471-477, July 1999</a></li>
* <li>Stefan Kahrs, "Red-black trees with types", Journal of functional programming, 11(04), pp 425-432, July 2001</li>
* </ul>
*
* @param <T> Component type
* @author Daniel Dietrich
* @since 2.0.0
*/
interface RedBlackTree<T> extends Iterable<T> {
static <T extends Comparable<? super T>> RedBlackTree<T> empty() {
return new Empty<>((Comparator<? super T> & Serializable) T::compareTo);
}
static <T> RedBlackTree<T> empty(Comparator<? super T> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return new Empty<>(comparator);
}
static <T extends Comparable<? super T>> RedBlackTree<T> of(T value) {
return of((Comparator<? super T> & Serializable) T::compareTo, value);
}
static <T> RedBlackTree<T> of(Comparator<? super T> comparator, T value) {
Objects.requireNonNull(comparator, "comparator is null");
final Empty<T> empty = new Empty<>(comparator);
return new Node<>(BLACK, 1, empty, value, empty, empty);
}
@SuppressWarnings("varargs")
@SafeVarargs
static <T extends Comparable<? super T>> RedBlackTree<T> of(T... values) {
Objects.requireNonNull(values, "values is null");
return of((Comparator<? super T> & Serializable) T::compareTo, values);
}
@SafeVarargs
static <T> RedBlackTree<T> of(Comparator<? super T> comparator, T... values) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(values, "values is null");
RedBlackTree<T> tree = empty(comparator);
for (T value : values) {
tree = tree.insert(value);
}
return tree;
}
static <T extends Comparable<? super T>> RedBlackTree<T> ofAll(Iterable<? extends T> values) {
Objects.requireNonNull(values, "values is null");
return ofAll((Comparator<? super T> & Serializable) T::compareTo, values);
}
@SuppressWarnings("unchecked")
static <T> RedBlackTree<T> ofAll(Comparator<? super T> comparator, Iterable<? extends T> values) {
Objects.requireNonNull(comparator, "comparator is null");
Objects.requireNonNull(values, "values is null");
// function equality is not computable => same object check
if (values instanceof RedBlackTree && ((RedBlackTree<T>) values).comparator() == comparator) {
return (RedBlackTree<T>) values;
} else {
RedBlackTree<T> tree = empty(comparator);
for (T value : values) {
tree = tree.insert(value);
}
return tree;
}
}
/**
* Inserts a new value into this tree.
*
* @param value A value.
* @return A new tree if this tree does not contain the given value, otherwise the same tree instance.
*/
default RedBlackTree<T> insert(T value) {
return Node.insert(this, value).color(BLACK);
}
/**
     * Returns the {@link Color} of this Red/Black Tree node.
* <p>
* An empty node is {@code BLACK} by definition.
*
* @return Either {@code RED} or {@code BLACK}.
*/
Color color();
/**
* Returns the underlying {@link java.util.Comparator} of this RedBlackTree.
*
* @return The comparator.
*/
Comparator<T> comparator();
/**
* Checks, if this {@code RedBlackTree} contains the given {@code value}.
*
* @param value A value.
* @return true, if this tree contains the value, false otherwise.
*/
boolean contains(T value);
/**
* Deletes a value from this RedBlackTree.
*
* @param value A value
* @return A new RedBlackTree if the value is present, otherwise this.
*/
default RedBlackTree<T> delete(T value) {
final RedBlackTree<T> tree = Node.delete(this, value)._1;
return Node.color(tree, BLACK);
}
default RedBlackTree<T> difference(RedBlackTree<T> tree) {
Objects.requireNonNull(tree, "tree is null");
if (isEmpty() || tree.isEmpty()) {
return this;
} else {
final Node<T> that = (Node<T>) tree;
final Tuple2<RedBlackTree<T>, RedBlackTree<T>> split = Node.split(this, that.value);
return Node.merge(split._1.difference(that.left), split._2.difference(that.right));
}
}
/**
* Returns the empty instance of this RedBlackTree.
*
     * @return An empty RedBlackTree
*/
RedBlackTree<T> emptyInstance();
/**
* Finds the value stored in this tree, if exists, by applying the underlying comparator to the tree elements and
* the given element.
* <p>
* Especially the value returned may differ from the given value, even if the underlying comparator states that
* both are equal.
*
* @param value A value
* @return Some value, if this tree contains a value equal to the given value according to the underlying comparator. Otherwise None.
*/
Option<T> find(T value);
default RedBlackTree<T> intersection(RedBlackTree<T> tree) {
Objects.requireNonNull(tree, "tree is null");
if (isEmpty()) {
return this;
} else if (tree.isEmpty()) {
return tree;
} else {
final Node<T> that = (Node<T>) tree;
final Tuple2<RedBlackTree<T>, RedBlackTree<T>> split = Node.split(this, that.value);
if (contains(that.value)) {
return Node.join(split._1.intersection(that.left), that.value, split._2.intersection(that.right));
} else {
return Node.merge(split._1.intersection(that.left), split._2.intersection(that.right));
}
}
}
/**
* Checks if this {@code RedBlackTree} is empty, i.e. an instance of {@code Leaf}.
*
* @return true, if it is empty, false otherwise.
*/
boolean isEmpty();
/**
* Returns the left child if this is a non-empty node, otherwise throws.
*
* @return The left child.
* @throws UnsupportedOperationException if this RedBlackTree is empty
*/
RedBlackTree<T> left();
/**
* Returns the maximum element of this tree according to the underlying comparator.
*
* @return Some element, if this is not empty, otherwise None
*/
default Option<T> max() {
return isEmpty() ? Option.none() : Option.some(Node.maximum((Node<T>) this));
}
/**
* Returns the minimum element of this tree according to the underlying comparator.
*
* @return Some element, if this is not empty, otherwise None
*/
default Option<T> min() {
return isEmpty() ? Option.none() : Option.some(Node.minimum((Node<T>) this));
}
/**
* Returns the right child if this is a non-empty node, otherwise throws.
*
* @return The right child.
* @throws UnsupportedOperationException if this RedBlackTree is empty
*/
RedBlackTree<T> right();
/**
* Returns the size of this tree.
*
* @return the number of nodes of this tree and 0 if this is the empty tree
*/
int size();
/**
* Adds all of the elements of the given {@code tree} to this tree, if not already present.
*
* @param tree The RedBlackTree to form the union with.
* @return A new RedBlackTree that contains all distinct elements of this and the given {@code tree}.
*/
default RedBlackTree<T> union(RedBlackTree<T> tree) {
Objects.requireNonNull(tree, "tree is null");
if (tree.isEmpty()) {
return this;
} else {
final Node<T> that = (Node<T>) tree;
if (isEmpty()) {
return that.color(BLACK);
} else {
final Tuple2<RedBlackTree<T>, RedBlackTree<T>> split = Node.split(this, that.value);
return Node.join(split._1.union(that.left), that.value, split._2.union(that.right));
}
}
}
/**
* Returns the value of the current tree node or throws if this is empty.
*
* @return The value.
* @throws NoSuchElementException if this is the empty node.
*/
T value();
/**
* Returns an Iterator that iterates elements in the order induced by the underlying Comparator.
* <p>
* Internally an in-order traversal of the RedBlackTree is performed.
* <p>
* Example:
*
* <pre><code>
* 4
* / \
* 2 6
* / \ / \
* 1 3 5 7
* </code></pre>
*
* Iteration order: 1, 2, 3, 4, 5, 6, 7
* <p>
* See also <a href="http://n00tc0d3r.blogspot.de/2013/08/implement-iterator-for-binarytree-i-in.html">Implement Iterator for BinaryTree I (In-order)</a>.
*/
@Override
default Iterator<T> iterator() {
if (isEmpty()) {
return Iterator.empty();
} else {
final Node<T> that = (Node<T>) this;
return new AbstractIterator<T>() {
Stack<Node<T>> stack = pushLeftChildren(List.empty(), that);
@Override
public boolean hasNext() {
return !stack.isEmpty();
}
@Override
public T getNext() {
final Tuple2<Node<T>, ? extends Stack<Node<T>>> result = stack.pop2();
final Node<T> node = result._1;
stack = node.right.isEmpty() ? result._2 : pushLeftChildren(result._2, (Node<T>) node.right);
return result._1.value;
}
private Stack<Node<T>> pushLeftChildren(Stack<Node<T>> initialStack, Node<T> that) {
Stack<Node<T>> stack = initialStack;
RedBlackTree<T> tree = that;
while (!tree.isEmpty()) {
final Node<T> node = (Node<T>) tree;
stack = stack.push(node);
tree = node.left;
}
return stack;
}
};
}
}
/**
* Compares color, value and sub-trees. The comparator is not compared because function equality is not computable.
*
     * @return true if the given object equals this tree, false otherwise.
*/
@Override
boolean equals(Object o);
/**
* Computes the hash code of this tree based on color, value and sub-trees. The comparator is not taken into account.
*
* @return The hash code of this tree.
*/
@Override
int hashCode();
/**
* Returns a Lisp like representation of this tree.
*
* @return This Tree as Lisp like String.
*/
@Override
String toString();
enum Color {
RED, BLACK;
@Override
public String toString() {
return (this == RED) ? "R" : "B";
}
}
}
interface RedBlackTreeModule {
/**
* A non-empty tree node.
*
* @param <T> Component type
*/
final class Node<T> implements RedBlackTree<T>, Serializable {
private static final long serialVersionUID = 1L;
final Color color;
final int blackHeight;
final RedBlackTree<T> left;
final T value;
final RedBlackTree<T> right;
final Empty<T> empty;
final int size;
// This is no public API! The RedBlackTree takes care of passing the correct Comparator.
Node(Color color, int blackHeight, RedBlackTree<T> left, T value, RedBlackTree<T> right, Empty<T> empty) {
this.color = color;
this.blackHeight = blackHeight;
this.left = left;
this.value = value;
this.right = right;
this.empty = empty;
this.size = left.size() + right.size() + 1;
}
@Override
public Color color() {
return color;
}
@Override
public Comparator<T> comparator() {
return empty.comparator;
}
@Override
public boolean contains(T value) {
final int result = empty.comparator.compare(value, this.value);
if (result < 0) {
return left.contains(value);
} else if (result > 0) {
return right.contains(value);
} else {
return true;
}
}
@Override
public Empty<T> emptyInstance() {
return empty;
}
@Override
public Option<T> find(T value) {
final int result = empty.comparator.compare(value, this.value);
if (result < 0) {
return left.find(value);
} else if (result > 0) {
return right.find(value);
} else {
return Option.some(this.value);
}
}
@Override
public boolean isEmpty() {
return false;
}
@Override
public RedBlackTree<T> left() {
return left;
}
@Override
public RedBlackTree<T> right() {
return right;
}
@Override
public int size() {
return size;
}
@Override
public T value() {
return value;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof Node) {
final Node<?> that = (Node<?>) o;
return Collections.areEqual(this, that);
} else {
return false;
}
}
@Override
public int hashCode() {
// DEV-NOTE: Using `Objects.hash(this.value, this.left, this.right)` would leak the tree structure to the outside.
// We just want to hash the values in the right order.
return Collections.hash(this);
}
@Override
public String toString() {
return isLeaf() ? "(" + color + ":" + value + ")" : toLispString(this);
}
private static String toLispString(RedBlackTree<?> tree) {
if (tree.isEmpty()) {
return "";
} else {
final Node<?> node = (Node<?>) tree;
final String value = node.color + ":" + node.value;
if (node.isLeaf()) {
return value;
} else {
final String left = node.left.isEmpty() ? "" : " " + toLispString(node.left);
final String right = node.right.isEmpty() ? "" : " " + toLispString(node.right);
return "(" + value + left + right + ")";
}
}
}
private boolean isLeaf() {
return left.isEmpty() && right.isEmpty();
}
Node<T> color(Color color) {
return (this.color == color) ? this : new Node<>(color, blackHeight, left, value, right, empty);
}
static <T> RedBlackTree<T> color(RedBlackTree<T> tree, Color color) {
return tree.isEmpty() ? tree : ((Node<T>) tree).color(color);
}
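        // Resolves a red-red violation in the left subtree of a black node
        // (left-left and left-right cases) by rotating to a red root over two
        // black children, with an incremented black height.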
private static <T> Node<T> balanceLeft(Color color, int blackHeight, RedBlackTree<T> left, T value,
RedBlackTree<T> right, Empty<T> empty) {
if (color == BLACK) {
if (!left.isEmpty()) {
final Node<T> ln = (Node<T>) left;
if (ln.color == RED) {
if (!ln.left.isEmpty()) {
final Node<T> lln = (Node<T>) ln.left;
if (lln.color == RED) {
final Node<T> newLeft = new Node<>(BLACK, blackHeight, lln.left, lln.value, lln.right,
empty);
final Node<T> newRight = new Node<>(BLACK, blackHeight, ln.right, value, right, empty);
return new Node<>(RED, blackHeight + 1, newLeft, ln.value, newRight, empty);
}
}
if (!ln.right.isEmpty()) {
final Node<T> lrn = (Node<T>) ln.right;
if (lrn.color == RED) {
final Node<T> newLeft = new Node<>(BLACK, blackHeight, ln.left, ln.value, lrn.left,
empty);
final Node<T> newRight = new Node<>(BLACK, blackHeight, lrn.right, value, right, empty);
return new Node<>(RED, blackHeight + 1, newLeft, lrn.value, newRight, empty);
}
}
}
}
}
return new Node<>(color, blackHeight, left, value, right, empty);
}
private static <T> Node<T> balanceRight(Color color, int blackHeight, RedBlackTree<T> left, T value,
RedBlackTree<T> right, Empty<T> empty) {
if (color == BLACK) {
if (!right.isEmpty()) {
final Node<T> rn = (Node<T>) right;
if (rn.color == RED) {
if (!rn.right.isEmpty()) {
final Node<T> rrn = (Node<T>) rn.right;
if (rrn.color == RED) {
final Node<T> newLeft = new Node<>(BLACK, blackHeight, left, value, rn.left, empty);
final Node<T> newRight = new Node<>(BLACK, blackHeight, rrn.left, rrn.value, rrn.right,
empty);
return new Node<>(RED, blackHeight + 1, newLeft, rn.value, newRight, empty);
}
}
if (!rn.left.isEmpty()) {
final Node<T> rln = (Node<T>) rn.left;
if (rln.color == RED) {
final Node<T> newLeft = new Node<>(BLACK, blackHeight, left, value, rln.left, empty);
final Node<T> newRight = new Node<>(BLACK, blackHeight, rln.right, rn.value, rn.right,
empty);
return new Node<>(RED, blackHeight + 1, newLeft, rln.value, newRight, empty);
}
}
}
}
}
return new Node<>(color, blackHeight, left, value, right, empty);
}
private static <T> Tuple2<? extends RedBlackTree<T>, Boolean> blackify(RedBlackTree<T> tree) {
if (tree instanceof Node) {
final Node<T> node = (Node<T>) tree;
if (node.color == RED) {
return Tuple.of(node.color(BLACK), false);
}
}
return Tuple.of(tree, true);
}
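        // Deletes a value from the subtree. The Boolean of the result signals
        // whether the subtree's black height shrank, so the caller must rebalance.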
static <T> Tuple2<? extends RedBlackTree<T>, Boolean> delete(RedBlackTree<T> tree, T value) {
if (tree.isEmpty()) {
return Tuple.of(tree, false);
} else {
final Node<T> node = (Node<T>) tree;
final int comparison = node.comparator().compare(value, node.value);
if (comparison < 0) {
final Tuple2<? extends RedBlackTree<T>, Boolean> deleted = delete(node.left, value);
final RedBlackTree<T> l = deleted._1;
final boolean d = deleted._2;
if (d) {
return Node.unbalancedRight(node.color, node.blackHeight - 1, l, node.value, node.right,
node.empty);
} else {
final Node<T> newNode = new Node<>(node.color, node.blackHeight, l, node.value, node.right,
node.empty);
return Tuple.of(newNode, false);
}
} else if (comparison > 0) {
final Tuple2<? extends RedBlackTree<T>, Boolean> deleted = delete(node.right, value);
final RedBlackTree<T> r = deleted._1;
final boolean d = deleted._2;
if (d) {
return Node.unbalancedLeft(node.color, node.blackHeight - 1, node.left, node.value, r,
node.empty);
} else {
final Node<T> newNode = new Node<>(node.color, node.blackHeight, node.left, node.value, r,
node.empty);
return Tuple.of(newNode, false);
}
} else {
if (node.right.isEmpty()) {
if (node.color == BLACK) {
return blackify(node.left);
} else {
return Tuple.of(node.left, false);
}
} else {
final Node<T> nodeRight = (Node<T>) node.right;
final Tuple3<? extends RedBlackTree<T>, Boolean, T> newRight = deleteMin(nodeRight);
final RedBlackTree<T> r = newRight._1;
final boolean d = newRight._2;
final T m = newRight._3;
if (d) {
return Node.unbalancedLeft(node.color, node.blackHeight - 1, node.left, m, r, node.empty);
} else {
final RedBlackTree<T> newNode = new Node<>(node.color, node.blackHeight, node.left, m, r,
node.empty);
return Tuple.of(newNode, false);
}
}
}
}
}
private static <T> Tuple3<? extends RedBlackTree<T>, Boolean, T> deleteMin(Node<T> node) {
if (node.left.isEmpty()) {
if (node.color == BLACK) {
if (node.right.isEmpty()) {
return Tuple.of(node.empty, true, node.value);
} else {
final Node<T> rightNode = (Node<T>) node.right;
return Tuple.of(rightNode.color(BLACK), false, node.value);
}
} else {
return Tuple.of(node.right, false, node.value);
}
} else {
final Node<T> nodeLeft = (Node<T>) node.left;
final Tuple3<? extends RedBlackTree<T>, Boolean, T> newNode = deleteMin(nodeLeft);
final RedBlackTree<T> l = newNode._1;
final boolean d = newNode._2;
final T m = newNode._3;
if (d) {
final Tuple2<Node<T>, Boolean> tD = Node.unbalancedRight(node.color, node.blackHeight - 1, l,
node.value, node.right, node.empty);
return Tuple.of(tD._1, tD._2, m);
} else {
final Node<T> tD = new Node<>(node.color, node.blackHeight, l, node.value, node.right, node.empty);
return Tuple.of(tD, false, m);
}
}
}
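        // Recursive insert: descends by comparison, rebalances on the way back
        // up, and replaces the stored element when the comparison is equal.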
static <T> Node<T> insert(RedBlackTree<T> tree, T value) {
if (tree.isEmpty()) {
final Empty<T> empty = (Empty<T>) tree;
return new Node<>(RED, 1, empty, value, empty, empty);
} else {
final Node<T> node = (Node<T>) tree;
final int comparison = node.comparator().compare(value, node.value);
if (comparison < 0) {
final Node<T> newLeft = insert(node.left, value);
return (newLeft == node.left)
? node
: Node.balanceLeft(node.color, node.blackHeight, newLeft, node.value, node.right,
node.empty);
} else if (comparison > 0) {
final Node<T> newRight = insert(node.right, value);
return (newRight == node.right)
? node
: Node.balanceRight(node.color, node.blackHeight, node.left, node.value, newRight,
node.empty);
} else {
// DEV-NOTE: Even if there is no _comparison_ difference, the object may not be _equal_.
// To save an equals() call, which may be expensive, we return a new instance.
return new Node<>(node.color, node.blackHeight, node.left, value, node.right, node.empty);
}
}
}
private static boolean isRed(RedBlackTree<?> tree) {
return !tree.isEmpty() && ((Node<?>) tree).color == RED;
}
static <T> RedBlackTree<T> join(RedBlackTree<T> t1, T value, RedBlackTree<T> t2) {
if (t1.isEmpty()) {
return t2.insert(value);
} else if (t2.isEmpty()) {
return t1.insert(value);
} else {
final Node<T> n1 = (Node<T>) t1;
final Node<T> n2 = (Node<T>) t2;
final int comparison = n1.blackHeight - n2.blackHeight;
if (comparison < 0) {
return Node.joinLT(n1, value, n2, n1.blackHeight).color(BLACK);
} else if (comparison > 0) {
return Node.joinGT(n1, value, n2, n2.blackHeight).color(BLACK);
} else {
return new Node<>(BLACK, n1.blackHeight + 1, n1, value, n2, n1.empty);
}
}
}
private static <T> Node<T> joinGT(Node<T> n1, T value, Node<T> n2, int h2) {
if (n1.blackHeight == h2) {
return new Node<>(RED, h2 + 1, n1, value, n2, n1.empty);
} else {
final Node<T> node = joinGT((Node<T>) n1.right, value, n2, h2);
return Node.balanceRight(n1.color, n1.blackHeight, n1.left, n1.value, node, n2.empty);
}
}
private static <T> Node<T> joinLT(Node<T> n1, T value, Node<T> n2, int h1) {
if (n2.blackHeight == h1) {
return new Node<>(RED, h1 + 1, n1, value, n2, n1.empty);
} else {
final Node<T> node = joinLT(n1, value, (Node<T>) n2.left, h1);
return Node.balanceLeft(n2.color, n2.blackHeight, node, n2.value, n2.right, n2.empty);
}
}
static <T> RedBlackTree<T> merge(RedBlackTree<T> t1, RedBlackTree<T> t2) {
if (t1.isEmpty()) {
return t2;
} else if (t2.isEmpty()) {
return t1;
} else {
final Node<T> n1 = (Node<T>) t1;
final Node<T> n2 = (Node<T>) t2;
final int comparison = n1.blackHeight - n2.blackHeight;
if (comparison < 0) {
final Node<T> node = Node.mergeLT(n1, n2, n1.blackHeight);
return Node.color(node, BLACK);
} else if (comparison > 0) {
final Node<T> node = Node.mergeGT(n1, n2, n2.blackHeight);
return Node.color(node, BLACK);
} else {
final Node<T> node = Node.mergeEQ(n1, n2);
return Node.color(node, BLACK);
}
}
}
private static <T> Node<T> mergeEQ(Node<T> n1, Node<T> n2) {
final T m = Node.minimum(n2);
final RedBlackTree<T> t2 = Node.deleteMin(n2)._1;
final int h2 = t2.isEmpty() ? 0 : ((Node<T>) t2).blackHeight;
if (n1.blackHeight == h2) {
return new Node<>(RED, n1.blackHeight + 1, n1, m, t2, n1.empty);
} else if (isRed(n1.left)) {
final Node<T> node = new Node<>(BLACK, n1.blackHeight, n1.right, m, t2, n1.empty);
return new Node<>(RED, n1.blackHeight, Node.color(n1.left, BLACK), n1.value, node, n1.empty);
} else if (isRed(n1.right)) {
final RedBlackTree<T> rl = ((Node<T>) n1.right).left;
final T rx = ((Node<T>) n1.right).value;
final RedBlackTree<T> rr = ((Node<T>) n1.right).right;
final Node<T> left = new Node<>(RED, n1.blackHeight, n1.left, n1.value, rl, n1.empty);
final Node<T> right = new Node<>(RED, n1.blackHeight, rr, m, t2, n1.empty);
return new Node<>(BLACK, n1.blackHeight, left, rx, right, n1.empty);
} else {
return new Node<>(BLACK, n1.blackHeight, n1.color(RED), m, t2, n1.empty);
}
}
private static <T> Node<T> mergeGT(Node<T> n1, Node<T> n2, int h2) {
if (n1.blackHeight == h2) {
return Node.mergeEQ(n1, n2);
} else {
final Node<T> node = Node.mergeGT((Node<T>) n1.right, n2, h2);
return Node.balanceRight(n1.color, n1.blackHeight, n1.left, n1.value, node, n1.empty);
}
}
private static <T> Node<T> mergeLT(Node<T> n1, Node<T> n2, int h1) {
if (n2.blackHeight == h1) {
return Node.mergeEQ(n1, n2);
} else {
final Node<T> node = Node.mergeLT(n1, (Node<T>) n2.left, h1);
return Node.balanceLeft(n2.color, n2.blackHeight, node, n2.value, n2.right, n2.empty);
}
}
static <T> T maximum(Node<T> node) {
Node<T> curr = node;
while (!curr.right.isEmpty()) {
curr = (Node<T>) curr.right;
}
return curr.value;
}
static <T> T minimum(Node<T> node) {
Node<T> curr = node;
while (!curr.left.isEmpty()) {
curr = (Node<T>) curr.left;
}
return curr.value;
}
static <T> Tuple2<RedBlackTree<T>, RedBlackTree<T>> split(RedBlackTree<T> tree, T value) {
if (tree.isEmpty()) {
return Tuple.of(tree, tree);
} else {
final Node<T> node = (Node<T>) tree;
final int comparison = node.comparator().compare(value, node.value);
if (comparison < 0) {
final Tuple2<RedBlackTree<T>, RedBlackTree<T>> split = Node.split(node.left, value);
return Tuple.of(split._1, Node.join(split._2, node.value, Node.color(node.right, BLACK)));
} else if (comparison > 0) {
final Tuple2<RedBlackTree<T>, RedBlackTree<T>> split = Node.split(node.right, value);
return Tuple.of(Node.join(Node.color(node.left, BLACK), node.value, split._1), split._2);
} else {
return Tuple.of(Node.color(node.left, BLACK), Node.color(node.right, BLACK));
}
}
}
private static <T> Tuple2<Node<T>, Boolean> unbalancedLeft(Color color, int blackHeight, RedBlackTree<T> left,
T value, RedBlackTree<T> right, Empty<T> empty) {
if (!left.isEmpty()) {
final Node<T> ln = (Node<T>) left;
if (ln.color == BLACK) {
final Node<T> newNode = Node.balanceLeft(BLACK, blackHeight, ln.color(RED), value, right, empty);
return Tuple.of(newNode, color == BLACK);
} else if (color == BLACK && !ln.right.isEmpty()) {
final Node<T> lrn = (Node<T>) ln.right;
if (lrn.color == BLACK) {
final Node<T> newRightNode = Node.balanceLeft(BLACK, blackHeight, lrn.color(RED), value, right,
empty);
final Node<T> newNode = new Node<>(BLACK, ln.blackHeight, ln.left, ln.value, newRightNode,
empty);
return Tuple.of(newNode, false);
}
}
}
throw new IllegalStateException("unbalancedLeft(" + color + ", " + blackHeight + ", " + left + ", " + value + ", " + right + ")");
}
private static <T> Tuple2<Node<T>, Boolean> unbalancedRight(Color color, int blackHeight, RedBlackTree<T> left,
T value, RedBlackTree<T> right, Empty<T> empty) {
if (!right.isEmpty()) {
final Node<T> rn = (Node<T>) right;
if (rn.color == BLACK) {
final Node<T> newNode = Node.balanceRight(BLACK, blackHeight, left, value, rn.color(RED), empty);
return Tuple.of(newNode, color == BLACK);
} else if (color == BLACK && !rn.left.isEmpty()) {
final Node<T> rln = (Node<T>) rn.left;
if (rln.color == BLACK) {
final Node<T> newLeftNode = Node.balanceRight(BLACK, blackHeight, left, value, rln.color(RED),
empty);
final Node<T> newNode = new Node<>(BLACK, rn.blackHeight, newLeftNode, rn.value, rn.right,
empty);
return Tuple.of(newNode, false);
}
}
}
throw new IllegalStateException("unbalancedRight(" + color + ", " + blackHeight + ", " + left + ", " + value + ", " + right + ")");
}
}
/**
* The empty tree node. It can't be a singleton because it depends on a {@link Comparator}.
*
* @param <T> Component type
*/
final class Empty<T> implements RedBlackTree<T>, Serializable {
private static final long serialVersionUID = 1L;
final Comparator<T> comparator;
// This is not a public API! The RedBlackTree takes care of passing the correct Comparator.
@SuppressWarnings("unchecked")
Empty(Comparator<? super T> comparator) {
this.comparator = (Comparator<T>) comparator;
}
@Override
public Color color() {
return BLACK;
}
@Override
public Comparator<T> comparator() {
return comparator;
}
@Override
public boolean contains(T value) {
return false;
}
@Override
public Empty<T> emptyInstance() {
return this;
}
@Override
public Option<T> find(T value) {
return Option.none();
}
@Override
public boolean isEmpty() {
return true;
}
@Override
public RedBlackTree<T> left() {
throw new UnsupportedOperationException("left on empty");
}
@Override
public RedBlackTree<T> right() {
throw new UnsupportedOperationException("right on empty");
}
@Override
public int size() {
return 0;
}
@Override
public T value() {
throw new NoSuchElementException("value on empty");
}
@Override
public boolean equals(Object o) {
// note: it is not possible to compare the comparators because function equality is not computable
return (o == this) || (o instanceof Empty);
}
@Override
public int hashCode() {
return 1;
}
@Override
public String toString() {
return "()";
}
}
}
 | 1 | 9,875 | Something is wrong with the cast to `(Comparator<> & Serializable)`; it needs to be investigated. | vavr-io-vavr | java |
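A minimal sketch of the Java pitfall that review comment most likely refers to (the class and variable names below are mine, purely illustrative, not vavr's): an intersection-type cast such as `(Comparator<T> & Serializable)` is only safe on a lambda or method-reference expression, where the compiler synthesizes a serializable implementation; applied to an existing object it is an ordinary checked cast and throws `ClassCastException` if the instance does not already implement `Serializable`.

import java.io.Serializable;
import java.util.Comparator;

public class IntersectionCastSketch {
    // A comparator that deliberately does not implement Serializable.
    static class PlainComparator implements Comparator<Integer> {
        @Override
        public int compare(Integer a, Integer b) {
            return a.compareTo(b);
        }
    }

    public static void main(String[] args) {
        // OK: the cast target is a method-reference expression, so the
        // compiler emits a lambda that also implements Serializable.
        Comparator<Integer> ok = (Comparator<Integer> & Serializable) Integer::compare;
        System.out.println(ok instanceof Serializable); // true

        // Not OK: casting an existing instance does not add Serializable;
        // this throws ClassCastException at runtime.
        try {
            Comparator<Integer> bad = (Comparator<Integer> & Serializable) new PlainComparator();
            System.out.println(bad);
        } catch (ClassCastException e) {
            System.out.println("ClassCastException, as expected");
        }
    }
}

So if the tree's comparator were ever stashed behind such a cast, every caller would have to supply a comparator that is itself serializable; presumably that is what the reviewer wants investigated.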
@@ -217,15 +217,10 @@ export default class App extends Component {
// eslint-disable-next-line no-unused-vars
handleClickSearch = (_, { suggestionValue, method }) => {
- const { packages } = this.state;
switch(method) {
case 'click':
- window.location.href = getDetailPageURL(suggestionValue);
- break;
case 'enter':
- this.setState({
- filteredPackages: packages.filter(pkg => pkg.label.match(suggestionValue))
- });
+ window.location.href = getDetailPageURL(suggestionValue);
break;
}
} | 1 | import React, { Component, Fragment } from 'react';
import isNil from 'lodash/isNil';
import Button from '@material-ui/core/Button';
import Dialog from '@material-ui/core/Dialog';
import DialogActions from '@material-ui/core/DialogActions';
import DialogContent from '@material-ui/core/DialogContent';
import DialogTitle from '@material-ui/core/DialogTitle';
import SnackbarContent from '@material-ui/core/SnackbarContent';
import ErrorIcon from '@material-ui/icons/Error';
import storage from './utils/storage';
import logo from './utils/logo';
import { makeLogin, isTokenExpire } from './utils/login';
import Footer from './components/Footer';
import Loading from './components/Loading';
import LoginModal from './components/Login';
import Header from './components/Header';
import { Container, Content } from './components/Layout';
import Route from './router';
import API from './utils/api';
import { getDetailPageURL } from './utils/url';
import './styles/main.scss';
import classes from "./app.scss";
import 'normalize.css';
export default class App extends Component {
state = {
error: {},
logoUrl: '',
user: {},
scope: (window.VERDACCIO_SCOPE) ? `${window.VERDACCIO_SCOPE}:` : '',
showLoginModal: false,
isUserLoggedIn: false,
packages: [],
searchPackages: [],
filteredPackages: [],
search: '',
isLoading: true,
showAlertDialog: false,
alertDialogContent: {
title: '',
message: '',
packages: []
},
}
componentDidMount() {
this.loadLogo();
this.isUserAlreadyLoggedIn();
this.loadPackages();
}
// eslint-disable-next-line no-unused-vars
componentDidUpdate(_, prevState) {
if (prevState.isUserLoggedIn !== this.state.isUserLoggedIn) {
this.loadPackages();
}
}
loadLogo = async () => {
const logoUrl = await logo();
this.setState({
logoUrl
});
}
isUserAlreadyLoggedIn = () => {
// checks for token validity
const token = storage.getItem('token');
const username = storage.getItem('username');
if (isTokenExpire(token) || isNil(username)) {
this.handleLogout();
} else {
this.setState({
user: { username, token },
isUserLoggedIn: true
});
}
}
loadPackages = async () => {
try {
this.req = await API.request('packages', 'GET');
const transformedPackages = this.req.map(({ name, ...others}) => ({
label: name,
...others
}));
this.setState({
packages: transformedPackages,
filteredPackages: transformedPackages,
isLoading: false
});
} catch (error) {
this.handleShowAlertDialog({
title: 'Warning',
message: `Unable to load package list: ${error.message}`
});
this.setLoading(false);
}
}
setLoading = isLoading => (
this.setState({
isLoading
})
)
/**
* Toggles the login modal
* Required by: <LoginModal /> <Header />
*/
toggleLoginModal = () => {
this.setState((prevState) => ({
showLoginModal: !prevState.showLoginModal,
error: {}
}));
}
/**
* handles login
* Required by: <Header />
*/
doLogin = async (usernameValue, passwordValue) => {
const { username, token, error } = await makeLogin(
usernameValue,
passwordValue
);
if (username && token) {
this.setLoggedUser(username, token);
storage.setItem('username', username);
storage.setItem('token', token);
}
if (error) {
this.setState({
user: {},
error
});
}
}
setLoggedUser = (username, token) => {
this.setState({
user: {
username,
token,
},
isUserLoggedIn: true, // mark the user as logged in after a successful login
showLoginModal: false // close the login modal
});
}
handleFetchPackages = async ({ value }) => {
try {
this.req = await API.request(`/search/${encodeURIComponent(value)}`, 'GET');
const transformedPackages = this.req.map(({ name, ...others}) => ({
label: name,
...others
}));
// Implement cancel feature later
if (this.state.search === value) {
this.setState({
searchPackages: transformedPackages
});
}
} catch (error) {
this.handleShowAlertDialog({
title: 'Warning',
message: `Unable to get search result: ${error.message}`
});
}
}
/**
* Logouts user
* Required by: <Header />
*/
handleLogout = () => {
storage.removeItem('username');
storage.removeItem('token');
this.setState({
user: {},
isUserLoggedIn: false
});
}
handlePackagesClearRequested = () => {
this.setState({
searchPackages: []
});
};
// eslint-disable-next-line no-unused-vars
handleSearch = (_, { newValue }) => {
const { filteredPackages, packages, search } = this.state;
const value = newValue.trim();
this.setState({
search: value,
filteredPackages: value.length < search.length ?
packages.filter(pkg => pkg.label.match(value)) : filteredPackages
});
};
handleKeyDown = event => {
if (event.key === 'Enter') {
const { filteredPackages, packages } = this.state;
const value = event.target.value.trim();
this.setState({
filteredPackages: value ?
packages.filter(pkg => pkg.label.match(value)) : filteredPackages
});
}
}
// eslint-disable-next-line no-unused-vars
handleClickSearch = (_, { suggestionValue, method }) => {
const { packages } = this.state;
switch(method) {
case 'click':
window.location.href = getDetailPageURL(suggestionValue);
break;
case 'enter':
this.setState({
filteredPackages: packages.filter(pkg => pkg.label.match(suggestionValue))
});
break;
}
}
handleShowAlertDialog = content => {
this.setState({
showAlertDialog: true,
alertDialogContent: content
});
}
handleDismissAlertDialog = () => {
this.setState({
showAlertDialog: false
});
};
getfilteredPackages = value => {
const inputValue = value.trim().toLowerCase();
const inputLength = inputValue.length;
if (inputLength === 0) {
return [];
} else {
return this.searchPackage(value);
}
}
renderHeader = () => {
const { logoUrl, user, search, searchPackages } = this.state;
return (
<Header
logo={logoUrl}
username={user.username}
toggleLoginModal={this.toggleLoginModal}
onLogout={this.handleLogout}
onSearch={this.handleSearch}
onSuggestionsFetch={this.handleFetchPackages}
onCleanSuggestions={this.handlePackagesClearRequested}
onClick={this.handleClickSearch}
onKeyDown={this.handleKeyDown}
packages={searchPackages}
search={search}
/>
);
}
renderAlertDialog = () => (
<Dialog
open={this.state.showAlertDialog}
onClose={this.handleDismissAlertDialog}
>
<DialogTitle id="alert-dialog-title">
{this.state.alertDialogContent.title}
</DialogTitle>
<DialogContent>
<SnackbarContent
className={classes.alertError}
message={
<div
id="client-snackbar"
className={classes.alertErrorMsg}
>
<ErrorIcon className={classes.alertIcon} />
<span>
{this.state.alertDialogContent.message}
</span>
</div>
}
/>
</DialogContent>
<DialogActions>
<Button
onClick={this.handleDismissAlertDialog}
color="primary"
autoFocus
>
Ok
</Button>
</DialogActions>
</Dialog>
)
renderLoginModal = () => {
const { error, showLoginModal } = this.state;
return (
<LoginModal
visibility={showLoginModal}
error={error}
onChange={this.setUsernameAndPassword}
onCancel={this.toggleLoginModal}
onSubmit={this.doLogin}
/>
);
}
render() {
const { isLoading, ...others } = this.state;
return (
<Container isLoading={isLoading}>
{isLoading ? (
<Loading />
) : (
<Fragment>
{this.renderHeader()}
<Content>
<Route {...others} />
</Content>
<Footer />
</Fragment>
)}
{this.renderAlertDialog()}
{this.renderLoginModal()}
</Container>
);
}
}
| 1 | 19,606 | I think we need this. Not sure, I'll test. | verdaccio-verdaccio | js |
@@ -83,7 +83,6 @@ module Beaker
host['user'] = 'google_compute'
disable_se_linux(host, @options)
- disable_iptables(host, @options)
copy_ssh_to_root(host, @options)
enable_root_login(host, @options)
host['user'] = default_user | 1 | require 'time'
module Beaker
#Beaker support for the Google Compute Engine.
class GoogleCompute < Beaker::Hypervisor
SLEEPWAIT = 5
#number of hours before an instance is considered a zombie
ZOMBIE = 3
#Create the array of metaData, each member being a hash with a :key and a :value. Sets
#:department, :project and :jenkins_build_url.
def format_metadata
[ {:key => :department, :value => @options[:department]},
{:key => :project, :value => @options[:project]},
{:key => :jenkins_build_url, :value => @options[:jenkins_build_url]} ].delete_if { |member| member[:value].nil? or member[:value].empty?}
end
#Create a new instance of the Google Compute Engine hypervisor object
#@param [<Host>] google_hosts The array of google hosts to provision, may ONLY be of platforms /centos-6-.*/ and
# /debian-7-.*/. We currently only support the Google Compute provided templates.
#@param [Hash{Symbol=>String}] options The options hash containing configuration values
#@option options [String] :gce_project The Google Compute Project name to connect to
#@option options [String] :gce_keyfile The location of the Google Compute service account keyfile
#@option options [String] :gce_password The password for the Google Compute service account key
#@option options [String] :gce_email The email address for the Google Compute service account
#@option options [String] :gce_machine_type A Google Compute machine type used to create instances, defaults to n1-highmem-2
#@option options [Integer] :timeout The amount of time to attempt execution before quitting and exiting with failure
def initialize(google_hosts, options)
require 'beaker/hypervisor/google_compute_helper'
@options = options
@logger = options[:logger]
@hosts = google_hosts
@firewall = ''
@gce_helper = GoogleComputeHelper.new(options)
end
#Create and configure virtual machines in the Google Compute Engine, including their associated disks and firewall rules
#Currently ONLY supports Google Compute provided templates of CENTOS-6 and DEBIAN-7
def provision
try = 1
attempts = @options[:timeout].to_i / SLEEPWAIT
start = Time.now
#get machineType resource, used by all instances
machineType = @gce_helper.get_machineType(start, attempts)
#set firewall to open pe ports
network = @gce_helper.get_network(start, attempts)
@firewall = generate_host_name
@gce_helper.create_firewall(@firewall, network, start, attempts)
@logger.debug("Created Google Compute firewall #{@firewall}")
@hosts.each do |host|
gplatform = Platform.new(host[:image] || host[:platform])
img = @gce_helper.get_latest_image(gplatform, start, attempts)
host['diskname'] = generate_host_name
disk = @gce_helper.create_disk(host['diskname'], img, start, attempts)
@logger.debug("Created Google Compute disk for #{host.name}: #{host['diskname']}")
#create new host name
host['vmhostname'] = generate_host_name
#add a new instance of the image
instance = @gce_helper.create_instance(host['vmhostname'], img, machineType, disk, start, attempts)
@logger.debug("Created Google Compute instance for #{host.name}: #{host['vmhostname']}")
#add metadata to instance, if there is any to set
mdata = format_metadata
if not mdata.empty?
@gce_helper.setMetadata_on_instance(host['vmhostname'], instance['metadata']['fingerprint'],
mdata,
start, attempts)
@logger.debug("Added tags to Google Compute instance #{host.name}: #{host['vmhostname']}")
end
#get ip for this host
host['ip'] = instance['networkInterfaces'][0]['accessConfigs'][0]['natIP']
#configure ssh
default_user = host['user']
host['user'] = 'google_compute'
disable_se_linux(host, @options)
disable_iptables(host, @options)
copy_ssh_to_root(host, @options)
enable_root_login(host, @options)
host['user'] = default_user
#shut down connection, will reconnect on next exec
host.close
@logger.debug("Instance ready: #{host['vmhostname']} for #{host.name}}")
end
end
#Shutdown and destroy virtual machines in the Google Compute Engine, including their associated disks and firewall rules
def cleanup()
attempts = @options[:timeout].to_i / SLEEPWAIT
start = Time.now
@gce_helper.delete_firewall(@firewall, start, attempts)
@hosts.each do |host|
@gce_helper.delete_instance(host['vmhostname'], start, attempts)
@logger.debug("Deleted Google Compute instance #{host['vmhostname']} for #{host.name}")
@gce_helper.delete_disk(host['diskname'], start, attempts)
@logger.debug("Deleted Google Compute disk #{host['diskname']} for #{host.name}")
end
end
#Shutdown and destroy Google Compute instances (including their associated disks and firewall rules)
#that have been alive longer than ZOMBIE hours.
def kill_zombies(max_age = ZOMBIE)
now = start = Time.now
attempts = @options[:timeout].to_i / SLEEPWAIT
#get rid of old instances
instances = @gce_helper.list_instances(start, attempts)
if instances
instances.each do |instance|
created = Time.parse(instance['creationTimestamp'])
alive = (now - created) / 60 / 60
if alive >= max_age
#kill it with fire!
@logger.debug("Deleting zombie instance #{instance['name']}")
@gce_helper.delete_instance( instance['name'], start, attempts )
end
end
else
@logger.debug("No zombie instances found")
end
#get rid of old disks
disks = @gce_helper.list_disks(start, attempts)
if disks
disks.each do |disk|
created = Time.parse(disk['creationTimestamp'])
alive = (now - created) / 60 / 60
if alive >= max_age
#kill it with fire!
@logger.debug("Deleting zombie disk #{disk['name']}")
@gce_helper.delete_disk( disk['name'], start, attempts )
end
end
else
@logger.debug("No zombie disks found")
end
#get rid of non-default firewalls
firewalls = @gce_helper.list_firewalls( start, attempts)
if firewalls and not firewalls.empty?
firewalls.each do |firewall|
@logger.debug("Deleting non-default firewall #{firewall['name']}")
@gce_helper.delete_firewall( firewall['name'], start, attempts )
end
else
@logger.debug("No zombie firewalls found")
end
end
end
end
 | 1 | 6,706 | I'm going to need to check whether Google Compute requires these steps in this order, or whether you can disable iptables after the fact. | voxpupuli-beaker | rb |
@@ -2,6 +2,7 @@
// sources:
// build/static/charts/traefik-10.3.001.tgz
// build/static/charts/traefik-crd-10.3.001.tgz
+//go:build !no_stage
// +build !no_stage
package static | 1 | // Code generated for package static by go-bindata DO NOT EDIT. (@generated)
// sources:
// build/static/charts/traefik-10.3.001.tgz
// build/static/charts/traefik-crd-10.3.001.tgz
// +build !no_stage
package static
import (
"bytes"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)
func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()
if err != nil {
return nil, fmt.Errorf("Read %q: %v", name, err)
}
if clErr != nil {
return nil, err
}
return buf.Bytes(), nil
}
type asset struct {
bytes []byte
info os.FileInfo
}
type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}
// Name returns the file name
func (fi bindataFileInfo) Name() string {
return fi.name
}
// Size returns the file size
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
// Mode returns the file mode
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
// ModTime returns the file modification time
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
// IsDir reports whether the file is a directory
func (fi bindataFileInfo) IsDir() bool {
return fi.mode&os.ModeDir != 0
}
// Sys returns the underlying data source (always nil for bindata files)
func (fi bindataFileInfo) Sys() interface{} {
return nil
}
var _chartsTraefik103001Tgz = []byte("...") // gzip-compressed traefik chart tarball; the machine-generated escaped byte literal (truncated in this dump) is elided here
7\x05\x85\xe8\x5a\x93\x23\x58\x40\xff\xdb\xfb\x7b\xfb\x25\xfa\xff\xfc\x60\x73\xff\xf7\x71\xbe\x12\xc3\x1e\x1b\x31\xdd\xb4\xfe\x59\xf1\x01\x08\xff\x0f\x1b\x25\x6e\x09\x32\xd3\xd4\x91\xe4\x1c\xf7\x8e\x25\xe5\x40\x60\xed\x7a\xfe\x34\xcd\x8b\xa6\x88\xae\xb8\x4d\x3f\x66\xd3\x21\xbe\x2b\xe5\x2b\xaa\x6a\xa1\xd9\x9a\x09\x57\x50\xee\x22\xa4\x5b\xc2\xee\x7f\x12\xba\xb8\xfc\x82\x26\xd6\xcd\xdb\x15\xd6\x72\x4e\xf0\x40\x19\x2c\xd3\x0c\x20\xa8\xc2\x07\xae\x16\x20\x90\x5a\x2c\x45\x07\xb4\x63\x03\xce\x89\x0b\xe8\x8c\x09\xf8\x6f\x42\x32\x1c\xf8\x5f\x33\x1e\x0f\x24\x00\x2e\xc0\xff\x80\xf3\xf7\xca\xf2\xdf\xde\xee\xee\x06\xff\x3f\xc6\xf7\x10\xf2\x9f\x4a\xb0\x04\x40\x68\x75\x48\x56\x9f\xc2\x55\x15\xa3\xa3\xc5\xfd\x30\xdf\xf2\xda\x96\x58\xb5\xb4\xd8\x58\x27\x38\x2e\x2d\x3a\xba\x84\x47\x19\x75\x9a\x22\xdf\x8a\xf7\x02\xfb\x63\x32\x94\x60\x28\x4f\x54\x91\x8d\x53\x8e\xb0\x3d\xc1\x87\x76\x55\xfc\x5e\x31\x1d\x4b\x6a\xac\xca\x8d\x7f\x60\x49\x52\xed\xec\x46\x94\xfc\x63\x8b\x92\xd2\x65\x2e\xd5\xc7\x3a\x9c\x15\x69\xde\x0f\x51\xe8\xb2\x2d\x62\xf2\x90\x5b\x76\xe0\xf6\x3c\x74\xe0\x98\xdd\x57\x27\xbb\xe6\xeb\x4b\xad\x95\xad\xb1\x4a\xd5\x79\x0c\x2c\xe1\x0b\xb0\x91\x68\x37\xdf\x5a\x5f\x95\xff\x13\xd7\x5d\x1e\x4c\xfb\xbf\x58\xfe\x7f\xbe\xf7\xbc\x12\xff\x77\x77\x67\xc3\xff\x3d\xc6\x37\xcf\xcb\x61\xee\x3d\x2b\xe0\x07\x3c\x8b\x21\x98\x70\x00\x20\x24\x02\xb7\xcd\x4f\x2f\x08\x95\x96\x74\x01\xa2\x19\xcd\x22\x78\x8a\x47\x10\x70\xd8\x14\xd0\x27\xd2\xe9\xea\x8e\x1d\x73\xa4\x1a\x17\x70\xc9\xb1\xab\xf6\x14\x35\xc3\xde\x45\x7f\x86\xf5\x5f\x4b\x95\x14\x7a\x91\x42\xfd\x78\x42\x04\x54\x21\x50\xec\xf8\x26\x37\xd1\x65\x29\x16\x87\x8e\x61\x81\x4f\x25\x89\x44\x21\xa5\x7a\x0a\xc9\xf3\xaa\x60\x09\x56\x8f\xc8\x74\x45\xf2\x74\x51\x9b\xf2\x99\xe9\x44\xc2\xac\x41\x2d\x58\x08\x28\xfe\xa6\x7c\x29\xb0\x64\x89\x51\x17\x30\xf0\xae\x87\x71\xc3\x7d\x41\xcb\x58\x9a\x5f\x1c\xd4\x6e\x3a\xc6\xa2\x3d\xad\xac\x1a\x79\x41\x44\x73\xd7\xee\x5c\x7a\xa2\x98\x11\xcd\x8c\x8e\xce\xa2\x2b\xd3\x31\x49\x07\x54\xc1\x32\x0e\x7f\xb7\x21\xec\xc2\xd4\x28\x41\xbf\xed\x22\xbc\x2d\xaa\x04\x49\x39\xaa\xc0\x6f\xbd\x89\x4f\xef\xb1\x8b\x25\xb6\xc0\xf8\xb3\xf6\xfc\xd7\xe2\x7f\x7a\xba\xf2\x61\x88\xc0\x42\xff\x8f\x4e\x39\xfe\xfb\xde\xf3\xbd\x0d\xfe\x7f\x94\xef\x3e\xf8\xdf\xc6\xea\x84\x42\x97\xa7\x08\x55\x31\x51\x22\x5e\x29\x69\xe8\x57\xdc\xb4\xa2\x19\x5a\x10\xf0\xd9\xd4\xd9\x4b\x01\xfa\xe6\x73\x7e\xd5\xf3\x3f\x9a\x86\x0f\xc8\xfb\xe1\xb7\x88\xff\xeb\xec\x57\xde\x7f\xd8\xe9\xec\x6d\xce\xff\x63\x7c\xa5\xf3\x5f\x23\xca\x5b\x8a\x1f\x5d\xa4\x75\xdd\xb9\x84\x63\x2c\xd9\xbb\xef\xd3\x2c\xfe\x1f\xd4\xba\x25\xbd\x74\x70\x24\x8a\xc1\xf1\xdc\x68\x84\xe6\xe8\x13\x94\x89\x1e\xd7\xea\x02\x5f\x86\x2f\x14\x4b\xe3\xd2\xb7\x69\x96\xc3\xd0\xb9\x59\x9c\xcf\x12\x2b\x3b\x8e\x27\x67\xa6\x6e\xc6\xb5\xfd\x46\x19\x51\x29\xbc\x5d\x5c\x49\x97\x11\x95\x38\xe0\x49\x17\x5d\x55\xcb\x31\xa3\xcc\x7a\x22\x4c\xd0\x67\xcf\x54\x40\x6d\xf0\xfa\x6f\xfe\x55\xf1\xbf\x30\xe0\x3d\x1e\xff\xd7\xe9\x74\x76\x2b\xfc\xdf\xfe\xfe\x06\xff\x3f\xc6\xe7\xb0\xff\x08\x00\x20\x7e\x4e\x5b\x80\xf2\x68\x7c\x1d\x65\xc7\xe9\x78\x8a\xef\xc5\xf8\xff\x38\xec\x04\xbb\xc1\x8e\xbf\x9a\x6d\xa2\x64\x1f\x0a\x8e\xc3\x69\x78\x19\x27\x71\x11\x43\xb1\xa3\xde\x89\xa8\x99\x07\xdf\x03\xfe\xf0\x0d\xfe\x51\x71\x8f\xad\x13\x63\x74\x7e\x99\x36\xb9\x6a\x48\x7d\xb2\xb8\xf0\xb2\x46\x9f\x44\xe3\x56\xef\x98\x49\xa3\xdd\x3b\xc8\x69\xa6\x2d\xcd\x5a\xea\xab\x30\x49\x2e\xc3\xfe\xa7\x23\xd5\xae\xe7\x8b\x56\xfc\xad\x55\xab\xf9\x5b\xe5\x21\x02\x02\x96\x66\xae\xea\x68\xe9\xdd\x8d\x25\x5b\xd7\x2a\x7a\xe3\xfe\x95\xdb\xc8\x84\xf6\x34\x74\x25\xa7\x97\x68\xf1\x61\x22\xa4\x7d\x45\xfd\x6a\x89\x
27\x65\xbc\xb8\xbc\xd6\x96\x5c\x6b\x39\x96\xb0\xd8\x61\xb2\x18\x15\x17\x11\x0b\xa3\x95\x68\x73\xde\x14\xf0\xdb\xa4\x6c\x3b\x6c\xb2\xb9\x10\x71\x2e\xbc\xd0\xe9\xa7\x7d\x39\xf2\x4f\xc1\xa8\xac\xc2\x61\x2c\x10\xe1\xc4\xc2\x6e\x44\xb8\xdf\xff\xab\xd2\xff\x69\x3a\x18\xc4\x79\x36\xa3\x28\x34\x97\xb3\xc1\xf0\xde\x37\x01\x16\xc9\x7f\x7b\xcf\xcb\xf2\xdf\xc1\xee\xfe\xc1\x86\xfe\x3f\xc6\x57\xbe\x28\x97\x0e\x5e\xaa\xcd\xff\x96\x37\xbf\xce\xf9\x6f\x4a\xb1\xe4\x15\xa1\x63\xac\xdc\xab\x36\xb0\x91\xff\x96\x92\xff\xbe\x36\x33\xf7\x12\x90\x01\x22\xe2\x91\xed\x59\x6d\xa6\x94\x0c\x25\x0b\xaa\xa3\xe4\x57\x38\xcc\xff\xcb\x0c\xc3\xe1\xe2\x3d\xc7\x3f\x7c\x89\x26\xdc\x83\xf9\x23\xda\xcc\x1d\xf8\xff\xba\xff\xc8\xfa\xbf\xfd\x76\x55\xff\xb7\xb7\xf1\xff\x7b\x94\xcf\x21\xff\xb9\x42\x40\x3c\x33\xfd\x7d\x6a\x43\x34\x6c\x55\x68\x84\x72\x10\xeb\xb9\xc2\x46\xac\x4d\x19\x16\x79\x3c\x99\x23\x34\x5d\x9e\xe4\x91\x5e\xd6\xb3\x4a\xd3\x0e\xf9\x9c\x49\x73\x2a\x9e\x57\xf9\x14\x45\xd3\x3f\x1b\x8d\xe2\x68\x3f\x6f\x31\xec\x95\x74\x8a\xae\x89\x33\xa2\x4b\xda\x92\x52\xe9\xed\x98\x2c\xfa\x05\x6a\xea\x27\x10\x44\xd8\xfa\xfa\x37\x06\x30\x52\x98\xd5\xe0\x9c\xf0\x25\xa2\x31\x96\xd7\x84\xe7\x96\x4e\xa9\x5e\xf5\xaf\xab\x5a\xe9\xcf\x41\x0e\xfe\x6d\xe9\x41\x15\xff\x63\x78\xda\x87\x7d\x04\x1e\x91\xfc\x6a\xef\xbf\x77\x76\x77\xda\x9b\xf7\xdf\x1f\xe3\xab\xd9\xff\x7e\x02\xeb\x12\x65\x20\x9c\xaf\x7b\xe9\xcb\xf8\x16\xd0\xff\xbd\xce\x41\xd9\xff\x6b\x7f\x7f\x73\xff\xfb\x71\x3e\x07\xfd\xb7\x82\x56\x5b\x84\xbf\x14\xb9\x9a\xc9\x3d\xd3\xf7\x63\x06\x98\x33\x00\x18\x8b\x01\xa0\x2a\xe1\xac\x18\xa1\x6d\x90\xa8\xb0\xa1\x99\xdd\x08\x86\x73\x88\x6e\x36\x4b\xe4\x95\x26\x58\xd0\xef\xd0\x4d\xc9\x78\x73\xce\xf7\x05\x01\x2d\xbd\xc5\xd6\x54\xd7\x96\x54\x82\x0c\x55\x9f\x1b\x45\xe8\xe9\x41\xfa\x7d\x1d\x65\x97\x46\x6d\x94\xd7\xe5\xdf\xe8\xff\xa6\x7e\xdc\xa0\x44\x5a\x37\x9a\xe8\x16\xe8\x26\x69\xd1\x55\x52\x45\xc7\x5b\x37\x5e\x19\x50\x3f\x2f\xa7\x90\x42\x36\xfa\xda\x46\xd9\xe2\xd8\x9f\xae\x51\xcd\xa4\xf7\xb7\xb3\x77\xc7\x5d\xc9\x05\x9d\xb1\xab\x99\x33\xb5\xe8\x4f\xdd\x19\xb3\x81\x91\x31\x8e\x07\x83\x24\xba\x09\xb3\xc8\x95\x66\xb5\x51\x24\xb9\x88\x3b\x6d\x26\x21\x1f\x64\xd4\x15\x53\xa8\x40\x18\x26\xc0\x81\x87\xec\x49\x4e\xbe\x7d\x6b\xec\x59\x55\xce\x97\xb1\x93\xf8\xcd\x42\xfb\x66\x81\x73\x85\x99\x35\xb7\x16\x95\xbc\x44\x75\x89\xe5\xd0\x8b\x73\x4f\x60\x40\x32\x66\x13\xf5\x13\xbb\x21\x73\x96\x47\x95\x10\x34\x6b\xbe\x63\xe0\x9c\xa3\xc3\xa7\xaa\x6e\xc4\xa6\x23\x9f\xb1\x59\x76\xb2\x09\xce\x46\xee\x03\x9e\xba\xb5\x47\x3c\xe7\xa4\x99\x23\x99\x8a\xce\xc5\xfa\xcf\x3d\x83\xab\x8f\xe5\x77\x1e\x05\x86\xfd\x36\xd0\x40\x69\x00\x80\xc5\x43\xe5\x71\x8a\x11\x9a\x92\xc8\xf8\x59\xbb\x5d\x35\x63\x7d\xd8\xbd\xd4\x23\xbf\xcf\x0a\x6e\xa2\x39\xfd\xc6\xdf\x62\xfe\xff\x12\xf8\x3b\xdc\xf0\xf5\xc5\x80\x85\xfe\x7f\xbb\x95\xf8\x0f\x50\x7e\xc3\xff\x3f\xc6\x77\x3f\xfe\xdf\xc5\xfe\x7f\xcb\xf0\xb2\x91\x02\x1e\x46\x0a\x80\x15\x15\xfe\x80\x12\x1f\xcf\x59\xce\x86\xf4\x0d\x34\xc5\xb1\x95\x0c\xf7\xb3\xcb\x9f\xa3\x7e\x21\xe4\x0e\x6e\xeb\xdc\x7a\x3c\x9c\x66\xa1\x1a\x5c\xed\xa5\x71\xfb\x4a\x8f\xe3\x2a\xcf\x06\xcb\x3f\xf6\x57\x83\xff\xcb\xdc\xee\xbd\xae\x03\x2e\xc0\xff\xbb\xed\x9d\x83\xb2\xfd\xff\xf9\xfe\x26\xfe\xcf\xa3\x7c\xab\x88\x5d\xe5\x10\x00\x75\x0e\x00\x76\x0b\xf3\x7d\xb3\x00\xca\xfa\xe9\x78\xaa\x9e\x5f\x0e\xe8\x7a\x48\x09\x45\x86\x09\x3e\x68\x3a\xe8\x65\x29\x06\x28\x65\x99\x0e\xe3\xf7\xe1\xbb\xdd\x2d\xe1\xbf\xb5\x7c\x6b\xa2\x82\xd1\x9a\xab\xb1\x0d\x25\xaa\x31\x02\x4d\xb3\xf8\x1a\xd6\x0d\x7a\xe8\x7a\x57\x61\x92\x23\x81\xa1\x0d\xea\xc9\x8c\x57\xe8\xd5\x4d\x9b\xac\x4b\xa0\xa5\x27\xce\xa2\xc1\xcb\x2c\x9d\x9a\x1e\x98\
xd2\x8e\x74\xf4\xe6\x8d\x94\x95\x4d\x8e\xa3\x14\x26\x39\x80\x7d\x3a\xca\x4f\xd3\xc9\x99\x8a\x33\x2e\x60\xc3\xd5\xe6\xe9\xab\x8b\x0f\xdf\x9e\x9c\xbe\xfc\x70\xfe\xea\xec\x87\x93\xe3\x57\x0d\xcb\xb6\x87\xef\x91\x9f\xb2\x3c\x63\xd9\x81\x8c\x74\x5d\xf0\xa4\x77\xac\xe7\x42\x4f\xa5\x9f\xbc\xd4\x09\x57\x39\x13\xe6\xb2\xb4\xbf\x78\xf4\x1e\x5f\xb8\x54\x43\x1e\x87\xb7\x5d\xef\x60\x7f\x7f\x77\x5f\x48\x42\xe3\x78\x22\xdf\xaa\x47\x85\x60\xd7\x7b\x0b\x08\xeb\x0c\xdb\x69\x94\x5d\x30\x39\x9f\xf2\x8e\x26\x77\x8d\x92\xa5\xca\x18\x96\x73\x86\x3d\xf9\xd2\x7d\x65\x10\x62\x08\xd6\xe2\xc9\x77\xaf\x71\x26\xaf\x61\xd3\x39\x4e\xaf\x7a\x05\x9b\x26\xfa\x1e\x78\x81\xf5\x96\xc4\x9e\xa8\xc8\x5d\x61\xbe\x88\x08\xde\xc4\x93\xd9\x6d\xd7\x55\xd0\xa3\x48\xa0\x49\xc4\x5a\x18\x29\xe2\x7e\xa5\x5b\x67\x3d\xf1\xd0\xd4\xc1\xf8\xe9\xd7\x20\xbd\x99\xdc\x84\xd9\xe0\xa8\x77\xd2\xd0\x7a\x5d\xfa\x53\x06\x9b\xa7\x1f\x20\x88\x23\x57\x17\x0d\x2a\x5a\xb6\xba\xd7\x1e\x9a\xee\x77\x1e\xea\xe4\xf1\xdf\x9b\x8a\x6d\xbe\x75\xbf\x1a\xfe\xef\x61\x0c\x7f\xe2\x5b\xc0\xff\x75\xf6\x3a\x65\xfb\xdf\x5e\x7b\x13\xff\xeb\x71\xbe\x45\xf2\xff\x9c\x47\x6b\x45\xb8\xbc\x8d\xc5\x6f\x63\xf1\x5b\x6c\xd7\x7b\xec\x4e\x37\x66\xba\xaf\xd6\x4c\x57\xb3\x8b\x5f\x85\xa9\xee\x4f\xa7\x79\x9a\x43\xff\xef\xaf\xf8\x17\xdf\x42\xfd\x7f\xbb\xfc\xfe\xcb\xfe\xde\xfe\x46\xff\xff\x28\xdf\x83\xd0\xff\x8d\xca\xff\x2b\x50\xf9\x6f\x74\xfd\x9b\x6f\xf5\xaf\x06\xff\x8b\xad\x0d\x79\x6b\x7f\xdb\xfb\x7f\x3b\xcf\xf7\xcb\xf7\xff\x0f\x3a\x3b\x1b\xfd\xff\xa3\x7c\x4e\x9d\xab\x79\xac\x39\x84\x9a\x76\xf4\x2c\x61\x88\xd2\x65\x0f\x27\x62\x5f\x05\x71\xfc\x29\x30\xfc\xe2\x0b\x2c\xf6\x0a\x1d\xad\x1d\xb5\x77\x21\x86\xad\x9e\x7f\xd1\xf5\xe3\xc5\xff\x7c\xde\x79\xee\x88\xff\xb9\xd1\xff\x3c\xca\x57\x51\x7b\xf3\xe6\x9b\x77\x7e\x31\x74\xcf\x53\x90\x42\xc9\x42\x80\x6f\xe0\x0e\xe2\x7e\xc1\xc0\xe5\x3d\x05\x41\xd6\x91\xae\xe2\x48\x8a\x47\xc6\x59\x6d\xed\x19\x91\x24\x49\xfe\xb4\xae\xf0\x60\x9c\xf3\x22\x3d\x2f\x32\x7c\x94\x4b\xd4\x08\x64\x2c\xc8\x2d\xcf\x7f\xff\xb2\x67\x84\x34\xf7\x9e\x7e\xc0\xe6\x30\x22\xbb\x1e\x03\x75\xa7\x7a\xab\x89\x53\x61\xd6\x54\xb3\xaa\xab\x59\x09\xa4\xe8\xbe\xdd\xf6\x06\x05\xe5\xf5\x98\xda\x18\x4a\x68\xf3\x83\x31\x24\x2d\x40\x9b\xfd\xe1\x1c\x2c\x44\xcc\x26\x1a\xa3\x6b\x8b\x67\x5b\x4a\x84\xb6\xb1\xee\x7d\x31\xef\xba\xd8\xf7\x5e\x18\x78\x3d\x2c\xcc\x00\xe1\x46\xbd\xa5\x17\x4b\xd6\x7a\x85\xb6\x62\xed\x36\xba\x7b\x36\x8e\x32\xe3\xe1\x68\xd9\xab\x51\xe5\xe2\xb8\x37\x2f\x7b\xeb\x1e\x43\x93\xb6\x5c\x99\xf5\x94\x9e\x80\xc6\x23\x2c\x82\xf9\xf8\x6f\xd2\x70\xf0\x6d\x98\xe0\xea\x65\x7e\x65\x18\x54\x5c\x35\xce\x21\xd0\x09\x4b\x58\xe9\xb5\x4b\x8b\xdd\x2f\x1c\xfd\xbc\x67\xcf\xed\xcb\xf2\x0f\x01\xb1\xeb\x01\xcf\x54\x1a\x4d\xe5\x28\xeb\xd0\x5e\xe9\x58\xcb\xe2\x70\xe2\x25\xa6\x8b\x6e\xa7\xa9\xf9\x9c\xb9\x11\x37\x57\x6e\x8a\x42\x8a\x18\x2e\xd7\xae\x37\xe8\x59\x21\x74\xcd\x38\xb8\xe5\x10\xb8\x05\xc5\x98\xeb\xa9\xa0\xbc\x94\x6f\xdd\x71\x14\x73\x53\x61\x78\x8d\x21\xf8\x00\x94\x7e\x05\x3b\xbb\xaa\x97\xa6\x87\x2f\x3b\x57\xc6\x28\xd2\x44\xac\xdd\xfa\x82\x95\xfd\x5f\x2e\x81\x89\x0a\x03\x65\x09\xa0\x97\x38\xfe\x46\xf9\x73\xd2\x30\x9e\x91\xb9\xd7\x40\x9a\x35\x25\xba\x2b\xc3\x75\xfd\xfb\x20\x73\x06\x88\x6a\x54\x7c\x84\xe8\xa4\x67\x8c\xc9\x48\x5c\x6f\x18\x26\xa9\x53\x34\x49\x11\xd8\x47\xa1\x49\x4d\xe8\x6e\x43\x97\xbe\x5e\xba\x04\x8c\xd8\x86\x2e\xb9\x17\xf6\xdf\x8f\x2e\xd9\x47\x5b\x16\xff\xf7\xa3\x4b\x24\x1d\x6c\xe8\x12\x7e\x7f\x74\xba\xb4\xd1\x1a\x3f\xe0\x57\xd5\xff\x28\x13\xf3\x63\xf9\xff\xec\x1c\xec\x57\xde\xff\xdb\x6d\x6f\xe2\xbf\x3d\xca\xb7\x8c\xae\x06\x20\xe2\xdd\x54\x69\x1f\x57\x7c\xff\xf5\xe2\xcd\x39\x57\x76\xeb\x47\x0c
\x32\x70\x4f\xd5\xef\xd3\x35\x54\xbf\xaa\xce\x42\x06\xef\xe9\x9a\xba\xdf\xa7\xb5\x7e\xd4\x06\xfe\x93\x6b\xae\xb1\x60\x07\x4b\xa2\xbf\xfd\x6f\x8d\xec\xaa\xe7\xff\x3a\x4c\x62\x74\x90\x91\xaf\xe8\x36\xfb\xd9\xe0\x7e\xa8\x60\x91\xff\x5f\xdb\x61\xff\x39\xe8\x6c\xce\xff\x63\x7c\x4f\x04\x43\x82\x4f\x75\x26\xd1\x04\xfe\x49\xd3\x4f\xb3\xa9\xe7\xcf\xb3\xdf\xfb\x9e\x6f\x5c\x30\x83\x5f\xf8\xff\x5b\x5b\xde\x0e\x81\xe9\x13\x66\xdd\xaf\xd2\x19\xc0\xad\x54\x0b\xeb\x0c\x52\xbc\x72\xa6\x3f\x0f\x7f\xc8\x80\xcb\xf4\xaa\x8e\xcf\xbe\xf6\xf7\x6d\x86\xb4\x17\x0f\xd2\x12\xf1\x9b\x6b\xb7\xf4\x56\x79\x77\x3d\x48\x23\xf7\x9b\xd6\x39\xbb\x85\x5d\x48\xb7\xb0\x7b\x34\xa5\x90\xfd\xfd\xda\x38\x47\x7f\xb6\xfb\x34\x61\x3d\xc5\x5c\x6d\x48\xbc\x1a\x5b\x17\x09\xdc\x28\x09\x27\x63\x14\xe6\xff\x8c\xee\x64\xc7\xda\x44\x11\x6c\x89\x72\xff\xa7\x34\x36\xab\x08\x5e\x88\x30\xda\x93\xa8\xdc\xf5\x4b\x90\xe1\x0f\x40\x83\x29\xaa\x1e\xd9\x54\x44\x9b\xd6\x88\x30\x14\xb8\x2c\x42\x53\xb3\x46\x22\x6f\xda\x78\xfe\x99\xfc\xeb\xf8\xec\x65\xee\x61\xd8\xf6\x71\x9c\xe7\xe8\xd2\xe5\xf5\x88\x26\x79\x02\xc5\x7b\xc5\x48\x3d\x65\x8b\xe8\xde\x23\xf2\xe8\x5d\x46\x57\xf8\xd8\xad\x28\x84\x13\x2a\x46\x71\xce\x99\x01\x9d\xfa\x85\x13\xdb\xf0\xea\x5f\xf9\x27\xe9\xff\x35\xb3\x7a\x0f\x1b\xf8\x53\x7c\xf3\xe9\xff\xee\xee\x7e\xe5\xfe\x67\x7b\xff\x60\xc3\xff\x3f\xca\xf7\xc4\x13\x61\xed\x3d\x86\x00\x0f\x8e\xbc\x7c\xca\xbe\x41\x0f\x37\x54\x1f\xe8\xf2\xbc\x27\x52\xf1\x92\x63\xc4\xfe\x50\x3d\xf9\xd0\x40\x35\xce\xb0\xcb\xbe\xea\xd3\x59\x92\xf4\x44\xb4\xcc\x93\xab\xd3\xb4\xe8\x01\xf5\xc4\xd7\x6a\x1a\x4f\x00\x4d\x1c\x13\xcb\x3b\xa3\xc7\xb4\x23\x4f\x3f\x05\x0c\x99\xfa\x07\xf6\x2d\x4c\xd1\xea\x6e\x19\x54\x0d\x27\xf8\x50\x75\x14\x43\xcd\xcc\x78\x06\x07\x5f\x57\x78\x19\x46\xe3\x74\x72\x4e\x4e\xcc\x8e\x67\x72\x9e\x78\xa7\xb3\x31\x30\xeb\x5e\x7a\x45\x6e\xc2\xf8\x5f\xbb\x7f\xef\x59\x3a\x49\xee\x70\x4e\x09\x90\x05\xef\x66\x04\x4c\x11\xbd\x00\x78\x78\x68\x34\xb5\xd5\x30\x5f\x37\x6e\x53\xcb\x47\xea\xb5\x72\xb3\x39\x33\xfc\xe8\xb3\x28\x00\xdc\x8b\x2b\xfc\x73\x18\x0d\xa3\xac\x99\x4e\xa3\x2c\xc4\xc7\xfb\xf2\x78\x10\xf5\x01\x0e\xe3\x09\xba\xe4\x41\xe1\xad\x92\x93\x88\xf7\xeb\x97\x39\x9d\x08\x9d\xb0\x6e\xff\x2a\x4e\x80\x3b\x43\x94\x6d\x94\xba\xbc\xf3\xfa\x00\xee\xe9\x58\x94\xdf\xd2\x42\x97\xa3\x79\x58\x9e\x9a\xc1\x8f\xa3\x7c\xa4\x47\x8a\xab\x3e\xcd\x52\x90\xee\x46\xd1\x2c\xf7\xf2\x7e\x06\x12\xe2\x64\x88\x8d\x43\x13\x47\xf3\xe7\xd0\x83\x4e\xe6\x0c\x1e\xb3\x5d\xa3\x86\x86\xdf\xd4\x0e\x5c\xb0\x04\x00\x91\xd6\x98\xf1\x81\x21\xd8\xee\x2b\x54\x8f\x61\xdb\x62\xc9\xa9\x3d\xfd\xd0\xfc\xb1\xaa\xdc\xf5\x7e\xfa\x17\x49\x78\x4f\x28\x96\x4d\xde\x6d\xb5\x06\x69\x3f\x0f\x50\x84\x1d\xa4\xc3\xd1\x2f\xc0\x7b\x8c\x5b\x83\xe8\x3a\x4a\x70\x1b\x73\xc8\x1d\xe2\xa5\x83\x7c\xd0\x9a\x4d\xe2\xdb\x0f\x79\xda\xff\x14\x15\xad\xff\x2a\xc2\xcb\x43\xbc\x60\x29\xda\x6a\x8a\xc3\x04\xd9\x61\xd1\x84\x85\xbb\xbd\x13\x39\x7c\xda\x3c\xe0\x61\xa0\xff\x16\xe5\x77\xdb\xc1\x4e\xb0\x2f\xf2\xc3\x6c\x88\x83\xf2\x9b\xb9\xbf\xed\xf9\xcd\x19\xfe\x3b\x1b\x4c\x9b\x59\xd4\xbf\xee\xbe\x68\x77\xf6\x29\x01\xba\x6e\xc2\x29\x1b\x14\x69\xb7\x25\x86\xc0\xff\xf1\xe5\x6c\xf8\x42\xe1\x5b\x74\x2c\x12\xd2\xf6\x13\xd2\xeb\xf2\xb8\x06\xf9\x80\xcb\xab\x1c\xcf\x1b\x63\xd9\x5e\x58\x8c\xba\x5e\x4b\x65\x5a\x6b\x2e\x2e\x29\x7a\x3a\x84\x36\x2e\x3a\x3e\xe5\x41\x3a\xc2\x78\x12\x17\x7a\x65\xc9\xdb\xd8\xb5\xe4\xd6\x4e\xfc\x20\xee\x3d\xea\x6d\xa8\x1f\x22\x5d\x60\xc5\xf1\x19\x83\x9e\xf2\x78\xaf\xc3\xac\x95\xcd\x26\x2d\xde\x9a\x26\xaa\xa5\x33\x80\xae\xf2\x04\x4a\x23\xd4\x80\x03\x8c\x1d\xc6\x37\x46\xa0\x8c\xf0\x52\x24\xb1\x50\x00\xf5\x21\x80\xfa\x28\xbd\x41\x4c\x94\xa4\x3
7\x08\x44\x76\x13\xc6\xb8\x2f\x00\xc1\xf8\xbc\x42\x4d\xdd\x42\xee\x53\x0d\x0d\xae\x1e\xf0\x56\x8a\x79\x03\x36\xef\x2e\x9d\xe1\x45\x5a\x0c\x45\x9e\x9a\x3d\xc3\x7f\x90\x54\x8b\xc6\xcf\x22\x14\xdb\x07\x9c\xda\x55\xa0\x3a\x84\x65\x9f\x5d\x12\x88\x2a\x11\x5f\xfc\x97\xeb\xb7\x0e\xbe\x79\xde\x29\x2d\x6c\x75\x8c\x6a\x3d\x05\x70\x5e\xce\xf2\xbb\xcb\xf4\x16\xe0\x72\xb7\x1d\xb4\x55\x2e\x74\x33\x0e\x11\xd9\xfe\xe4\xe7\x23\x82\xce\x3e\xfe\xdb\x1f\x8d\xe1\x00\x37\xcf\xae\xbd\x83\x9d\x1d\xaf\x85\x27\xa7\xf5\x37\x05\x87\x5e\x0d\x24\x1a\xb0\x08\x15\x8c\x64\x1b\x10\x45\x26\x90\x03\xc6\x0e\x88\xaf\x5e\x9e\x9e\x8b\xa8\x04\x81\x77\x04\x18\xfc\x0e\xd7\xf1\xa3\x75\xd3\x1b\xa9\xc8\x47\x26\x61\x93\x5c\x92\x27\x21\xcd\xbe\x8e\xb3\xbc\xf8\x11\x56\xee\x7b\xae\x51\x01\x12\x5c\x84\x1e\x90\xb5\x73\x75\x9d\xaa\x9c\x24\xb7\x5d\x2f\xea\xf8\xee\x2c\x1a\x02\xaf\x9e\xdd\x81\x18\xc1\xa5\x50\x15\x04\x54\x90\xd0\x9b\x7e\x6b\xc3\xe3\xc7\x36\x1a\x8e\xf8\xf4\x16\x1d\x94\xb7\xce\x9f\x54\x82\xdc\x33\x1d\xb2\x63\xf0\xef\x60\x47\xef\x4b\x6f\xea\x04\xde\xc9\x70\x92\x0a\x38\x13\xd4\x1e\xaf\xc7\x10\x7c\xfd\xdd\xeb\x04\xbb\x5e\xcb\xd3\x5a\x2d\x48\x82\xdd\x7e\x11\xdc\x36\xcc\x46\xba\xd4\x1b\x09\x38\x00\xb9\xe8\x41\x0a\xc8\xa7\x68\x42\xf9\x82\x30\xc0\x5d\x54\x6c\x03\xdc\xd2\xd5\x00\x17\x5c\x82\xbc\xd3\x07\x02\xde\x42\x7d\x5d\x13\xab\x62\xcd\x16\x72\x0d\xad\x76\xc7\x35\x61\xfb\x09\x1e\x73\x21\x70\x7e\x70\x48\xe0\xbc\xf6\x23\x2f\xac\x46\x75\xf4\x40\xba\xf3\xe4\xfb\x45\x78\xaa\xf1\x61\x5f\x38\x74\xde\xf1\x49\xeb\xf8\x25\x13\xfb\x3e\x53\xa9\xc0\x7b\x85\x47\x5f\xbd\xbc\x04\xed\x57\xdf\x40\x22\x0e\x07\xd6\xf5\x08\x28\xe0\x35\x2a\x16\x7b\x71\x02\xf3\x87\x93\x1a\x0d\x33\x6a\xa7\x31\xc5\x14\xe7\xbe\x15\xe9\xa7\x68\x22\x78\x24\x40\x0e\xe9\x70\x98\xc8\x06\x5e\x86\xf9\xe8\x32\x0d\xb3\x01\x03\xa8\xfc\x25\xeb\x42\x8f\xaf\xa8\x35\xcf\x8c\x23\xe8\x5d\x45\x61\x31\xc3\x0b\x60\x66\x2a\x85\x6a\x48\x66\xc3\x58\xda\x78\x2b\xe3\xa8\x84\x1f\xac\x29\x47\x3a\x4e\xeb\xad\xe1\xae\x92\xbf\xf9\x4e\xa2\xf9\xee\xb3\x85\xaf\xc5\x63\xcb\x7e\x1f\xe5\x79\x75\x92\x99\x2f\xf3\xf9\x2c\xe8\x64\x3e\x2e\xfe\xf8\x2e\xb7\x32\xbe\xbd\x93\xcc\xe6\xb6\x7c\x6a\xd6\xbb\x49\x67\xc9\x00\x79\x40\x8e\x80\x37\xc0\xcd\x47\x36\x4e\xdf\x2e\x40\xe4\x89\x12\x2f\xb3\x3f\x08\x81\x12\xcc\x8b\x54\xa2\x4e\x7a\x9d\x0a\x5a\xe3\x46\xa0\x81\xb0\x50\x3d\x00\x6c\x84\x00\xd2\xc8\x60\xaa\x9b\x0c\xdb\x8a\x18\xc4\xc0\xf8\x92\xe2\x9a\x90\xbf\x44\x5d\xc6\x95\x07\x19\x47\x04\x19\x5d\x6e\x1d\x78\x56\x53\x7f\x44\x50\x48\x9c\xa7\xda\xf2\xd8\xc8\xc6\x26\xf5\xf6\xdb\xfb\x22\xf8\xe0\x0a\x01\x33\x1a\x77\xf3\x6d\x65\x35\x35\x55\x08\x28\xda\xe2\x16\xef\x73\x85\x59\x9b\xd7\xc9\x1c\xbe\xcd\x9a\xa9\x8b\x81\xb3\x18\x4f\xbc\xf1\x82\xea\x85\xf7\x74\x31\x13\x67\xeb\x42\x6c\x90\x76\x3e\xcb\x86\xf4\xab\x2c\x42\x00\x1b\x75\x0d\xec\x1c\x70\x10\x4f\x1a\xea\xef\xae\x05\xe3\xc7\x67\x2f\xeb\xd6\x51\xed\x9b\x06\x5e\x06\x5f\x5f\x6c\xa3\x6f\xb5\x24\x26\x57\xbf\x2b\x89\x7d\x56\xa2\xc9\x75\x9c\xa5\x13\x3c\x96\x87\x30\xb8\xc1\x8c\xd8\xe6\x6d\x64\x98\xd3\xc1\xa1\x16\xaa\x96\x1c\x08\xa6\x9c\xf4\x90\xb7\x1a\xd0\xaa\xff\x53\xa3\x69\x31\xb2\xd2\xfd\xdf\xe9\xec\x32\x89\xf3\x51\x34\x10\xfa\x30\x69\x80\x75\x1c\x74\x6c\xbb\x27\x8b\x9b\x2d\x4b\xa3\x07\x9c\xb3\x7e\x3a\xbd\xf3\xf8\x76\xad\x77\x05\x7c\x7f\xe0\xbd\x4e\xb3\x31\xf0\xaa\x7a\xf8\xd2\x87\x1c\x13\x54\xbb\xfa\x1c\xb3\x1a\x49\x9e\xc7\xdc\xf0\x19\xc2\x72\xc8\xb7\xbd\x03\x5a\x94\xc1\x26\x0a\x3c\xdb\x20\x20\x54\x4c\xa6\x38\xec\x62\xe5\x90\xf0\x07\xc4\x63\x71\x3e\x1f\xcc\x9b\x38\x49\x10\x41\x4c\x31\x94\x28\xe3\x87\x69\x12\x34\x90\x19\x43\x15\x16\x4b\x90\x33\x91\x45\x6c\x05\xd0\x0d\xc4\x63\x50\x23\xce\x50\x
a6\x09\x45\xb0\x8d\x71\x38\x65\xd4\x30\x4a\x13\x10\x18\x65\x72\x00\x00\x9d\x10\x57\x88\xad\x1e\x5d\x01\xe4\xd3\xa8\xc4\x20\x46\xc0\x1f\x5e\x46\x20\x3b\x52\xdb\xd1\x60\x1b\x39\x5b\x2a\xc0\xd5\x73\x66\xeb\xe4\x1c\xe0\x8c\xcc\x72\xd6\xb0\x45\xde\x47\xcd\x04\x1f\x65\xc3\x19\x02\x4e\xfe\x91\x2e\xe3\x32\xbe\xd9\xf6\xa2\x61\x17\x3a\x75\x14\xc3\x64\x00\x97\x66\x53\x9d\x82\x80\x86\x88\xff\xe0\xba\x1c\xb6\xb8\xfb\xd6\xe0\x0e\x7e\xc6\x7d\x9a\x85\xaf\x2a\xc1\x08\xac\x1f\x01\xb4\x99\xdd\x11\x2c\x1d\xde\x44\x97\x7e\xe3\xda\x60\xca\x35\x8b\x43\x10\xd6\x6f\xe2\xf2\x35\x9e\xd8\x6c\x9a\xdf\xc2\xd4\xdc\xa7\x74\x76\xc8\x11\x51\x4a\x74\xf5\xbf\x1a\x4f\x13\xfe\x85\x5e\x85\xc3\x05\xf2\x2b\xce\x2d\x7f\x75\x34\x4e\x65\xcd\xd6\x75\x84\x94\x86\x43\x42\x61\x5e\x93\x94\x16\x03\x45\x35\x24\x24\x2a\x4e\xbc\x51\x96\x42\x04\x8b\xca\x87\xf2\x09\x02\xbc\x27\xad\x7c\xac\x21\xe0\xcd\x0b\xbd\x04\x64\xc0\x51\x3c\x05\x32\x4c\x67\x93\x9f\xc1\xa0\xd4\x86\xc9\x13\x4a\x5d\xab\xca\xb0\x59\x5b\x94\x59\x20\x4b\x72\xec\x38\x93\x37\x58\xb2\x24\x89\x1a\x2f\xea\xa5\x97\x78\x8c\x58\x9b\x7d\x87\x75\xf3\x56\x03\xff\x65\x1e\x4d\x4e\x10\x53\x70\x96\xb0\x25\x13\x0f\x04\xd7\xec\x0e\x4e\x22\x01\x1d\x82\x37\x50\xf9\x68\x42\x6b\x23\xcb\x03\x99\x8b\x92\x2b\xef\x19\xaa\xc8\x8a\xd9\x74\x5b\x2c\xee\x8c\x59\x9c\x6d\x6c\x02\x96\x05\xc8\xe2\x68\x56\x60\x2c\x9a\x6d\x92\xec\xf2\xd4\x4b\x27\x5b\x48\x64\x87\x11\x2c\x26\xb3\x22\x65\x4a\x8e\xeb\x4e\xc3\x41\x19\x31\xf4\x30\xc6\x0e\xae\x18\xe0\x11\xef\x19\x4a\x13\xd0\xc2\x36\x30\xc4\x05\x11\x72\x38\xae\x52\x06\x4e\xa0\xf5\x30\xff\xa4\xc8\xe7\xcf\x39\x73\x74\x58\x11\x68\x36\x26\x89\x5f\xec\xc4\x20\xea\x5d\x09\x14\x85\xc5\xeb\x46\x83\x92\x3c\x72\xb2\xa8\xca\x87\x45\xa0\x27\x2c\x41\x8c\x48\xc8\x73\x05\xf8\x3c\x1a\xf0\x10\xd7\x8b\x8a\xb2\x46\xfd\xe5\xab\x6f\xdf\x7f\xb7\xed\xf5\x8e\x4e\x4f\x8e\xb7\xbd\xd7\x47\x17\x47\x6f\xb6\xb9\xea\xb6\xf7\xe3\xd1\xd9\x29\x2f\xc9\xc9\xe9\xeb\x77\xcc\x75\x50\xd5\x2e\x97\x68\xc8\x17\x55\xe4\x02\x5d\xa4\x02\x29\x97\x00\xc7\x89\xab\xed\x19\xd0\x5a\xe2\x80\x6e\x32\x64\xa4\x27\x06\x3a\x39\xa6\xe5\x44\x08\x12\x98\xda\x7b\x76\xfc\xe6\xf5\x56\xa0\x3b\xc5\x3a\x62\x3b\x60\x0d\xff\xdf\xf9\x3b\x18\x36\xee\x0b\x2d\xae\x6b\x55\x65\xdd\x13\xd6\x9e\x0d\x61\x71\xf4\x2e\xc0\x9e\x4e\x30\xc2\x12\xca\xd9\x83\x6d\xa1\x5e\x63\xc4\x4f\x1d\x53\x09\xc4\xbc\x78\x84\xa2\x70\x10\xd4\xef\x91\x1a\x9b\x02\x17\xe2\xcb\x00\x02\xee\x40\x76\x00\xa2\x9a\x22\x11\x02\x3e\x89\xc0\x11\x2d\xeb\xf1\xd5\x1d\x80\xd3\xe5\xec\xea\x8a\x38\x91\x73\x7c\x30\xc6\x1e\x33\x11\x00\x4e\x42\x15\x1e\xeb\x24\x73\xea\x61\xa2\x74\x83\xd0\x17\x20\xdc\x49\xa4\xc9\x14\x11\x14\x7c\xf4\x07\x87\x30\x8e\xc6\x69\x76\x27\x0d\x24\x38\x44\xbc\xfe\xcc\xed\x43\x43\x63\x89\x59\xd8\x1f\x10\xa6\x0a\x8c\xd0\x74\x56\x80\xd8\x35\x81\xe3\x01\xc8\xac\x0f\x68\x2d\xdf\x66\x42\x28\xc6\x82\x34\x69\x88\xbc\x22\x48\xad\x20\x12\x4d\x51\xe4\xa7\x25\x81\xe3\xaa\xa4\x7d\x6b\x62\xc0\x09\xed\xec\x88\x8c\xd7\x8a\xf7\x5a\x0e\x45\x30\x80\x11\x02\x6a\x3d\x51\x8c\x1b\x35\xc6\xbf\x34\x07\x88\xcd\x33\xbd\xef\xd3\xcb\x3f\x9e\xdf\xd9\xd9\xd9\xde\xdd\xd9\x69\xee\xee\x74\x7c\x55\x04\x70\x7a\x76\x17\x16\xe8\x48\x80\xa8\x52\xf1\x43\x42\x24\x1d\x08\xac\x81\x83\x1e\xe7\x6a\xd4\x11\xd0\x54\x97\x96\x6d\xb9\x81\x27\xf1\x98\x96\xbe\x09\x8b\xdd\xbc\xa2\xc6\xf8\xb2\x21\xa6\x8d\x00\xb6\x58\xa9\x84\x73\xc2\x3c\xc9\xf9\x58\x78\x09\x3f\x01\x9f\xe3\x14\x59\x0e\xf1\xb2\x13\x7f\x13\x0e\x36\xf8\xab\x76\xfa\xc3\xa1\xbe\xba\x0d\xc7\xd3\x24\x32\x3c\x93\x49\x11\x91\xc4\x00\x4a\x18\xf6\x4c\xa8\x2f\xb2\x54\x36\x24\xc6\x52\xd3\xa3\x51\x70\xb5\x1e\xb1\xaf\xe6\xd1\x10\x35\xe5\xb0\xfe\x83\xb0\x5f\x58\xd9\x47\xa6\xf9\
xbe\xd4\x0d\x0d\x18\x83\x9b\x4d\x8a\xe6\x05\x51\x4e\x9a\x77\x43\x3d\xd8\xce\x12\x28\xe9\x3a\xbb\x82\x46\x01\x59\x24\xce\xd7\x6b\x77\x9e\x07\x3b\xf0\x7f\x6d\xd2\x3b\x52\x6e\x3c\xb9\x4a\x66\xb7\x83\xcb\x4a\xd9\x24\xed\x87\x09\xaa\x60\xba\x2f\x76\x5e\x7c\x23\x72\xb5\x13\x27\x3b\x7b\x6b\x05\xb2\x64\xab\x01\x98\x7a\xc8\x77\x74\xe5\x13\xf2\x0d\x09\x87\xf9\x60\x5e\x1f\x38\x9e\xc6\x30\x49\x2f\x2d\xae\xc8\x63\x9e\x86\xd3\x83\xfe\x28\xea\x7f\x9a\x44\x37\x42\xe3\xe1\x97\xb2\x51\x7d\x0a\xc2\xdf\xe4\x0e\x68\x72\x3e\xcb\xc3\x61\xe4\x97\x05\x0e\xc5\xbc\xa2\xb8\xd4\xb7\xe9\xa2\xcd\x77\x84\x72\x10\x88\x13\x34\x43\x0a\x88\x52\xb4\xf1\x57\x60\x14\xe3\x49\x98\xdd\x61\x3d\xc0\x31\x5a\x85\x2a\x22\xbd\x98\x29\x93\xda\x53\x92\x45\x80\x18\x30\x90\x5b\x8b\xc7\xd4\xb4\xc6\xd4\xea\x27\x71\xab\xf1\x84\x35\x25\xfd\x59\x06\x38\xe6\x32\x43\x41\x03\x47\x85\x43\x12\xe6\x9f\xae\xf7\x11\xd5\x31\xca\x30\xdc\x6c\x02\x35\x3c\xf4\x1d\x9c\xe6\xe1\xaf\x26\x93\xa9\xa5\x23\x29\x56\x9a\x8f\x78\x48\x21\xa7\x89\x0a\x12\x74\x03\xdd\x6e\xe2\x01\x0e\x88\x18\x1e\x12\x09\xfd\xe2\x7f\x6c\xb8\xf8\x59\x66\x34\xcb\x3c\xed\x8a\xdd\xf9\xaa\x89\x52\xaf\x3e\xab\x54\x94\x7c\x06\xab\x90\xc5\xb8\xd2\xa5\xdd\xd2\x3c\x91\xde\x2d\x10\xeb\x4a\x6c\xf0\xf9\xbb\xb7\xaf\x3e\xfc\x70\x74\x46\xdc\x28\xad\x67\x97\xb0\x7d\x13\x5a\x6d\xd2\x6f\x47\xe1\x0f\xaf\xcf\xde\xbd\xfd\x70\xfc\xee\xf4\xf5\xc9\x77\x1f\xde\x1e\xf5\x74\xe5\xd7\x70\x26\xba\x0d\x56\x7d\x2a\xae\x96\x02\x43\x48\x75\x28\xb7\xa4\xc4\x94\x26\xc9\x5b\x32\xf3\x53\x74\x27\xf3\x9a\xf0\x77\xb9\xeb\xf3\x57\xc7\x67\xaf\x2e\xdc\xbd\x31\x87\xfe\xcf\xe8\xce\xd1\x1b\xe7\x39\xba\x12\x19\xd8\x15\xae\x0d\xb5\x27\xd7\xc7\x35\x7c\x73\xf0\x4d\x63\xf4\x32\x58\x55\xa5\xa8\xd9\xb3\x2d\xfd\x53\x2c\x23\xe5\xf3\xce\x0a\x77\x92\x02\xc9\xba\x87\x24\x76\x82\x24\x77\xf2\xd7\x82\x74\x46\x23\x74\xb4\x18\xa0\x06\x3f\x2e\x14\x33\x22\x19\x4a\x8c\x6c\x89\x84\x9f\xac\x14\xd4\x5a\x82\xdc\x0d\x26\x00\xfc\x5d\x22\xd5\x36\xd8\x52\x40\x41\x3f\xe3\xfb\xe6\xa8\x10\x12\x4e\x7c\x00\x2b\x90\x99\x41\xb5\x4f\x4c\x51\x05\x34\x32\x62\x63\xe7\xf9\x6f\x76\x14\xe1\xc6\x03\x29\xe3\x6f\xa2\x36\x16\xce\x9b\x24\xf6\x32\xd9\x2c\x5f\xaa\x75\xd2\x93\x75\x90\x17\x43\x1d\x6c\x8e\x5a\x57\x43\x72\x27\xbe\x45\x09\xde\xa9\xb7\x43\x88\x7b\x67\x1b\xe4\x96\x58\xbc\xb7\x00\xf4\x39\x0a\x27\xb8\x1a\x00\xdd\x09\x05\x7d\x44\x46\x02\x8d\x1c\x50\x97\x26\x43\x27\xe9\x8a\x10\x06\x19\x6f\x64\xfa\x49\x2f\x0f\x94\x1e\xed\x26\x9c\x48\x93\x0c\x74\x44\x6c\x34\x2e\x3e\x81\x97\x34\x68\x4c\x22\x3c\x4e\x4a\x15\x27\xba\xc3\xbe\x98\x7b\x03\x64\xaa\xfa\x12\x6d\xa1\x3d\xd6\x5c\x92\x93\x1e\x10\xa0\x6f\x3a\x41\xfb\xe0\x45\x00\x0c\x10\xfc\xaf\x21\x72\xa5\xda\x80\xd9\x45\xb1\x6d\x2d\xbd\xa3\xb8\xf8\x01\x33\x80\xbc\xed\x57\xb3\x04\x07\x21\xf5\xb6\x91\x67\x6a\x63\x9e\xb0\xed\x0a\x36\x59\x7a\xaf\x7b\xe8\xac\xef\x49\xdf\x7a\x16\x9b\x04\x97\x96\x23\x89\x4f\x8a\x11\x91\x16\xc5\xae\x99\x69\xc6\x4e\x8a\xdc\x97\xd0\xd5\x84\x6d\xcc\xa4\x68\xc4\x51\xd3\x1d\x0a\x18\x9c\xb8\x3e\xc1\xbb\x6b\x5c\x6e\x81\x2c\xf3\xc6\x80\x97\x66\xa2\xb1\x53\x71\x3f\x21\xb0\xe0\x04\xb7\xe6\xfc\xfb\x77\xef\xdf\xbc\xf4\x4e\xdf\x5d\x88\x56\x4b\xda\x13\xe8\x10\x96\x5f\x6b\xa6\x0c\x93\x71\x6e\x30\xfa\xb8\x7b\xb8\xc3\x24\x3c\xb3\x78\x02\xc7\x07\x35\x40\xc8\xdd\xa2\x35\x15\x8f\x1b\x41\x85\x78\x3d\x62\x5b\x54\x46\x51\xe2\x23\xa2\xec\x7e\x91\x50\x7f\x4d\x58\x31\x8c\x5b\xfa\x11\xf5\x2c\x42\xfb\x1a\x7a\x14\xb3\x4b\xd9\x29\x98\x0d\xa0\xf1\xda\x52\x0f\x1e\x6e\xb9\x3a\x34\x78\x3e\xb9\x24\xb8\x69\x4d\x92\x71\xfd\xc4\x3a\x6e\x17\x72\x8d\xd5\x1d\x91\x67\x17\xc7\xbd\xd6\xfb\x97\x3d\x56\x4b\x6a\xc6\x04\x92\x21\xe5\x26\xba\x34\x0f\xee\x0b\xdd\x92\x3e
\x9d\x2a\x51\x0e\x57\xf1\xbe\xd6\x28\x5e\xac\x3b\x06\x79\xe0\xf1\x0a\xca\xd4\x40\x13\x65\x48\x16\x47\x6c\x14\x5e\x47\x8a\x23\x81\x45\x52\xd2\x3c\x9c\x67\xd1\x9a\x09\x42\x52\x79\xad\x2e\xbd\xec\x76\x76\xd4\x50\x09\x2b\x9d\x45\x83\x38\x63\x63\xbf\xe4\xd5\x81\xc1\x21\x01\xce\xeb\x04\x9d\x6d\x85\x0c\xc7\xe1\x27\xb6\x91\x86\x13\x24\xa6\x99\xa8\x07\x28\x20\x0e\x3d\xad\x4a\xd2\xc7\xa3\x8e\x97\x01\x80\x02\x30\x68\x19\x55\x5a\x4f\x32\x3d\x0a\x51\x5d\xa6\x5c\xa4\x5d\xdc\x27\x86\x1f\xde\x33\xfe\xdb\xda\xb9\xbd\xbd\xdd\xea\xce\xc9\xc4\xf9\x3b\xa7\xab\xae\xb3\x75\xe6\xca\xea\x96\xce\x01\x3d\x5e\xbc\x39\x47\x46\x10\x8f\xa3\x9e\x6a\x75\x6d\xcc\xa5\x91\x46\x5c\xe7\x12\x15\x09\xef\x4f\xa1\xef\x4c\xd6\x28\x79\x0b\x01\x39\x85\x41\x29\x43\xed\xab\x8f\x04\x03\x8d\xd3\x72\xad\x3d\xc9\x8b\x76\x65\x18\x4a\x36\xfa\x9c\x45\x79\x9a\x00\xd2\x35\x92\x07\xe9\x38\x44\x9b\x93\xad\xc0\xc6\xb4\x2e\x2c\x2a\x09\x31\x68\xfd\x53\x79\xc0\x6d\x84\xfa\x1a\xa2\xb4\xff\x5e\xa5\x69\xe0\x2e\x8d\xb9\x97\x61\x56\xca\x35\xa4\x16\x45\x63\xdb\x8e\xa3\x6a\x24\x3e\x12\xf6\x45\xc2\x88\x74\x59\xa2\x4e\x03\x05\x8b\x41\xff\x39\x50\x70\x7b\x4d\x14\x8c\x2a\x7b\x38\x26\xf2\x02\x0a\xaa\xb8\xa4\xd1\x0f\x18\x38\x0d\xb1\xe8\xe3\xda\x58\x78\x6c\x28\x1b\xaf\x38\xd1\x61\x69\xca\x58\x9a\x4a\xbe\x46\x8e\x53\x5f\x77\x61\xfe\x53\x30\x4f\x8a\x39\x9e\xc4\xe8\xde\xdb\x2f\x04\xb6\x10\xde\x26\x24\x7f\xb1\x2b\xf5\x71\x3c\x05\x80\x3a\x9f\xc5\x64\x0e\x55\x85\x00\xa6\x01\x70\x15\x47\x3f\x83\xa2\x3d\x25\xb5\xe5\x9a\xcf\x6e\x7a\xc7\x94\xb7\xdf\x69\x57\xd2\x76\x5f\xec\x35\x8c\xf1\x91\xf5\xec\x89\x5a\x1c\xc9\xc3\xe2\x69\x53\x04\x5e\x6c\x09\x32\x7c\x91\xf0\xb6\x33\x90\x06\x16\xbb\xba\x22\x11\x76\x1c\xe1\x42\x90\x49\xa7\x91\x6b\x53\x51\xd9\x00\x0b\x05\xf4\xfa\xc2\xc5\x6a\xf6\x12\x05\xb1\x45\x5e\xc3\xfa\xc8\x7e\x74\x24\x41\x5d\xa6\xc0\x4d\x61\x50\x0b\xe4\x20\xf1\x12\xb1\x0c\x50\x6a\x98\x10\xfb\x49\x3a\x1b\x28\x73\x9e\xe6\x09\x99\x9c\x2d\xe1\x14\x67\xf6\x4d\xae\x8c\xc7\xaa\x1f\x62\x24\xed\x16\x20\x77\x99\x46\x8c\xc1\x3a\x1a\x81\x5c\x57\x23\xb2\xc2\x1c\x33\xa9\xb4\xaa\xad\xe6\x98\x87\xbb\x87\xae\x89\xb4\x9d\xd2\xce\x15\x12\x41\x56\x6a\x47\x6e\x17\x97\x2f\x40\x77\x49\x44\x40\xc2\xba\xc1\x7a\x14\xdc\xc4\x6d\x75\x5d\x99\x5c\xf8\xe8\x16\xb0\x68\x1b\x71\x0e\xdd\x53\xd2\x56\x60\xc9\x03\x5f\x30\xbc\x94\x3c\x67\x44\x21\xf3\xea\x29\xb2\xe9\x7e\x3b\xe8\x04\xbb\xc1\x9e\xb4\x5b\x0a\xac\x44\x59\x94\x11\xec\x63\x56\xed\x8d\x55\xd3\x97\x40\xb2\xfc\xc0\xf0\xb7\x76\xb5\xe7\x52\xfb\x39\x26\xa3\x48\xd3\x6a\x1f\x34\xec\x9b\xa6\x56\x75\x1e\x49\x03\x95\x20\xc2\x24\xff\x3d\xea\xc6\x70\x55\x92\x5e\x3a\x38\x9a\x15\x29\xbe\x5b\x81\x58\x9d\xe2\x22\x06\x50\xb2\x11\x8a\x54\xd8\x2d\x87\x2f\x07\xd9\x6d\xe2\xc9\x99\xe1\x08\x4a\x49\xe1\xad\x91\xb4\xc3\x69\x92\x40\xb1\xaf\x1d\x1f\xa2\x33\x11\xbb\x56\x1c\x73\x19\xca\xb6\x22\xe0\x4f\x67\x2a\x85\x6f\x49\x1f\x01\x96\x09\x87\xd1\xfb\x22\x4e\x94\x76\xef\x60\x67\x9d\xb6\x59\x93\xbd\x5c\xf3\xda\xfd\xc4\x78\x37\x40\x18\x1a\x7a\xea\xb9\x00\x8f\x4d\x66\x1e\x3d\x18\x80\x18\x05\x70\x1c\xfb\xa5\x01\x5a\x2e\xbd\xc4\x02\x7c\x5f\x6b\x86\x3a\xcc\xe1\x0c\xce\x7b\x4b\xbf\x39\xd0\x14\x86\xc6\x96\x65\x5b\x9d\x5e\xf7\x7f\x77\xc3\xaa\xe9\xeb\x92\x09\xf6\x27\x0f\x12\x7c\xf2\x7e\xac\x1e\x8e\x3f\x64\x5f\x37\x4a\x42\x73\x06\x2a\xa0\x4e\x0a\x3e\xac\x62\x92\x44\xd5\xcc\xc6\x82\x86\xb1\xaa\x4e\xc7\x21\xc3\x29\xee\x09\x02\x3a\x94\x85\x99\xd1\x3a\x0b\x06\x8c\x99\x82\xb7\xa4\x54\x3e\x03\xb1\xf7\x47\x34\x9e\xbc\x9b\x10\x41\xce\xd9\x62\xd0\x79\xf1\x36\x16\xaa\x54\xfd\xc6\xbd\xf4\xd8\xb6\x3c\xeb\x5c\x98\x36\x9f\x5d\x0a\xd3\xab\x2f\x24\x73\x65\x3c\x87\x2c\xac\x2f\x3d\xaa\x05\x14\xf0\x66\x10\xc5\x1
f\x20\x00\x01\x17\x63\x3e\x3f\x82\x2c\x28\x90\x95\x6d\x74\x74\xcc\xd5\xa6\x09\x93\x13\x16\x94\xbe\x5c\xda\xb9\xa0\x41\x66\x21\xa0\xb7\x68\x7c\x44\x53\x13\x88\xd7\x83\x19\xa1\x4b\x76\xe9\x1e\xcc\xc8\x4f\x81\x58\x8c\x3e\x7a\x96\x90\x27\xe3\x36\xdd\xe5\xd1\x5d\x1f\x0a\xba\x8c\xc4\x48\x7a\x72\xff\xa3\x0d\xd0\x24\x1c\x9f\x8b\xb8\x89\x88\x0e\x98\xe0\x3b\x76\xcc\x44\xab\x64\x34\x41\x2c\x8b\x55\x24\xde\x45\x2d\x47\x4c\x33\xc6\xb6\x8c\x0a\x49\x74\x55\x20\x87\x22\xb8\x87\xa0\x61\x79\x21\x2a\x7f\xae\x1f\x05\xf7\x89\x17\xe9\xbc\x6f\x43\xe4\xa8\x8e\x98\xb3\x43\xe5\x7c\x06\xcc\x11\x63\x23\x54\xcc\x80\x70\x85\xb1\x61\x59\x05\x63\x44\x68\x26\x4f\x54\xdb\x21\xaa\x81\xd7\xf7\xaa\xd4\xbb\x21\x18\x49\x61\xe2\xa4\x51\x6c\x4b\x65\x6f\x6e\xbe\x1a\x47\x7d\x54\x5f\xf5\x43\xe3\xae\x32\x97\x1b\x4e\x14\x61\x3f\x4b\xf3\xdc\x70\x60\x09\x4a\x5d\xf1\x2e\xab\x9e\x54\xc1\xa6\x22\xf1\xaa\x57\xb3\x3b\xde\x1d\xf2\x89\xcd\x35\x5b\x60\xa9\xb6\xe5\x2e\x10\x9c\x81\x34\x95\x87\xc2\x07\x84\xa1\xc5\xf0\xaa\x71\xf8\xd1\xa1\x3f\x8b\x64\x7e\x2b\x2f\x57\xb1\xaa\x2b\xcf\xe3\xe1\x04\xf9\x6c\x01\xc8\x92\x68\x8b\xd8\x8c\x24\xdb\x9a\x43\x06\x5a\xea\x78\x0d\xb1\x12\x28\xdd\x71\xc2\x1b\xcc\x1b\x4b\xea\x2d\x82\xaf\xca\xd3\x23\x54\x79\x33\x76\x75\x24\x25\x59\xd8\x2f\x58\x47\x85\x45\x0c\x9d\x1f\xbe\x07\x63\x47\x90\xec\x1a\xdb\xb1\xcd\x2a\x2d\xc6\x1e\x95\xde\x84\x3e\x54\x96\x57\x2a\xc5\xd0\x55\x52\x71\xe2\x40\x26\xc7\x68\x75\x80\xcd\x35\x11\xae\x0c\xec\xa3\x6e\x60\x2b\x2c\x26\xfc\x27\x2b\xec\x92\x5c\x55\xb7\x07\x5b\x1a\x0f\xfa\xd8\xd9\x08\x8e\xbe\x70\xd8\xdc\x2a\xcd\xb4\x7c\xe1\xa0\xa1\x03\xc4\x4b\x24\x86\x00\x15\xe5\x52\x59\xec\x21\x7d\x45\x56\x65\x67\x67\xec\x8b\x14\xa6\x8a\x90\xb8\xbf\xf3\x36\xf6\x85\x16\x78\x1c\x97\xab\xec\xba\xaa\xb4\xb9\x8e\xc4\x05\xd4\xeb\x13\x69\x77\x16\x52\xab\x03\xc7\x90\xef\x2a\x0b\xe4\x02\xa3\x21\xd1\x03\x34\x36\x2b\x4c\x4f\xa7\x9c\xda\x02\xa8\x47\xad\x42\x2e\x78\xfa\x49\xea\xb1\xab\xa2\x59\x90\x44\x25\xd9\xd4\x20\xa0\x7a\x40\x83\x34\xaa\xa0\x83\x4b\xde\x2b\x15\x07\x69\xea\x58\x60\x58\x81\x47\x27\x57\x00\xb5\x45\x4e\x9c\x02\xdd\xeb\x28\xe2\x23\x39\x45\xc5\x63\x88\x37\xbe\x66\x64\xa1\xe6\xae\xe1\x2f\xe1\x6b\xcc\xc9\xaf\x6e\xe1\x18\x10\x4b\xd1\x90\xe2\xbc\xed\x2f\xa7\x78\x11\x64\xa4\x8a\xfe\xe8\xd5\x2d\xda\xe8\x73\x2d\x8f\x49\x89\x88\xac\x0e\x20\x5f\x18\x89\xa8\xa7\xe0\x1b\x35\x5d\xef\x64\x62\x65\x08\xfb\x96\x95\x56\xf3\xca\x80\x11\x86\x47\xf1\x45\xe9\x34\x4d\xd2\x21\xba\x70\xe3\x69\x8d\x13\x10\x93\x9b\xac\xea\x08\xd0\x4f\xb8\x14\x02\x00\xb8\xca\xa8\x81\x3b\xa4\xbd\x00\x01\x0a\x0a\xc0\x07\x99\x04\x4d\x60\x4d\xd9\x03\x9c\xbd\xd1\x48\x69\x37\xcd\xe2\x94\x9e\xad\xc3\x1c\xf1\xb7\x87\x08\x84\x18\x04\x82\x8d\x78\x8c\xfb\x41\x8e\x47\xa4\xb2\x41\x17\xf2\x0c\xaf\x00\xa0\x7f\x4a\x21\xe1\x00\x9b\x05\x7e\x42\x34\x41\x24\xfe\xd4\x38\x76\xe7\x51\x21\x39\x26\x71\xf5\x40\xbe\x97\x40\x49\xc0\x42\x33\x91\xc5\xdb\x07\x76\x39\xc2\x36\x2c\x2a\x10\xc7\x04\xec\x6d\x67\x8f\x45\x7f\xc2\x4e\xac\xde\x4f\x59\x22\x21\xcb\x48\xc1\xcd\x00\x25\xcc\xf0\x11\xaf\xd2\xbb\x5a\x78\x9e\xfa\x95\xa7\xdb\xd0\x50\x0d\x2b\x74\xf4\xe6\x0d\xf2\xef\xcb\xbc\x35\x26\xa2\xa4\xe3\xf3\x5b\x1d\x99\x26\x5e\xeb\x72\x3c\x4a\x26\xca\x99\x48\xd9\x18\xcf\x95\xdd\x1a\x1b\x85\x31\x83\x3b\x16\x3e\xe8\xd2\xa9\x9f\xd6\xf4\xf7\xbe\xc5\xb7\xf9\x36\xdf\xe6\xdb\x7c\x9b\x6f\xf3\x6d\xbe\xcd\xb7\xf9\x36\xdf\x72\xdf\xff\x02\x4f\xe5\x59\x22\x00\x68\x01\x00\x01\x00\x00\xff\xff\x47\x73\x25\x81\x15\x45\x00\x00")

// chartsTraefik103001TgzBytes returns the bytes of the embedded
// charts/traefik-10.3.001.tgz asset, decompressed by bindataRead.
func chartsTraefik103001TgzBytes() ([]byte, error) {
	return bindataRead(
		_chartsTraefik103001Tgz,
		"charts/traefik-10.3.001.tgz",
	)
}

// chartsTraefik103001Tgz wraps the embedded chart archive in an asset whose
// file info carries placeholder metadata (zero size, mode, and mtime).
func chartsTraefik103001Tgz() (*asset, error) {
	bytes, err := chartsTraefik103001TgzBytes()
	if err != nil {
		return nil, err
	}
	info := bindataFileInfo{name: "charts/traefik-10.3.001.tgz", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
	a := &asset{bytes: bytes, info: info}
	return a, nil
}
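
// Usage sketch: these accessors are normally reached through the generic
// Asset(name string) ([]byte, error) lookup that go-bindata generates
// (assumed to exist elsewhere in this file), e.g.:
//
//	data, err := Asset("charts/traefik-10.3.001.tgz")
//	if err != nil {
//		// asset missing or decompression failed
//	}
//	_ = data // the .tgz bytes of the embedded Traefik Helm chart
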
var _chartsTraefikCrd103001Tgz = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x00\xe4\x1c\x1b\xe3\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\x03\xed\x3d\xdb\x72\xdb\xb8\x92\xe7\x39\x5f\xc1\x9a\x79\x98\x64\xcb\x92\xe3\x5b\xb2\x9b\x37\xc7\x4e\x76\x52\x93\x64\x5c\x96\x77\x66\xb7\xce\x39\x55\x03\x91\x90\x84\x35\x45\x72\x79\xb1\xa3\xd9\xda\x7f\xdf\xee\x06\x49\x91\x14\x71\xd1\xc5\xb2\x3d\x41\x3f\x24\x32\x09\x34\x81\x46\xa3\xd1\xdd\x68\x34\xf2\x94\xf1\x89\xb8\x1d\xf8\x69\x70\xf8\xb7\x07\x82\xd7\x00\x6f\xcf\xce\xe8\x7f\x80\xee\xff\xf4\xfb\xe8\xe4\xed\xdb\x93\xd3\x37\x27\xf4\xfc\xe8\xf5\xd1\xe9\xf1\xdf\xbc\xb3\x87\x6a\x50\x13\x8a\x2c\x67\xa9\xe7\xed\xe3\x53\x4f\x11\xf2\xc6\xf8\x5f\xcc\x58\x9a\x0f\x17\x6c\x1e\xee\xf6\x1b\x38\xc0\x6f\x4e\x4f\x95\xe3\x7f\x7c\xf2\xb6\x33\xfe\xc7\xaf\xdf\x1e\xfd\xcd\x7b\xbd\xdb\x66\xf4\xc3\x77\x3e\xfe\x2c\x11\xbf\xf1\x34\x13\x71\xf4\xce\xbb\x3b\x7e\x71\x57\xfd\x3e\x7a\x3d\x3c\x19\xbe\x7e\x7d\xf4\x22\xe0\x99\x9f\x8a\x24\xa7\xa7\x9f\x22\xa0\x56\x18\x66\x5e\x3e\xe3\xde\xc5\xf5\x65\xe6\x4d\xe2\xd4\x2b\x79\x68\xf8\x22\x62\x73\xfe\xce\x6b\xb0\xd4\x8b\x7c\x91\xc0\x13\x96\x24\xa1\xf0\x19\xe2\x78\xc1\xa2\x28\xce\xe9\x67\xf6\xee\x85\xe7\x4d\x42\xce\xf3\x21\xbc\xcb\x43\x3e\x14\xf1\xe1\xb8\x88\x82\x90\x0f\x44\xf0\xce\xbb\x3d\xc9\x5e\x3c\x36\x79\xfe\xf2\xd0\x9c\xff\xd7\x1f\xce\x2f\xbf\x7c\x18\xce\x83\x1d\x7f\xc3\x38\xff\x8f\x4e\x3b\xf3\xff\xe8\xf4\xe8\x8d\x9b\xff\xfb\x80\x1f\x5b\xd3\xf5\xdc\xbb\x66\x91\x3f\xe3\xa9\xe7\xe3\x5a\x00\xd3\x9c\xe5\x9e\x58\x99\xf4\x45\xc6\x03\x6f\xbc\xf0\xfe\x5e\xd6\xfd\xe7\xcb\x59\x9e\x27\xd9\xbb\xc3\xc3\xa9\xc8\x67\xc5\x78\xe8\xc7\xf3\xc3\x54\x62\x3a\x0c\xf8\xdd\x80\xb0\x65\x87\x79\xca\xf9\xe1\x9c\x65\x39\x3c\x4e\x98\x7f\xcb\xa6\x1c\x1f\x12\x8e\x57\x43\x37\xd9\x1f\x01\x9a\xf3\x3f\xe7\xf3\x24\x64\x39\x8c\xc9\x6e\xbf\xb1\xbe\xfe\x07\x12\xe1\xb5\xd3\xff\xf6\x01\xfd\xe3\x2f\xa2\x69\xca\xb3\x2c\x8d\x8b\x9c\x6f\xaf\x10\xea\xe5\xff\xd1\xdb\x93\xa3\xb3\xce\xf8\x9f\x9d\xbd\x39\x75\xf2\x7f\x1f\xd0\xd4\xff\xe0\x37\xff\x96\xf3\x08\xff\xca\x86\xb7\xff\x9a\xa1\x42\x76\x77\xf4\xe2\x56\x44\xa0\x8e\x5d\x00\xa5\xe2\xf9\x35\xcf\xe2\x22\xf5\xf9\x25\x70\x4d\x24\x48\xa3\x9b\xf3\x9c\x05\x2c\x67\xa8\xce\x75\xb4\x3b\xcf\xf3\xe3\x28\x4f\xe3\x30\xe4\xe9\x60\xca\xa3\xe1\x6d\x31\xe6\xe3\x42\x84\x01\x4f\x09\x79\xad\x7a\xbe\x1e\x9e\x0e\x8f\xa0\x86\x9f\x72\xaa\x7e\x23\xe6\x1c\x46\x66\x9e\xbc\xf3\xa2\x22\x0c\xe1\x8d\x54\x2e\x9b\x9c\x99\x0d\x2b\xcd\x13\xbf\xc2\x44\x14\x0f\x8b\xec\x45\x96\x70\x1f\xbf\x3d\x85\x32\x49\xad\x8d\xb6\x8a\x48\x64\x65\x0b\x65\xef\x3e\x49\xbc\xd7\x88\x97\x1e\x87\x22\xcb\x7f\x59\x79\xf5\x19\x9e\xd2\xeb\x24\x2c\x52\x16\x76\xda\x43\x6f\x32\x78\x54\x84\x2c\x6d\xbf\x83\x57\x99\x1f\xa3\x32\xfc\x15\x3f\x0d\xab\x1f\x0f\xe0\x59\x49\x00\x6a\xca\xa0\xec\xe2\xdd\x11\x0b\x93\x19\x3b\x92\xd8\x60\x0d\x9d\x33\xd9\x52\xcf\x03\x04\xd1\xf9\xd5\xa7\xdf\x4e\x46\xad\xc7\x9e\xd7\xd1\xd2\x97\x0d\xf6\x44\x06\x83\x52\x3d\xc2\xe5\xdb\x43\x02\x89\x49\xa9\x8f\x0f\x6b\x14\x49\x0a\xd8\xd3\x5c\x54\x74\x91\xd0\xe0\x8f\xc6\xd3\xce\x07\x7f\xc2\x36\xc9\x52\xf0\x02\x18\x83\x4b\x65\xa1\xec\x1d\x28\x0b\xb2\x1b\x5e\x3c\x81\xe7\xd0\xa2\x94\x27\xd0\x1c\x1e\x49\x56\x69\x21\xf6\xb0\x10\xb4\x38\x1e\xff\x37\xf7\xf3\xa1\x37\xe2\x29\xa2\xf1\xb2\x59\x5c\x84\x01\xf2\x13\xfc\x99\x03\x06\x3f\x9e\x46\xe2\xcf\x1a\x37\x7c\x31\xa6\x8f\x92\x04\xcb\x3b\x38\x45\x04\x3a\x47\xc4\x42\xef\x8e\x85\x05\x3f\x80\x0f\x04\xde\x9c\x2d\x00\x0d\x7e\xc5\x2b\xa2\x06\x3e\x2a\x92\x0d\xbd\x2f\x71\x0a\xe4\x8b\x26\xf1\x3b\xaf\xa1\xde\x54\xf3\x02\x54\x9c\x79\x01\x33\x60\x71\x48\x2c\x2e\xc6\x45\x1e\xa7\x19\x6a\x3b\x3c\x3c\xcc\xc4\x74\xc0\x52\x7f\x
26\x72\xc0\x5e\xa4\xfc\x10\xc8\x38\xa0\xa6\x47\x34\x37\x40\xc7\xfe\x31\x2d\x67\x52\xf6\x53\xab\xad\xd2\x62\xca\x00\x63\x34\x6d\xbc\x20\x2e\xd5\x8c\x00\xb2\x2a\x8d\x75\x59\x55\xf6\x62\x49\x68\x7c\x84\xd4\xb9\xfe\x30\xba\xf1\xaa\x4f\xd3\x60\x74\xa9\x4f\x74\x5f\x56\xcc\x96\x43\x80\x04\x03\x7a\x80\x76\x48\x83\x38\x49\xe3\x39\xe1\xe4\x51\x90\xc4\x40\x61\xfa\xc3\x0f\x05\xd4\xea\x20\xcd\x8a\xf1\x5c\xe4\x38\xee\xff\x03\xa4\xcd\x71\xac\x86\xde\x05\x09\x0b\x6f\xcc\xbd\x22\x01\xf9\xc1\x83\x21\xb0\x29\x3c\x9d\xf3\xf0\x82\x65\xfc\xc1\x07\x00\x29\x9d\x0d\x90\xb0\x76\x43\xd0\x94\x73\xdd\xc2\x92\x6a\x8d\x17\x95\x14\x52\x8c\x57\x73\x8a\x8e\xa0\x68\x39\x74\xcd\x99\x49\xa6\x35\x5b\x29\xd9\xa1\x6c\x35\x94\xc3\xd6\xf3\xfe\xb9\x8c\x00\xfd\x4f\x17\x57\x38\x5a\x2b\xaf\x60\x96\x80\x06\xd0\xf3\x58\x49\x91\xe6\x4b\x96\xa6\x6c\xd1\x6d\x1c\x49\xc5\x35\xbe\xd3\xa2\x91\x94\x5f\xa5\xdc\x96\x02\x25\xe3\x39\x8a\x87\x52\xfa\xf7\x20\x50\x77\x5c\xc2\xea\x34\x5a\x02\x8f\x8a\xb9\xea\xdd\xc0\xbb\x2e\x42\xae\x78\xa9\xa5\x0e\xc2\x9c\xe5\xfe\x4c\x85\xda\x5c\x5b\x04\x41\xc8\xef\x59\xaa\xea\x94\x86\xa0\x12\x5a\x64\xfd\x52\xa3\xbb\xe6\x13\xc9\x77\x29\xfc\x28\xa5\xe7\xf2\x6d\xcd\x5a\xbd\x84\x96\x60\x22\xb7\x04\x5a\xd6\x34\xef\x2d\x68\xb0\x44\x44\x4b\xe7\x0e\xb0\xa1\x28\x12\x29\x57\x72\x43\xb5\x1e\x2b\x5f\x2b\x26\xfe\x6a\x91\xbe\x99\x21\x21\x49\x45\x9c\x82\x04\xd3\xb3\x06\xae\x5d\x53\x9e\xf6\x96\xc9\x40\x34\x0b\x7f\x37\x8c\x31\x92\xb8\xea\x15\x1c\x16\xe1\x22\x01\x2a\x72\x36\x47\xee\x80\xb1\xfe\xb6\x40\x75\x6a\x02\x12\x6a\x5b\x96\xd0\xcd\x43\x09\xba\xd9\x28\x61\x50\xb5\xd8\x50\xea\x46\x6a\x80\x36\x85\xd7\xe0\x43\x7d\xdb\x5a\x84\x45\x8d\xaf\x9e\x68\x3c\xe5\x11\xae\xbc\x31\xfc\xf9\x0b\x68\xc3\x69\xc4\x41\x96\x59\x75\xa5\x5e\x9c\x5f\xca\xa5\x21\x8c\x59\x30\x18\xb3\x90\x01\xc2\x14\xe5\x62\x26\x17\xea\x57\x07\x1e\x7a\x65\xf1\x0b\x6b\xf4\x7d\x89\xbd\x64\xaa\x36\xfe\x03\x90\x43\x69\x1a\x23\x65\x0e\x3c\x9e\xfb\xaf\x86\xde\xcd\xcc\x84\x31\x10\x13\xea\x70\x2e\xe4\x92\x36\xe6\xf9\x3d\xe7\x11\x49\x9a\xfc\x3e\x46\xa2\x94\x6b\x1e\xe8\x5d\x82\x9e\x1b\x30\x92\x9e\x03\xc5\xc3\x40\xcd\x82\x08\x8f\x20\x51\x80\xf7\x59\x96\xfd\x1c\x67\xf9\xcf\x9c\x81\x81\x63\x83\x72\x1c\xc7\x21\x67\x5d\x0d\xb8\x85\x33\x4e\x73\x3d\x26\x16\x2d\x7e\x9d\x98\xa6\x8a\x59\x94\x74\xcb\x1a\x7b\xeb\x79\xdf\x06\xb7\x35\x07\x0f\x00\xf7\x20\x4e\x07\xb2\x1a\xda\x5d\x85\x6e\x2c\x61\x71\x49\x40\x0f\xe3\x1f\xe3\x14\x16\x9b\x00\xab\xd8\x4f\xa8\xeb\x95\xca\xde\x2c\x0e\x83\x0c\x95\x85\x89\x98\x82\x65\x56\x29\x50\x06\x6e\x42\x3e\x9c\x48\x24\xd2\x36\xe1\x75\xc3\xf4\xec\x65\x27\xe5\x10\x26\x61\x91\xcd\x3e\xa1\xf9\x01\x1a\xb9\xa9\xb0\x35\xa7\x59\x2d\x3f\x08\x64\x1c\xed\x86\xbb\x4b\xe9\x02\x22\x25\xca\xcc\x5c\x69\x8b\x34\x17\xfe\xad\x72\x01\x94\xd0\x5e\xa5\xa8\x42\x39\xde\xa4\x13\xca\x07\xad\xa1\xdf\xd5\xe0\xf9\x71\x7c\x2b\x0c\xd4\x5b\x69\xe1\x05\x55\x32\xb4\xd0\x88\xd2\xf3\xc6\x0c\xdd\xec\xc0\xc7\xb2\x15\xfa\x3e\xad\xd7\x2f\x04\x34\xa9\x7e\x8d\x42\x03\xed\x2b\xb0\x15\x57\x15\x98\x97\xc8\x36\x66\x0b\x8e\x97\x90\x01\xe2\x11\xa8\x35\x0f\x83\x9c\xfb\x60\x2c\x3e\x04\x45\x2c\xa7\xab\xfd\xbc\x06\x45\x0c\x64\xb9\x61\xf4\x2c\xbb\x7f\xcf\xc5\x74\x66\x98\xcf\x2d\x16\xff\x9d\x2a\x54\x3e\x99\x18\xb8\x08\xcd\xf8\xe5\x52\x7e\x3f\x83\x45\xfe\xab\x4e\x7b\x96\x50\xeb\x43\xd9\x8a\xae\x52\x6b\x23\xe8\xaa\x01\x5d\x06\xf0\x27\x29\xe0\xcf\xf8\x81\x49\x8d\x89\xb8\xdc\xb8\xe2\xf3\x31\x0f\x10\xb3\x6c\x2e\xb4\x0b\x4c\xca\x08\xff\x1d\x8b\xe8\x95\x8d\xfe\x60\x5a\x2f\x1f\xdf\x88\xd0\xb5\x60\x40\x6a\x76\xef\x0b\xb2\x48\x95\x86\xbe\xa2\x3d\xea\x96\xe4\x61\x8f\xc8\x69\x71\xcc\x0f\x37\x9f\x47\x6d\x5b\x9e\x1e\xa0\xb8\x22\x87\x07\xef\
x2e\xde\x72\x39\xee\xeb\xb1\x34\xfe\xbd\x9b\x18\x6c\x04\x36\x0e\xb9\xf7\x99\xe7\x3f\x65\xde\x87\xc8\x4f\x17\x49\x7e\x80\xbb\x93\x68\xbb\xf0\x79\x92\x2f\xe8\x2b\xc0\xff\x85\x0f\x2f\xf8\x70\x3a\x84\x21\xed\xc1\xf9\x5f\xe7\x5f\x3e\xbf\xf3\xfe\x11\x79\xff\xc8\xa9\x2f\xde\xff\xfe\x9f\xf7\x23\x14\x0d\xc1\x10\x42\x05\x01\xc8\xd5\x7c\x0b\xff\x93\xac\x48\x79\xfe\x55\x21\xe7\x7e\xf4\xc6\x61\xec\xdf\x96\x95\x7f\x58\x29\xa1\x97\xd5\x48\x17\xf4\xb4\x87\x77\x2a\x35\xd2\x30\xb3\x83\x78\x8e\x84\xee\xaf\xab\x35\x0a\x5b\xa3\x76\x49\x68\xca\xa5\x8c\x95\x58\x89\xa3\xbd\x7b\x91\xcf\xbc\xd1\xf9\x57\xa5\x77\xc0\x66\x35\x42\x74\x3a\xb1\x63\x25\xbf\x32\xa6\xea\xa8\x04\x83\x0d\x6c\xfd\x1d\xb3\x3d\x6f\x31\x9f\xf5\x28\xe2\xa4\xb1\x75\xd2\x85\xd6\xc8\xfc\x2a\x4b\xf6\x9a\x95\xc0\xf3\xf2\xf5\x81\x94\x83\x95\x5c\xee\xba\x7a\xeb\x36\xc1\x6c\x4c\x58\x0a\xa3\x9a\xa3\x9b\xb7\x54\x84\xcb\x09\x1b\x41\x4f\x94\x1a\x95\x79\x8c\xf5\x7a\x80\x05\xdd\x2d\x2c\x34\x23\x16\xbd\x98\xd6\x88\x68\xc3\x70\xea\x45\xc0\x8a\x7b\xa5\x2a\x8c\x83\x86\x14\xa6\x79\x54\x9b\x1d\xe5\x10\x06\x0d\xaf\x80\xa2\xc7\x12\x13\x8e\xb5\x1c\xd9\x85\x74\xbc\x2f\x65\x29\x7c\x18\xe4\x6c\xa8\x98\x9a\x06\x72\x65\x79\xac\x52\x81\x3a\x8a\x38\xb9\xe7\xfb\xf9\x8f\x5e\x6e\xcd\x7e\xd4\x14\xc7\x79\x8a\x85\xb8\xe7\x65\xff\xe7\x06\x5e\x63\x7b\x52\x8b\x63\xb5\xfe\xa0\xde\xf6\x68\x3c\xca\x9a\xfb\x10\x2b\x88\xc8\x4e\x0c\x1a\x1e\x00\x1c\x46\x36\xe5\xe5\x93\x2c\x67\x79\x41\xa3\xc6\x7c\x9f\x27\xa0\x96\x7d\xed\x6e\xc6\xfe\x20\xd7\xcb\x6a\x8f\x95\xfe\x04\x51\x14\x08\x29\x1d\xbd\xbf\xff\xf3\x45\xc9\xa7\xc1\x6f\xd5\x06\x2a\x3e\x7c\xec\xcd\xf4\x67\x08\xe6\xf8\x8f\xdc\x4f\xb6\x0c\x01\x31\xc4\x7f\x9c\xbc\x59\x89\xff\x7b\x73\xec\xe2\x7f\xf7\x03\xcf\x39\xfe\x03\x38\xf3\xa1\x42\x40\x6e\x2e\xae\xe8\x4d\x7f\x14\x08\xbc\xd5\x06\x82\x60\xc3\xa4\xe4\xeb\x8d\x05\x81\xd7\x2f\x1e\x29\x1c\x04\x5a\xee\x22\x42\x08\x5c\x44\x88\x8b\x08\x79\xaa\x11\x21\x30\x4b\xed\x83\x42\xca\xc2\x1d\xfa\xfe\xd5\xe3\x42\x50\x90\xed\x36\x34\xe4\xc1\x83\x34\x14\x51\x18\xd9\xb2\x1f\x0d\x77\x28\x88\xae\x65\x91\x6a\x35\xea\x03\x63\x94\xc6\x3a\x21\x00\xbf\x12\xd3\x5e\xd7\xe6\x1c\x31\x20\xac\xdb\x3c\x15\xfe\x8a\x95\x27\x57\x55\x8d\xbb\xa4\x9f\x07\x9b\xe0\xe2\x47\xb6\x8a\x1f\x79\x80\xe8\x0f\x9c\x57\xfb\x08\x00\x79\x92\x63\xfa\x97\xdd\x6d\xa7\xa1\xbb\x4a\xe3\x3c\xf6\x63\xc3\x26\x74\x8b\x29\xae\x9a\xf5\x1a\x3b\x9a\xed\xe7\x0f\xb2\xf5\x7a\xd7\xa7\x63\xf6\x81\x3d\xad\x2d\xb7\xd6\x40\x2f\x9c\x8b\x88\x3a\x73\xc9\x43\x66\xb5\xc5\x66\xfe\xbc\xcd\x1e\xdb\x9e\x36\x9d\x06\xc4\xe6\x0f\x27\x98\xf4\x7b\x52\x8f\xb2\xf5\xb4\xa2\x2e\x18\x77\x9f\xfa\x5a\x88\x7e\xda\xad\x76\x9f\x7a\x90\x8a\xc8\xed\x3e\x35\x6a\xba\xdd\x27\x43\x31\xb7\xfb\x54\xc3\x77\xbd\x07\x20\xa3\x0d\xf3\x19\x08\xa4\xa9\xc2\x6c\x31\x45\xa8\xb8\xed\x2b\xb7\x7d\xe5\xb6\xaf\xbe\xef\xed\x2b\xf3\xfe\x4f\x11\x3c\xec\xfe\xcf\xeb\xb3\xb3\xe3\xe3\xd5\xfd\x9f\x63\xb7\xff\xb3\x0f\x78\xce\xfb\x3f\xc0\x99\x0f\xb5\xff\xf3\x1f\x97\xba\xfd\x1f\x78\xab\xdd\xff\xc1\x86\x49\xd1\xd1\xbb\xff\x03\xaf\x5f\x3c\xd2\xfe\x0f\xb4\xdc\xed\xff\x10\xb8\xfd\x1f\xb7\xff\xf3\x54\xf7\x7f\x60\x96\xda\xef\xff\x94\x85\x3b\xf4\xfd\xab\xef\xff\xa0\x20\xdb\xed\xfe\xcf\x03\x38\xd3\xb1\x91\xce\x99\xae\x81\x67\xe9\x4c\xff\x8e\x3c\xb9\x1b\xf9\x64\x9d\x15\xf7\x8c\xa0\xdf\xfe\x6b\xec\x66\xef\x20\x1f\xa8\xde\xfe\x3b\x3b\x3d\x7d\xdb\x8d\xff\x3b\x3b\x39\x75\xf6\xdf\x5e\xe0\xb9\xd9\x7f\x4d\xce\xdc\x9d\xe9\xb7\x0c\xb5\xe8\x58\x7d\xcb\x17\x2b\x06\x5f\xa3\x25\x1d\x5b\x6f\xde\xc4\xb6\x37\x33\xaf\x91\xb9\x43\xa9\x37\xf6\x64\xf7\x70\xe6\x9e\x33\xf7\xda\xd4\x77\xe6\xde\x1e\xcc\xbd\xe5\x4c\x24\x4b\x6f\x19\x5a\xd1\x98\xa2\x9a\xb8\x0a\xb5\x5d
\xc0\x82\xe0\x2a\x85\x29\xf8\xcd\xb0\x2b\x7e\x5e\x95\x6b\x7c\x7b\xf9\xcc\x10\xd2\xa1\x37\x4b\x12\xc5\xf7\x35\xa4\xd5\x92\x12\x61\xcc\x32\xe1\x9f\x17\x79\xcf\x56\x57\xab\x5b\xef\xab\x72\x8d\x6e\xfd\x7c\x73\x73\x25\x11\x78\x0c\xde\x20\x07\x94\x52\x71\xab\x6e\xce\x28\xcb\xc7\x47\xcc\x45\xb2\xd1\x2e\x39\xac\x74\xa1\x22\xc5\x8d\xb1\xe6\x3c\xbe\xe3\xba\x2c\x23\x76\x5b\x7f\x3b\x1e\xa2\x02\x93\xbd\xf4\x66\xf2\x68\x0f\x51\x55\xae\x31\x44\xa5\x5c\x38\xac\xb2\x6f\x2c\x91\x6d\x37\x46\x73\xf6\xed\x5a\xa2\x7e\x1f\x07\x8b\xf7\x8b\x5e\xb7\x03\x82\x8c\x94\x20\x93\xed\xcd\xa9\x86\x2c\x6a\x93\x8e\x3e\x25\x5b\xff\xf0\xdf\xe2\xf3\xbd\x75\x0b\x3f\xb5\xa7\x6e\x01\x4b\xa6\x8b\x0f\xdf\x70\xe1\x51\xc7\x7e\x6d\xca\x9e\xfe\xac\x37\xdc\xa3\x9d\xba\x63\xd6\x0c\x38\xa1\x1a\xa8\x8d\x34\x95\xce\x75\x19\xd0\x14\x17\x6c\x1f\x0e\xb3\xbb\xc4\x6d\x36\x5e\x25\x93\x4f\xc9\xca\xd3\x62\xe5\x4f\xb2\xc0\x64\x72\x94\x68\xdd\x24\x5b\x45\xc5\xe8\x18\x4a\xa4\x7e\x21\xf2\xf7\x20\xc7\x6f\xfb\xe4\x70\x9b\xb3\x5a\x85\x1b\x92\xaf\xc4\xe2\x8d\xcb\x37\x5b\x89\x3b\xfe\x50\x73\x27\x9e\x13\x62\x53\x27\xcb\x62\xcd\xee\x55\x8f\xb6\xec\x97\x1f\x16\x01\x0f\x2e\x40\xd1\x83\xe5\xfb\x06\x15\xb7\x0d\xa6\x93\x91\xd3\x36\xe5\x84\x65\xb3\x8c\x14\xaa\x4b\x36\x44\x03\xf0\x2f\x98\x67\x40\x1a\xcc\xbc\x8f\x6a\x32\xa8\xb5\x20\xdd\xbd\x3f\x40\x5d\x89\x2f\x39\xaa\xb0\x7f\xf4\xb4\x57\xc6\x6b\x95\xae\x30\x0a\x51\xc1\xc4\x2b\x84\x04\x84\x42\xc8\xa5\x3e\xfe\x47\xf9\xcd\x01\x7e\xf4\x8f\x52\x6b\x39\xf0\x44\x5f\x84\x23\xf0\xe1\x0c\x0c\x29\xa9\x95\xf3\x88\x9c\xea\x63\x19\x9f\x33\x66\xfe\x2d\xe8\xf9\x07\xa8\xae\x63\xb3\x40\xd4\x83\x16\x15\x86\x0b\x2a\x44\x81\x33\x64\x71\xf4\x60\x85\xef\x89\x3b\xb0\xab\x6a\x7b\xa1\xa4\x57\xb6\x92\x73\xcb\x3b\x47\xa9\x46\xbe\xf1\x03\x7a\x03\xc6\x24\x2b\xc2\xbe\x69\x3b\xe6\x33\x76\x27\x80\x6c\xa5\x5d\x38\xe6\xb2\xd3\xec\x8e\x53\x4d\xd9\x4f\x8f\x85\x60\x7e\x1e\xc0\x17\xa6\x2c\x05\x6a\x67\xf4\xcd\xfb\x19\xeb\x43\xd9\xe8\xa6\x17\x88\x40\x06\x19\x82\xbd\xe1\xfd\x1c\xdf\x83\x51\x91\xca\x26\x81\xe5\x03\x76\x06\xe8\x92\x65\xdb\xbc\x7b\x69\x7a\xb2\xf0\x9e\x2d\xfa\x82\x84\x90\x5a\x83\x80\x46\x91\xcc\xce\xac\x1c\x98\xb2\x85\x62\x82\x64\x47\x24\x91\x08\xa5\x61\x0a\x7f\x83\x78\x9f\xc6\x64\xb3\xc5\xbd\x9d\xf7\x6e\x79\x52\xde\xcf\x70\x4f\x56\x19\x70\x50\x20\x07\x3e\x2b\x12\xf4\x0a\x63\x24\x2a\xd8\x6c\x7e\x91\x62\x02\xbe\x10\x4d\xdd\x70\x81\x18\x7b\x93\x4f\x61\x37\x6f\xd0\xa8\x6b\x70\x25\xff\x26\xa4\xa9\x56\x85\xba\xca\xb1\x03\x7c\x7e\xbe\xa4\x7f\x01\xea\x74\xd8\xd7\xed\x1c\x47\x23\xcb\x9b\xe3\x48\xb9\x79\x7c\x86\x89\x00\x71\x75\x8d\xa6\x32\xed\x1f\xf3\x26\x05\x1a\x69\x95\xbf\x60\x5d\xf1\xb0\x9c\x26\x9b\x68\xc3\x9a\x89\x1d\x88\x29\xe8\x58\x16\x66\xc7\x65\x5d\xb0\x21\xfb\xe4\x43\x69\x7e\x38\xc3\x43\xc2\x43\x18\x1e\x1c\xf3\x52\x9a\xd6\xa6\x0f\x58\xe8\x8a\x4d\x9b\x89\xd9\x7c\x72\x69\xca\xfa\x5e\x82\xef\xb6\x1a\x17\x10\xd9\xa9\xe2\xb4\x80\x29\x68\x51\xee\x17\x5a\x85\x61\x6e\x9b\xa4\xd5\xac\xfc\xe9\x93\xb3\xea\x13\xb3\x9a\x93\xb2\x5a\x27\x64\xb5\x8c\x71\xb4\xdc\x92\x5d\x23\x09\xab\xf9\x88\xc8\x03\x24\x60\xdd\x20\xf9\x6a\x37\xbd\xaa\x06\xbb\x45\xe2\x55\xab\xd4\xaa\x7b\x8a\x3b\xb5\x4f\xa5\x6a\x93\x85\x4f\xbf\x0f\x6d\xd8\x83\xb6\xdd\x7f\xb6\xdc\x7b\xde\x70\xdf\x79\x9d\x74\xa9\x1b\xa6\x4a\x35\x24\xdf\x5d\x3b\x4d\xaa\x5d\xe8\xc2\x1a\xe9\x51\xad\xe8\x6b\xb1\xe5\x6d\x4a\x89\x6a\xf1\x1d\xfb\x54\xa8\x36\xc8\x0c\x29\x50\x77\x99\xfe\xd4\x6e\x50\x6c\xd2\x9e\xae\x9f\xf2\x54\x26\x34\xd5\x22\xf5\x6c\xd3\x9d\xda\x9f\x23\xb4\x4f\x73\x6a\x23\x4a\x2a\xb0\x4b\x6f\x6a\x1d\x8d\xb2\x4e\x5a\xd3\x75\x90\x5a\xa6\x33\xb5\xef\xb9\xd5\x11\x4a\x9b\x39\x68\x4c\x5f\x6a\xd1\x4d\x53\x20\xce\x66\x2
9\x4b\x1b\xa7\xf0\x35\xa4\x58\x23\x5d\x69\x37\x21\xa9\x16\xeb\x9a\xa9\x4a\xcd\x6b\xd3\x83\xe5\xbe\xab\x23\x68\x56\xe1\x51\x9c\x30\xe5\x02\x65\x61\xac\x7d\x5c\x96\x6c\x88\x2c\x14\x14\xf5\x2a\xb7\x4b\x73\x8d\x05\x41\xbf\xef\xcc\x82\x1a\xd8\x8e\xd2\xd3\x2f\x35\xa1\xbd\x53\xbc\x6a\x83\x5c\xee\x9f\x52\x23\xae\x81\xe7\x37\xd9\x65\x54\x9c\x19\x46\x68\x2f\x6a\xb4\x91\x8d\x07\xb9\x24\x93\xd0\x91\xae\x32\xa8\xa2\xcd\x11\x60\x55\x64\xe5\xbe\xf7\xa6\x36\x97\xcf\xe4\xa1\x49\x9d\x12\x64\x23\xa6\x7d\x36\xd2\x58\xd8\x4b\x2c\x5a\xc1\x8a\x67\xf1\x76\x80\x46\x44\x72\x01\x1a\xdd\x8a\xe4\x37\x9e\x8a\x89\x51\xdc\xeb\xfa\x66\x90\x47\xa0\x3c\x67\x79\x39\xb3\x37\x77\x50\x68\x3e\x32\x53\xb1\x7e\x8b\x69\x4a\xde\x5c\x75\x32\x94\xde\xbe\xed\x24\x89\x0f\x8b\x52\x76\x21\x43\xaa\xce\xc3\x30\xbe\xbf\x00\xc9\x4e\xf6\x9f\x15\x47\x9f\xeb\xea\xa3\x4d\x48\xcb\x22\x28\xe2\x22\x40\xb7\x24\xda\x23\xaa\xa5\x67\x02\x35\xc8\x9e\x16\xd3\x08\x43\x20\x75\x67\x35\xd5\xa3\xba\xda\x21\xad\x80\x31\x74\xa6\xa2\xfd\x1c\x28\x4e\x51\x25\x99\x34\x6e\x2b\x5b\x45\x25\x99\x62\xf2\x39\xf3\x49\x48\x1a\x42\xb9\x15\x2d\xbd\xbf\xf2\x23\x83\xf2\x2b\x83\x52\x14\x0f\xaa\x2f\x65\x5c\x31\xe1\x1f\x54\x14\xae\x74\xfc\x0b\xcf\x67\x71\xb0\x19\xd1\xca\xba\x7b\x20\x9a\xfc\xd2\x53\xa1\xd9\xaf\xa9\x98\x8a\x08\xa3\xfa\x36\x22\xdb\xb2\xba\x74\x2a\x61\xc8\x20\x05\xae\xe1\x4b\x74\x96\x2b\xfa\x11\x53\xbd\x8c\x82\x9f\xa0\x70\x46\x9a\x22\x03\xba\x85\x81\x4f\xe6\x35\xbd\xf7\x7e\xf8\x97\x1f\x9e\x16\x91\x34\x8b\xad\x35\xa5\x08\xc7\x46\xe4\xf2\xee\x53\x91\xe7\x1c\xfd\x14\x58\xa1\x8e\x65\xe3\x14\x71\xe9\x2d\x43\x06\xbc\x6c\x11\xe5\xec\x9b\xb7\xbc\x90\x3a\x0e\x59\x34\x1d\xc6\xe9\xf4\x30\xb9\x9d\x1e\xa6\xd8\x84\xe4\x50\xa1\x51\xef\x8d\xb8\xd0\xe0\xd8\xa0\x4c\xa9\xa9\xda\xaa\x8c\xd3\x29\x2b\x45\x76\xb9\x42\x69\xee\xbd\x31\x7b\x6e\xf6\x46\x82\x2f\xec\xdb\xf9\xd4\xc6\xe3\x7d\xbe\x5a\x4b\x76\x9a\x7c\x99\x62\x5e\xda\x56\x0d\x69\xa4\xf4\xa1\x49\x19\x85\x51\x8c\xb8\xff\xc4\xfc\x99\x6a\xdd\xda\x3e\x94\x05\x94\xfe\xdf\x58\xba\xd0\x69\x22\xdd\x70\xbc\x65\xf9\x2a\x6a\x3a\xa3\x55\x18\xba\x89\xaf\xea\xfd\xc2\xac\xbd\xff\xaa\xe8\x2d\x34\x80\x07\x87\x65\x60\xa5\xb4\x6e\x29\xc4\xcf\x20\xc9\x94\xd2\xd9\xb8\x94\x23\x2e\x1e\xa0\xcf\xf6\x11\xcc\x83\x71\x1a\xdf\x67\x3c\xfd\xcf\x2c\xfb\x28\xc2\x7c\xd3\xcd\xa9\x72\x5f\x7a\x84\x5a\xab\xc8\x17\x57\x71\x28\xfc\xcd\x36\x74\x1a\x11\x01\x5f\xe3\x2c\x12\x13\x85\x87\xd9\xd8\x22\x52\x1e\xdf\x97\xbd\x1b\x8d\x7e\xc3\x2d\xf6\xcd\x5a\x44\x98\x3e\x62\x9e\x8a\x32\x7b\xcb\xb6\xb8\x6c\xec\x52\xe0\x42\x21\xcd\x9a\x2b\xa3\x09\x64\xc9\x02\x4a\x0b\xa0\x6a\x95\x85\xa5\xba\xcf\x66\x4d\x38\xc3\xbd\xed\x2d\x98\x09\xa4\x91\xcf\x47\x37\xa3\x6d\xf6\x5d\x27\x38\xf0\x97\x3c\xd2\xb6\x40\x5d\x7d\x86\xd3\x9a\xb2\xbe\x3d\x9a\x07\x40\x64\x97\x18\xd9\x1d\x27\x73\x98\x58\x9b\xf5\x22\x29\xc6\x30\x08\xbf\xf0\xcd\x86\x81\xfc\x82\x29\x4f\xb7\x18\xc9\x2c\x0b\x3f\xe2\x60\xa2\x94\xb4\x58\x14\x7e\xba\xc4\x20\x7c\x4c\x89\x13\xbc\xa3\xc4\x62\xd7\x3c\x10\x29\x65\xe8\x24\x4d\x2a\xca\x72\x18\x8d\xe1\x4f\x1b\x11\x03\xda\xb2\xbf\x66\xe8\x28\x62\x66\xab\x7d\x4e\x57\x68\x50\xd5\xbd\x4d\x48\xf3\xa1\x3e\x47\x0d\xfc\x22\xd1\x50\x3e\xb9\xb4\x26\x1a\x1d\xe2\x51\xe9\xbb\x5b\x0f\xe9\x0d\x9f\x27\x71\x0a\xea\xc2\xf3\xed\x44\x9e\x7d\x8a\x28\xdc\x6f\x54\x8c\xb5\x99\xe4\x2c\x30\x5d\xa5\x1c\x37\xd2\x37\xae\x0f\xaa\x40\x1c\xa9\x2c\xeb\x6d\xb5\x44\x0d\x27\x8a\xe8\x23\xa9\xb2\xb0\xc6\x1a\x1c\x4e\x9f\x96\x25\xc1\xaa\xa2\x43\x36\x94\x9c\xab\x98\x8f\x65\x4c\x42\x7d\xe4\x66\xcc\xd1\x6e\x4a\xd2\x18\x35\xc0\xde\x8d\x38\x19\x9d\x86\x07\x67\x51\x81\xa9\x23\xc7\xd6\x76\x53\xcd\xe3\x42\x25\xa2\xb7\xd7\xac\x65\x6c\xf3\x05\x28\x68\x
...(several kilobytes of go-bindata hex-escaped compressed payload for charts/traefik-crd-10.3.001.tgz elided)...\xff\xff\x68\x6e\xa4\xd0\xe4\x1c\x00\x00")
func chartsTraefikCrd103001TgzBytes() ([]byte, error) {
return bindataRead(
_chartsTraefikCrd103001Tgz,
"charts/traefik-crd-10.3.001.tgz",
)
}
func chartsTraefikCrd103001Tgz() (*asset, error) {
bytes, err := chartsTraefikCrd103001TgzBytes()
if err != nil {
return nil, err
}
info := bindataFileInfo{name: "charts/traefik-crd-10.3.001.tgz", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}
// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}
return a
}
// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
cannonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[cannonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}
// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"charts/traefik-10.3.001.tgz": chartsTraefik103001Tgz,
"charts/traefik-crd-10.3.001.tgz": chartsTraefikCrd103001Tgz,
}
// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
cannonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(cannonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}
type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}
var _bintree = &bintree{nil, map[string]*bintree{
"charts": &bintree{nil, map[string]*bintree{
"traefik-10.3.001.tgz": &bintree{chartsTraefik103001Tgz, map[string]*bintree{}},
"traefik-crd-10.3.001.tgz": &bintree{chartsTraefikCrd103001Tgz, map[string]*bintree{}},
}},
}}
// RestoreAsset restores an asset under the given directory
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
if err != nil {
return err
}
return nil
}
// RestoreAssets restores an asset under the given directory recursively
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}
func _filePath(dir, name string) string {
cannonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...)
}
| 1 | 10,720 | Did go change their tags with the new version? | k3s-io-k3s | go |
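The block above is go-bindata's standard generated lookup API (Asset, MustAsset, AssetNames, AssetDir, RestoreAssets). As a minimal sketch of how a caller typically consumes it, assuming the function below compiles in the same package as the generated code; only Asset, AssetNames, and the chart name come from the file above, the rest is illustrative:

// Illustrative consumer, not k3s code; fmt is already imported by the
// generated file this would sit alongside.
func loadTraefikCRDChart() ([]byte, error) {
	for _, name := range AssetNames() {
		if name == "charts/traefik-crd-10.3.001.tgz" {
			return Asset(name) // Asset returns the original .tgz bytes
		}
	}
	return nil, fmt.Errorf("embedded traefik CRD chart not found")
}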
@@ -330,11 +330,15 @@ func (s *Server) newEndpointsServer(ctx context.Context, catalog catalog.Catalog
}
func (s *Server) newBundleManager(cat catalog.Catalog, metrics telemetry.Metrics) *bundle_client.Manager {
+ log := s.config.Log.WithField(telemetry.SubsystemName, "bundle_client")
return bundle_client.NewManager(bundle_client.ManagerConfig{
- Log: s.config.Log.WithField(telemetry.SubsystemName, "bundle_client"),
+ Log: log,
Metrics: metrics,
DataStore: cat.GetDataStore(),
- Source: bundle_client.TrustDomainConfigMap(s.config.Federation.FederatesWith),
+ Source: bundle_client.MergeTrustDomainConfigSources(
+ bundle_client.TrustDomainConfigMap(s.config.Federation.FederatesWith),
+ bundle_client.DataStoreTrustDomainConfigSource(log, cat.GetDataStore()),
+ ),
})
}
| 1 | package server
import (
"context"
"errors"
"fmt"
"net/http"
_ "net/http/pprof" //nolint: gosec // import registers routes on DefaultServeMux
"net/url"
"os"
"runtime"
"sync"
"github.com/andres-erbsen/clock"
bundlev1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/bundle/v1"
server_util "github.com/spiffe/spire/cmd/spire-server/util"
"github.com/spiffe/spire/pkg/common/health"
"github.com/spiffe/spire/pkg/common/profiling"
"github.com/spiffe/spire/pkg/common/telemetry"
"github.com/spiffe/spire/pkg/common/uptime"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/pkg/server/authpolicy"
bundle_client "github.com/spiffe/spire/pkg/server/bundle/client"
"github.com/spiffe/spire/pkg/server/ca"
"github.com/spiffe/spire/pkg/server/catalog"
"github.com/spiffe/spire/pkg/server/datastore"
"github.com/spiffe/spire/pkg/server/endpoints"
"github.com/spiffe/spire/pkg/server/hostservice/agentstore"
"github.com/spiffe/spire/pkg/server/hostservice/identityprovider"
"github.com/spiffe/spire/pkg/server/registration"
"github.com/spiffe/spire/pkg/server/svid"
"google.golang.org/grpc"
)
const (
invalidTrustDomainAttestedNode = "An attested node with trust domain '%v' has been detected, " +
"which does not match the configured trust domain of '%v'. Agents may need to be reconfigured to use new trust domain"
invalidTrustDomainRegistrationEntry = "a registration entry with trust domain '%v' has been detected, " +
"which does not match the configured trust domain of '%v'. If you want to change the trust domain, " +
"please delete all existing registration entries"
invalidSpiffeIDRegistrationEntry = "registration entry with id %v is malformed because invalid SPIFFE ID: %v"
invalidSpiffeIDAttestedNode = "could not parse SPIFFE ID, from attested node"
pageSize = 1
)
type Server struct {
config Config
}
// Run the server
// This method initializes the server, including its plugins,
// and then blocks until it's shut down or an error is encountered.
func (s *Server) Run(ctx context.Context) error {
if err := s.run(ctx); err != nil {
s.config.Log.WithError(err).Error("Fatal run error")
return err
}
return nil
}
func (s *Server) run(ctx context.Context) (err error) {
// create the data directory if needed
s.config.Log.Infof("Data directory: %q", s.config.DataDir)
if err := os.MkdirAll(s.config.DataDir, 0755); err != nil {
return err
}
if s.config.ProfilingEnabled {
stopProfiling := s.setupProfiling(ctx)
defer stopProfiling()
}
metrics, err := telemetry.NewMetrics(&telemetry.MetricsConfig{
FileConfig: s.config.Telemetry,
Logger: s.config.Log.WithField(telemetry.SubsystemName, telemetry.Telemetry),
ServiceName: telemetry.SpireServer,
})
if err != nil {
return err
}
telemetry.EmitVersion(metrics)
uptime.ReportMetrics(ctx, metrics)
// Create the identity provider host service. It will not be functional
// until the call to SetDeps() below. There is some tricky initialization
// stuff going on since the identity provider host service requires plugins
// to do its job. RPC's from plugins to the identity provider before
// SetDeps() has been called will fail with a PreCondition status.
identityProvider := identityprovider.New(identityprovider.Config{
TrustDomain: s.config.TrustDomain,
})
healthChecker := health.NewChecker(s.config.HealthChecks, s.config.Log)
// Create the agent store host service. It will not be functional
// until the call to SetDeps() below.
agentStore := agentstore.New()
cat, err := s.loadCatalog(ctx, metrics, identityProvider, agentStore, healthChecker)
if err != nil {
return err
}
defer cat.Close()
err = s.validateTrustDomain(ctx, cat.GetDataStore())
if err != nil {
return err
}
serverCA := s.newCA(metrics, healthChecker)
// CA manager needs to be initialized before the rotator, otherwise the
// server CA plugin won't be able to sign CSRs
caManager, err := s.newCAManager(ctx, cat, metrics, serverCA, healthChecker)
if err != nil {
return err
}
svidRotator, err := s.newSVIDRotator(ctx, serverCA, metrics)
if err != nil {
return err
}
authPolicyEngine, err := authpolicy.NewEngineFromConfigOrDefault(ctx, s.config.AuthOpaPolicyEngineConfig)
if err != nil {
return fmt.Errorf("unable to obtain authpolicy engine: %w", err)
}
bundleManager := s.newBundleManager(cat, metrics)
endpointsServer, err := s.newEndpointsServer(ctx, cat, svidRotator, serverCA, metrics, caManager, authPolicyEngine, bundleManager)
if err != nil {
return err
}
// Set the identity provider dependencies
if err := identityProvider.SetDeps(identityprovider.Deps{
DataStore: cat.GetDataStore(),
X509IdentityFetcher: identityprovider.X509IdentityFetcherFunc(func(context.Context) (*identityprovider.X509Identity, error) {
// Return the server identity itself
state := svidRotator.State()
return &identityprovider.X509Identity{
CertChain: state.SVID,
PrivateKey: state.Key,
}, nil
}),
}); err != nil {
return fmt.Errorf("failed setting IdentityProvider deps: %w", err)
}
// Set the agent store dependencies
if err := agentStore.SetDeps(agentstore.Deps{
DataStore: cat.GetDataStore(),
}); err != nil {
return fmt.Errorf("failed setting AgentStore deps: %w", err)
}
registrationManager := s.newRegistrationManager(cat, metrics)
if err := healthChecker.AddCheck("server", s); err != nil {
return fmt.Errorf("failed adding healthcheck: %w", err)
}
err = util.RunTasks(ctx,
caManager.Run,
svidRotator.Run,
endpointsServer.ListenAndServe,
metrics.ListenAndServe,
bundleManager.Run,
registrationManager.Run,
util.SerialRun(s.waitForTestDial, healthChecker.ListenAndServe),
scanForBadEntries(s.config.Log, metrics, cat.GetDataStore()),
)
if errors.Is(err, context.Canceled) {
err = nil
}
return err
}
func (s *Server) setupProfiling(ctx context.Context) (stop func()) {
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
if runtime.MemProfileRate == 0 {
s.config.Log.Warn("Memory profiles are disabled")
}
if s.config.ProfilingPort > 0 {
grpc.EnableTracing = true
server := http.Server{
Addr: fmt.Sprintf("localhost:%d", s.config.ProfilingPort),
Handler: http.DefaultServeMux,
}
// kick off a goroutine to serve the pprof endpoints and one to
// gracefully shut down the server when profiling is being torn down
wg.Add(1)
go func() {
defer wg.Done()
if err := server.ListenAndServe(); err != nil {
s.config.Log.WithError(err).Warn("Unable to serve profiling server")
}
}()
wg.Add(1)
go func() {
defer wg.Done()
<-ctx.Done()
if err := server.Shutdown(ctx); err != nil {
s.config.Log.WithError(err).Warn("Unable to shutdown the server cleanly")
}
}()
}
if s.config.ProfilingFreq > 0 {
c := &profiling.Config{
Tag: "server",
Frequency: s.config.ProfilingFreq,
DebugLevel: 0,
RunGCBeforeHeapProfile: true,
Profiles: s.config.ProfilingNames,
}
wg.Add(1)
go func() {
defer wg.Done()
if err := profiling.Run(ctx, c); err != nil {
s.config.Log.WithError(err).Warn("Failed to run profiling")
}
}()
}
return func() {
cancel()
wg.Wait()
}
}
func (s *Server) loadCatalog(ctx context.Context, metrics telemetry.Metrics, identityProvider *identityprovider.IdentityProvider, agentStore *agentstore.AgentStore,
healthChecker health.Checker) (*catalog.Repository, error) {
return catalog.Load(ctx, catalog.Config{
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.Catalog),
Metrics: metrics,
TrustDomain: s.config.TrustDomain,
PluginConfig: s.config.PluginConfigs,
IdentityProvider: identityProvider,
AgentStore: agentStore,
HealthChecker: healthChecker,
})
}
func (s *Server) newCA(metrics telemetry.Metrics, healthChecker health.Checker) *ca.CA {
return ca.NewCA(ca.Config{
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.CA),
Metrics: metrics,
X509SVIDTTL: s.config.SVIDTTL,
JWTIssuer: s.config.JWTIssuer,
TrustDomain: s.config.TrustDomain,
CASubject: s.config.CASubject,
HealthChecker: healthChecker,
})
}
func (s *Server) newCAManager(ctx context.Context, cat catalog.Catalog, metrics telemetry.Metrics, serverCA *ca.CA, healthChecker health.Checker) (*ca.Manager, error) {
caManager := ca.NewManager(ca.ManagerConfig{
CA: serverCA,
Catalog: cat,
TrustDomain: s.config.TrustDomain,
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.CAManager),
Metrics: metrics,
CATTL: s.config.CATTL,
CASubject: s.config.CASubject,
Dir: s.config.DataDir,
X509CAKeyType: s.config.CAKeyType,
JWTKeyType: s.config.JWTKeyType,
HealthChecker: healthChecker,
})
if err := caManager.Initialize(ctx); err != nil {
return nil, err
}
return caManager, nil
}
func (s *Server) newRegistrationManager(cat catalog.Catalog, metrics telemetry.Metrics) *registration.Manager {
registrationManager := registration.NewManager(registration.ManagerConfig{
DataStore: cat.GetDataStore(),
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.RegistrationManager),
Metrics: metrics,
})
return registrationManager
}
func (s *Server) newSVIDRotator(ctx context.Context, serverCA ca.ServerCA, metrics telemetry.Metrics) (*svid.Rotator, error) {
svidRotator := svid.NewRotator(&svid.RotatorConfig{
ServerCA: serverCA,
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.SVIDRotator),
Metrics: metrics,
TrustDomain: s.config.TrustDomain,
KeyType: s.config.CAKeyType,
})
if err := svidRotator.Initialize(ctx); err != nil {
return nil, err
}
return svidRotator, nil
}
func (s *Server) newEndpointsServer(ctx context.Context, catalog catalog.Catalog, svidObserver svid.Observer, serverCA ca.ServerCA, metrics telemetry.Metrics, caManager *ca.Manager, authPolicyEngine *authpolicy.Engine, bundleManager *bundle_client.Manager) (endpoints.Server, error) {
config := endpoints.Config{
TCPAddr: s.config.BindAddress,
UDSAddr: s.config.BindUDSAddress,
SVIDObserver: svidObserver,
TrustDomain: s.config.TrustDomain,
Catalog: catalog,
ServerCA: serverCA,
Log: s.config.Log.WithField(telemetry.SubsystemName, telemetry.Endpoints),
Metrics: metrics,
Manager: caManager,
RateLimit: s.config.RateLimit,
Uptime: uptime.Uptime,
Clock: clock.New(),
CacheReloadInterval: s.config.CacheReloadInterval,
AuditLogEnabled: s.config.AuditLogEnabled,
AuthPolicyEngine: authPolicyEngine,
BundleManager: bundleManager,
}
if s.config.Federation.BundleEndpoint != nil {
config.BundleEndpoint.Address = s.config.Federation.BundleEndpoint.Address
config.BundleEndpoint.ACME = s.config.Federation.BundleEndpoint.ACME
}
return endpoints.New(ctx, config)
}
func (s *Server) newBundleManager(cat catalog.Catalog, metrics telemetry.Metrics) *bundle_client.Manager {
return bundle_client.NewManager(bundle_client.ManagerConfig{
Log: s.config.Log.WithField(telemetry.SubsystemName, "bundle_client"),
Metrics: metrics,
DataStore: cat.GetDataStore(),
Source: bundle_client.TrustDomainConfigMap(s.config.Federation.FederatesWith),
})
}
func (s *Server) validateTrustDomain(ctx context.Context, ds datastore.DataStore) error {
trustDomain := s.config.TrustDomain.String()
// Get only first page with a single element
fetchResponse, err := ds.ListRegistrationEntries(ctx, &datastore.ListRegistrationEntriesRequest{
Pagination: &datastore.Pagination{
Token: "",
PageSize: pageSize,
}})
if err != nil {
return err
}
for _, entry := range fetchResponse.Entries {
id, err := url.Parse(entry.SpiffeId)
if err != nil {
return fmt.Errorf(invalidSpiffeIDRegistrationEntry, entry.EntryId, err)
}
if id.Host != trustDomain {
return fmt.Errorf(invalidTrustDomainRegistrationEntry, id.Host, trustDomain)
}
}
// Get only first page with a single element
nodesResponse, err := ds.ListAttestedNodes(ctx, &datastore.ListAttestedNodesRequest{
Pagination: &datastore.Pagination{
Token: "",
PageSize: pageSize,
}})
if err != nil {
return err
}
for _, node := range nodesResponse.Nodes {
id, err := url.Parse(node.SpiffeId)
if err != nil {
s.config.Log.WithError(err).WithField(telemetry.SPIFFEID, node.SpiffeId).Warn(invalidSpiffeIDAttestedNode)
continue
}
if id.Host != trustDomain {
msg := fmt.Sprintf(invalidTrustDomainAttestedNode, id.Host, trustDomain)
s.config.Log.Warn(msg)
}
}
return nil
}
// waitForTestDial calls health.WaitForTestDial to wait for a connection to the
// SPIRE Server API socket. This function always returns nil, even if
// health.WaitForTestDial exited due to a timeout.
func (s *Server) waitForTestDial(ctx context.Context) error {
health.WaitForTestDial(ctx, s.config.BindUDSAddress)
return nil
}
// CheckHealth is used as a top-level health check for the Server.
func (s *Server) CheckHealth() health.State {
err := s.tryGetBundle()
// The API is served only after the server CA has been
// signed by upstream. Hence, both live and ready checks
// are determined by whether the bundles are received or not.
// TODO: Better live check for server.
return health.State{
Ready: err == nil,
Live: err == nil,
ReadyDetails: serverHealthDetails{
GetBundleErr: errString(err),
},
LiveDetails: serverHealthDetails{
GetBundleErr: errString(err),
},
}
}
func (s *Server) tryGetBundle() error {
client, err := server_util.NewServerClient(s.config.BindUDSAddress.Name)
if err != nil {
return errors.New("cannot create registration client")
}
defer client.Release()
bundleClient := client.NewBundleClient()
// Currently using the ability to fetch a bundle as the health check. This
// **could** be problematic if the Upstream CA signing process is lengthy.
// As currently coded however, the API isn't served until after
// the server CA has been signed by upstream.
if _, err := bundleClient.GetBundle(context.Background(), &bundlev1.GetBundleRequest{}); err != nil {
return errors.New("unable to fetch bundle")
}
return nil
}
type serverHealthDetails struct {
GetBundleErr string `json:"get_bundle_err,omitempty"`
}
func errString(err error) string {
if err != nil {
return err.Error()
}
return ""
}
| 1 | 17,883 | I don't recall what we decided here in terms of which source would get priority. As written, the static configuration will overwrite datastore results. | spiffe-spire | go |
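To make the precedence question above concrete: a self-contained sketch of earlier-source-wins merge semantics, i.e. the behavior described in the comment where static configuration shadows datastore results. This is an assumption for illustration only, not SPIRE's actual MergeTrustDomainConfigSources; every name below is hypothetical.

package main

import "fmt"

// TrustDomainConfig stands in for per-trust-domain federation settings.
type TrustDomainConfig struct {
	EndpointURL string
}

// Source yields trust domain configs keyed by trust domain name.
type Source interface {
	GetTrustDomainConfigs() (map[string]TrustDomainConfig, error)
}

type mapSource map[string]TrustDomainConfig

func (m mapSource) GetTrustDomainConfigs() (map[string]TrustDomainConfig, error) {
	return m, nil
}

// merge combines sources; when the same trust domain appears in several
// sources, the earliest source wins (a static config listed first shadows
// any later datastore entry).
func merge(sources ...Source) (map[string]TrustDomainConfig, error) {
	out := make(map[string]TrustDomainConfig)
	for _, s := range sources {
		configs, err := s.GetTrustDomainConfigs()
		if err != nil {
			return nil, err
		}
		for td, cfg := range configs {
			if _, ok := out[td]; !ok {
				out[td] = cfg
			}
		}
	}
	return out, nil
}

func main() {
	static := mapSource{"example.org": {EndpointURL: "https://static.example.org"}}
	datastore := mapSource{"example.org": {EndpointURL: "https://ds.example.org"}}
	merged, _ := merge(static, datastore)
	fmt.Println(merged["example.org"].EndpointURL) // prints the static endpoint
}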
@@ -192,7 +192,7 @@ func replaceSequenceLabel(state *core.BuildState, target *core.BuildTarget, labe
}
func checkAndReplaceSequence(state *core.BuildState, target, dep *core.BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs, tool bool) string {
- if allOutputs && !multiple && len(dep.Outputs()) != 1 {
+ if allOutputs && !multiple && len(dep.Outputs()) > 1 {
// Label must have only one output.
panic(fmt.Sprintf("Rule %s can't use %s; %s has multiple outputs.", target.Label, in, dep.Label))
} else if runnable && !dep.IsBinary { | 1 | // Replacement of sequences in genrule commands.
//
// Genrules can contain certain replacement variables which Please substitutes
// with locations of the actual thing before running.
// The following replacements are currently made:
//
// $(location //path/to:target)
// Expands to the output of the given build rule. The rule can only have one
// output (use $locations if there are multiple).
//
// $(locations //path/to:target)
// Expands to all the outputs (space separated) of the given build rule.
// Equivalent to $(location ...) for rules with a single output.
//
// $(exe //path/to:target)
// Expands to a command to run the output of the given target from within a
// genrule or test directory. For example,
// java -jar path/to/target.jar.
// The rule must be tagged as 'binary'.
//
// $(out_exe //path/to:target)
// Expands to a command to run the output of the given target. For example,
// java -jar plz-out/bin/path/to/target.jar.
// The rule must be tagged as 'binary'.
//
// $(dir //path/to:target)
// Expands to the package directory containing the outputs of the given target.
// Useful for rules that have multiple outputs where you only need to know
// what directory they're in.
//
// $(out_location //path/to:target)
// Expands to a path to the output of the given target, with the preceding plz-out/gen
// or plz-out/bin etc. Useful when these things will be run by a user.
//
// $(worker //path/to:target)
// Indicates that this target will be run by a remote worker process. The following
// arguments are sent to the remote worker.
// This is subject to some additional rules: it must appear initially in the command,
// and if "&&" appears subsequently in the command, that part is run locally after
// the worker has completed. All workers must be listed as tools of the rule.
//
// In general it's a good idea to use these where possible in genrules rather than
// hardcoding specific paths.
package build
import (
"encoding/base64"
"fmt"
"path"
"path/filepath"
"regexp"
"strings"
"core"
)
var locationReplacement = regexp.MustCompile(`\$\(location ([^\)]+)\)`)
var locationsReplacement = regexp.MustCompile(`\$\(locations ([^\)]+)\)`)
var exeReplacement = regexp.MustCompile(`\$\(exe ([^\)]+)\)`)
var outExeReplacement = regexp.MustCompile(`\$\(out_exe ([^\)]+)\)`)
var outReplacement = regexp.MustCompile(`\$\(out_location ([^\)]+)\)`)
var dirReplacement = regexp.MustCompile(`\$\(dir ([^\)]+)\)`)
var hashReplacement = regexp.MustCompile(`\$\(hash ([^\)]+)\)`)
var workerReplacement = regexp.MustCompile(`^(.*)\$\(worker ([^\)]+)\) *([^&]*)(?: *&& *(.*))?$`)
// ReplaceSequences replaces escape sequences in the given string.
func ReplaceSequences(state *core.BuildState, target *core.BuildTarget, command string) string {
return replaceSequencesInternal(state, target, command, false)
}
// ReplaceTestSequences replaces escape sequences in the given string when running a test.
func ReplaceTestSequences(state *core.BuildState, target *core.BuildTarget, command string) string {
if command == "" {
// An empty test command implies running the test binary.
return replaceSequencesInternal(state, target, fmt.Sprintf("$(exe :%s)", target.Label.Name), true)
} else if strings.HasPrefix(command, "$(worker") {
_, _, command = workerAndArgs(state, target, command)
return command
}
return replaceSequencesInternal(state, target, command, true)
}
// TestWorkerCommand returns the worker & its arguments (if any) for a test, and the command to run for the test itself.
func TestWorkerCommand(state *core.BuildState, target *core.BuildTarget) (string, string, string) {
return workerAndArgs(state, target, target.GetTestCommand(state))
}
// workerCommandAndArgs returns the worker & its command (if any) and subsequent local command for the rule.
func workerCommandAndArgs(state *core.BuildState, target *core.BuildTarget) (string, string, string) {
return workerAndArgs(state, target, target.GetCommand(state))
}
func workerAndArgs(state *core.BuildState, target *core.BuildTarget, command string) (string, string, string) {
match := workerReplacement.FindStringSubmatch(command)
if match == nil {
return "", "", ReplaceSequences(state, target, command)
} else if match[1] != "" {
panic("$(worker) replacements cannot have any commands preceding them.")
}
return replaceSequence(state, target, core.ExpandHomePath(match[2]), true, false, false, true, false, false),
replaceSequencesInternal(state, target, strings.TrimSpace(match[3]), false),
replaceSequencesInternal(state, target, match[4], false)
}
func replaceSequencesInternal(state *core.BuildState, target *core.BuildTarget, command string, test bool) string {
cmd := locationReplacement.ReplaceAllStringFunc(command, func(in string) string {
return replaceSequence(state, target, in[11:len(in)-1], false, false, false, false, false, test)
})
cmd = locationsReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[12:len(in)-1], false, true, false, false, false, test)
})
cmd = exeReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[6:len(in)-1], true, false, false, false, false, test)
})
cmd = outReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[15:len(in)-1], false, false, false, true, false, test)
})
cmd = outExeReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[10:len(in)-1], true, false, false, true, false, test)
})
cmd = dirReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
return replaceSequence(state, target, in[6:len(in)-1], false, true, true, false, false, test)
})
cmd = hashReplacement.ReplaceAllStringFunc(cmd, func(in string) string {
if !target.Stamp {
panic(fmt.Sprintf("Target %s can't use $(hash ) replacements without stamp=True", target.Label))
}
return replaceSequence(state, target, in[7:len(in)-1], false, true, true, false, true, test)
})
if state.Config.Bazel.Compatibility {
// Bazel allows several obscure Make-style variable expansions.
// Our replacement here is not very principled but should work better than not doing it at all.
cmd = strings.Replace(cmd, "$<", "$SRCS", -1)
cmd = strings.Replace(cmd, "$(<)", "$SRCS", -1)
cmd = strings.Replace(cmd, "$@D", "$TMP_DIR", -1)
cmd = strings.Replace(cmd, "$(@D)", "$TMP_DIR", -1)
cmd = strings.Replace(cmd, "$@", "$OUTS", -1)
cmd = strings.Replace(cmd, "$(@)", "$OUTS", -1)
// It also seemingly allows you to get away with this syntax, which means something
// fairly different in Bash, but never mind.
cmd = strings.Replace(cmd, "$(SRCS)", "$SRCS", -1)
cmd = strings.Replace(cmd, "$(OUTS)", "$OUTS", -1)
}
// We would ideally check for this when doing matches above, but not easy in
// Go since its regular expressions are actually regular and principled.
return strings.Replace(cmd, "\\$", "$", -1)
}
// replaceSequence replaces a single escape sequence in a command.
func replaceSequence(state *core.BuildState, target *core.BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test bool) string {
if core.LooksLikeABuildLabel(in) {
label := core.ParseBuildLabel(in, target.Label.PackageName)
return replaceSequenceLabel(state, target, label, in, runnable, multiple, dir, outPrefix, hash, test, true)
}
for _, src := range sourcesOrTools(target, runnable) {
if label := src.Label(); label != nil && src.String() == in {
return replaceSequenceLabel(state, target, *label, in, runnable, multiple, dir, outPrefix, hash, test, false)
} else if runnable && src.String() == in {
return src.String()
}
}
if hash {
return base64.RawURLEncoding.EncodeToString(state.PathHasher.MustHash(path.Join(target.Label.PackageName, in)))
}
if strings.HasPrefix(in, "/") {
return in // Absolute path, probably on a tool or system src.
}
return quote(path.Join(target.Label.PackageName, in))
}
// sourcesOrTools returns either the tools of a target if runnable is true, otherwise its sources.
func sourcesOrTools(target *core.BuildTarget, runnable bool) []core.BuildInput {
if runnable {
return target.Tools
}
return target.AllSources()
}
func replaceSequenceLabel(state *core.BuildState, target *core.BuildTarget, label core.BuildLabel, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs bool) string {
// Check this label is a dependency of the target, otherwise it's not allowed.
if label == target.Label { // targets can always use themselves.
return checkAndReplaceSequence(state, target, target, in, runnable, multiple, dir, outPrefix, hash, test, allOutputs, false)
}
deps := target.DependenciesFor(label)
if len(deps) == 0 {
panic(fmt.Sprintf("Rule %s can't use %s; doesn't depend on target %s", target.Label, in, label))
}
// TODO(pebers): this does not correctly handle the case where there are multiple deps here
// (but is better than the previous case where it never worked at all)
return checkAndReplaceSequence(state, target, deps[0], in, runnable, multiple, dir, outPrefix, hash, test, allOutputs, target.IsTool(label))
}
func checkAndReplaceSequence(state *core.BuildState, target, dep *core.BuildTarget, in string, runnable, multiple, dir, outPrefix, hash, test, allOutputs, tool bool) string {
if allOutputs && !multiple && len(dep.Outputs()) != 1 {
// Label must have only one output.
panic(fmt.Sprintf("Rule %s can't use %s; %s has multiple outputs.", target.Label, in, dep.Label))
} else if runnable && !dep.IsBinary {
panic(fmt.Sprintf("Rule %s can't $(exe %s), it's not executable", target.Label, dep.Label))
} else if runnable && len(dep.Outputs()) == 0 {
panic(fmt.Sprintf("Rule %s is tagged as binary but produces no output.", dep.Label))
}
if hash {
return base64.RawURLEncoding.EncodeToString(mustOutputHash(state, dep))
}
output := ""
for _, out := range dep.Outputs() {
if allOutputs || out == in {
if tool {
abs, err := filepath.Abs(handleDir(dep.OutDir(), out, dir))
if err != nil {
log.Fatalf("Couldn't calculate relative path: %s", err)
}
output += quote(abs) + " "
} else {
output += quote(fileDestination(target, dep, out, dir, outPrefix, test)) + " "
}
if dir {
break
}
}
}
if runnable && dep.HasLabel("java_non_exe") {
// The target is a Java target that isn't self-executable, hence it needs something to run it.
output = "java -jar " + output
}
return strings.TrimRight(output, " ")
}
func fileDestination(target, dep *core.BuildTarget, out string, dir, outPrefix, test bool) string {
if outPrefix {
return handleDir(dep.OutDir(), out, dir)
}
if test && target == dep {
// Slightly fiddly case because tests put binaries in a possibly slightly unusual place.
return "./" + out
}
return handleDir(dep.Label.PackageName, out, dir)
}
// Encloses the given string in quotes if needed.
func quote(s string) string {
if strings.ContainsAny(s, "|&;()<>") {
return "\"" + s + "\""
}
return s
}
// handleDir chooses either the out dir or the actual output location depending on the 'dir' flag.
func handleDir(outDir, output string, dir bool) string {
if dir {
return outDir
}
return path.Join(outDir, output)
}
| 1 | 8,666 | I think it should still panic when there are no outputs; it might be nice to special-case that, though, so the message is more explicit. | thought-machine-please | go |
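A sketch of the special-casing suggested above: keep panicking when there are no outputs, but with a message distinct from the multiple-outputs case. Hypothetical standalone code, not the project's checkAndReplaceSequence.

package main

import "fmt"

// checkOutputCount mirrors the shape of the real check, for illustration only.
func checkOutputCount(target, in, dep string, allOutputs, multiple bool, numOutputs int) {
	if allOutputs && !multiple {
		switch {
		case numOutputs == 0:
			panic(fmt.Sprintf("Rule %s can't use %s; %s has no outputs.", target, in, dep))
		case numOutputs > 1:
			panic(fmt.Sprintf("Rule %s can't use %s; %s has multiple outputs.", target, in, dep))
		}
	}
}

func main() {
	// Exactly one output: passes silently.
	checkOutputCount("//a:b", "$(location //c:d)", "//c:d", true, false, 1)
}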
@@ -110,12 +110,14 @@ public class SolrMetricManager {
public static final int DEFAULT_CLOUD_REPORTER_PERIOD = 60;
- private MetricRegistry.MetricSupplier<Counter> counterSupplier;
- private MetricRegistry.MetricSupplier<Meter> meterSupplier;
- private MetricRegistry.MetricSupplier<Timer> timerSupplier;
- private MetricRegistry.MetricSupplier<Histogram> histogramSupplier;
+ private final MetricsConfig metricsConfig;
+ private final MetricRegistry.MetricSupplier<Counter> counterSupplier;
+ private final MetricRegistry.MetricSupplier<Meter> meterSupplier;
+ private final MetricRegistry.MetricSupplier<Timer> timerSupplier;
+ private final MetricRegistry.MetricSupplier<Histogram> histogramSupplier;
public SolrMetricManager() {
+ metricsConfig = new MetricsConfig.MetricsConfigBuilder().build();
counterSupplier = MetricSuppliers.counterSupplier(null, null);
meterSupplier = MetricSuppliers.meterSupplier(null, null);
timerSupplier = MetricSuppliers.timerSupplier(null, null); | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.metrics;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collectors;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Metric;
import com.codahale.metrics.MetricFilter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.MetricSet;
import com.codahale.metrics.SharedMetricRegistries;
import com.codahale.metrics.Timer;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.MetricsConfig;
import org.apache.solr.core.PluginInfo;
import org.apache.solr.core.SolrCore;
import org.apache.solr.core.SolrInfoBean;
import org.apache.solr.core.SolrResourceLoader;
import org.apache.solr.logging.MDCLoggingContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.slf4j.MDC;
/**
* This class maintains a repository of named {@link MetricRegistry} instances, and provides several
* helper methods for managing various aspects of metrics reporting:
* <ul>
* <li>registry creation, clearing and removal,</li>
* <li>creation of most common metric implementations,</li>
* <li>management of {@link SolrMetricReporter}-s specific to a named registry.</li>
* </ul>
* {@link MetricRegistry} instances are automatically created when first referenced by name. Similarly,
* instances of {@link Metric} implementations, such as {@link Meter}, {@link Counter}, {@link Timer} and
* {@link Histogram} are automatically created and registered under hierarchical names, in a specified
* registry, when {@link #meter(SolrMetricsContext, String, String, String...)} and other similar methods are called.
* <p>This class enforces a common prefix ({@link #REGISTRY_NAME_PREFIX}) in all registry
* names.</p>
* <p>Solr uses several different registries for collecting metrics belonging to different groups, using
* {@link org.apache.solr.core.SolrInfoBean.Group} as the main name of the registry (plus the
* above-mentioned prefix). Instances of {@link SolrMetricManager} are created for each {@link org.apache.solr.core.CoreContainer},
* and most registries are local to each instance, with the exception of two global registries:
* <code>solr.jetty</code> and <code>solr.jvm</code>, which are shared between all {@link org.apache.solr.core.CoreContainer}-s</p>
*/
public class SolrMetricManager {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
/**
* Common prefix for all registry names that Solr uses.
*/
public static final String REGISTRY_NAME_PREFIX = "solr.";
/**
* Registry name for Jetty-specific metrics. This name is also subject to overrides controlled by
* system properties. This registry is shared between instances of {@link SolrMetricManager}.
*/
public static final String JETTY_REGISTRY = REGISTRY_NAME_PREFIX + SolrInfoBean.Group.jetty.toString();
/**
* Registry name for JVM-specific metrics. This name is also subject to overrides controlled by
* system properties. This registry is shared between instances of {@link SolrMetricManager}.
*/
public static final String JVM_REGISTRY = REGISTRY_NAME_PREFIX + SolrInfoBean.Group.jvm.toString();
private final ConcurrentMap<String, MetricRegistry> registries = new ConcurrentHashMap<>();
private final Map<String, Map<String, SolrMetricReporter>> reporters = new HashMap<>();
private final Lock reportersLock = new ReentrantLock();
private final Lock swapLock = new ReentrantLock();
public static final int DEFAULT_CLOUD_REPORTER_PERIOD = 60;
private MetricRegistry.MetricSupplier<Counter> counterSupplier;
private MetricRegistry.MetricSupplier<Meter> meterSupplier;
private MetricRegistry.MetricSupplier<Timer> timerSupplier;
private MetricRegistry.MetricSupplier<Histogram> histogramSupplier;
public SolrMetricManager() {
counterSupplier = MetricSuppliers.counterSupplier(null, null);
meterSupplier = MetricSuppliers.meterSupplier(null, null);
timerSupplier = MetricSuppliers.timerSupplier(null, null);
histogramSupplier = MetricSuppliers.histogramSupplier(null, null);
}
public SolrMetricManager(SolrResourceLoader loader, MetricsConfig metricsConfig) {
counterSupplier = MetricSuppliers.counterSupplier(loader, metricsConfig.getCounterSupplier());
meterSupplier = MetricSuppliers.meterSupplier(loader, metricsConfig.getMeterSupplier());
timerSupplier = MetricSuppliers.timerSupplier(loader, metricsConfig.getTimerSupplier());
histogramSupplier = MetricSuppliers.histogramSupplier(loader, metricsConfig.getHistogramSupplier());
}
// for unit tests
public MetricRegistry.MetricSupplier<Counter> getCounterSupplier() {
return counterSupplier;
}
public MetricRegistry.MetricSupplier<Meter> getMeterSupplier() {
return meterSupplier;
}
public MetricRegistry.MetricSupplier<Timer> getTimerSupplier() {
return timerSupplier;
}
public MetricRegistry.MetricSupplier<Histogram> getHistogramSupplier() {
return histogramSupplier;
}
/**
* An implementation of {@link MetricFilter} that selects metrics
* with names that start with one of prefixes.
*/
public static class PrefixFilter implements MetricFilter {
private final Set<String> prefixes = new HashSet<>();
private final Set<String> matched = new HashSet<>();
private boolean allMatch = false;
/**
* Create a filter that uses the provided prefixes.
*
* @param prefixes prefixes to use, must not be null. If empty then any
* name will match, if not empty then match on any prefix will
* succeed (logical OR).
*/
public PrefixFilter(String... prefixes) {
Objects.requireNonNull(prefixes);
if (prefixes.length > 0) {
this.prefixes.addAll(Arrays.asList(prefixes));
}
if (this.prefixes.isEmpty()) {
allMatch = true;
}
}
public PrefixFilter(Collection<String> prefixes) {
Objects.requireNonNull(prefixes);
this.prefixes.addAll(prefixes);
if (this.prefixes.isEmpty()) {
allMatch = true;
}
}
@Override
public boolean matches(String name, Metric metric) {
if (allMatch) {
matched.add(name);
return true;
}
for (String prefix : prefixes) {
if (name.startsWith(prefix)) {
matched.add(name);
return true;
}
}
return false;
}
/**
* Return the set of names that matched this filter.
*
* @return matching names
*/
public Set<String> getMatched() {
return Collections.unmodifiableSet(matched);
}
/**
* Clear the set of names that matched.
*/
public void reset() {
matched.clear();
}
@Override
public String toString() {
return "PrefixFilter{" +
"prefixes=" + prefixes +
'}';
}
}
/**
* An implementation of {@link MetricFilter} that selects metrics
* with names that match regular expression patterns.
*/
public static class RegexFilter implements MetricFilter {
private final Set<Pattern> compiledPatterns = new HashSet<>();
private final Set<String> matched = new HashSet<>();
private boolean allMatch = false;
/**
     * Create a filter that uses the provided patterns.
*
* @param patterns regex patterns to use, must not be null. If empty then any
* name will match, if not empty then match on any pattern will
* succeed (logical OR).
*/
public RegexFilter(String... patterns) throws PatternSyntaxException {
this(patterns != null ? Arrays.asList(patterns) : Collections.emptyList());
}
public RegexFilter(Collection<String> patterns) throws PatternSyntaxException {
Objects.requireNonNull(patterns);
if (patterns.isEmpty()) {
allMatch = true;
return;
}
patterns.forEach(p -> {
Pattern pattern = Pattern.compile(p);
compiledPatterns.add(pattern);
});
if (patterns.isEmpty()) {
allMatch = true;
}
}
@Override
public boolean matches(String name, Metric metric) {
if (allMatch) {
matched.add(name);
return true;
}
for (Pattern p : compiledPatterns) {
if (p.matcher(name).matches()) {
matched.add(name);
return true;
}
}
return false;
}
/**
* Return the set of names that matched this filter.
*
* @return matching names
*/
public Set<String> getMatched() {
return Collections.unmodifiableSet(matched);
}
/**
* Clear the set of names that matched.
*/
public void reset() {
matched.clear();
}
@Override
public String toString() {
return "RegexFilter{" +
"compiledPatterns=" + compiledPatterns +
'}';
}
}
/**
* An implementation of {@link MetricFilter} that selects metrics
* that match any filter in a list of filters.
*/
public static class OrFilter implements MetricFilter {
List<MetricFilter> filters = new ArrayList<>();
public OrFilter(Collection<MetricFilter> filters) {
if (filters != null) {
this.filters.addAll(filters);
}
}
public OrFilter(MetricFilter... filters) {
if (filters != null) {
for (MetricFilter filter : filters) {
if (filter != null) {
this.filters.add(filter);
}
}
}
}
@Override
public boolean matches(String s, Metric metric) {
for (MetricFilter filter : filters) {
if (filter.matches(s, metric)) {
return true;
}
}
return false;
}
}
/**
* An implementation of {@link MetricFilter} that selects metrics
* that match all filters in a list of filters.
*/
public static class AndFilter implements MetricFilter {
List<MetricFilter> filters = new ArrayList<>();
public AndFilter(Collection<MetricFilter> filters) {
if (filters != null) {
this.filters.addAll(filters);
}
}
public AndFilter(MetricFilter... filters) {
if (filters != null) {
for (MetricFilter filter : filters) {
if (filter != null) {
this.filters.add(filter);
}
}
}
}
@Override
public boolean matches(String s, Metric metric) {
for (MetricFilter filter : filters) {
if (!filter.matches(s, metric)) {
return false;
}
}
return true;
}
}
/**
* Return a set of existing registry names.
*/
public Set<String> registryNames() {
Set<String> set = new HashSet<>();
set.addAll(registries.keySet());
set.addAll(SharedMetricRegistries.names());
return set;
}
/**
* Check whether a registry with a given name already exists.
*
* @param name registry name
* @return true if this name points to a registry that already exists, false otherwise
*/
public boolean hasRegistry(String name) {
Set<String> names = registryNames();
name = enforcePrefix(name);
return names.contains(name);
}
/**
* Return set of existing registry names that match a regex pattern
*
* @param patterns regex patterns. NOTE: users need to make sure that patterns that
* don't start with a wildcard use the full registry name starting with
* {@link #REGISTRY_NAME_PREFIX}
* @return set of existing registry names where at least one pattern matched.
*/
public Set<String> registryNames(String... patterns) throws PatternSyntaxException {
if (patterns == null || patterns.length == 0) {
return registryNames();
}
List<Pattern> compiled = new ArrayList<>();
for (String pattern : patterns) {
compiled.add(Pattern.compile(pattern));
}
return registryNames(compiled.toArray(new Pattern[compiled.size()]));
}
public Set<String> registryNames(Pattern... patterns) {
Set<String> allNames = registryNames();
if (patterns == null || patterns.length == 0) {
return allNames;
}
return allNames.stream().filter(s -> {
for (Pattern p : patterns) {
if (p.matcher(s).matches()) {
return true;
}
}
return false;
}).collect(Collectors.toSet());
}
/**
* Check for predefined shared registry names. This compares the input name
* with normalized names of predefined shared registries -
* {@link #JVM_REGISTRY} and {@link #JETTY_REGISTRY}.
*
* @param registry already normalized name
* @return true if the name matches one of shared registries
*/
private static boolean isSharedRegistry(String registry) {
return JETTY_REGISTRY.equals(registry) || JVM_REGISTRY.equals(registry);
}
/**
* Get (or create if not present) a named registry
*
* @param registry name of the registry
* @return existing or newly created registry
*/
public MetricRegistry registry(String registry) {
registry = enforcePrefix(registry);
if (isSharedRegistry(registry)) {
return SharedMetricRegistries.getOrCreate(registry);
} else {
swapLock.lock();
try {
return getOrCreateRegistry(registries, registry);
} finally {
swapLock.unlock();
}
}
}
private static MetricRegistry getOrCreateRegistry(ConcurrentMap<String, MetricRegistry> map, String registry) {
final MetricRegistry existing = map.get(registry);
if (existing == null) {
final MetricRegistry created = new MetricRegistry();
final MetricRegistry raced = map.putIfAbsent(registry, created);
if (raced == null) {
return created;
} else {
return raced;
}
} else {
return existing;
}
}
/**
* Remove a named registry.
*
* @param registry name of the registry to remove
*/
public void removeRegistry(String registry) {
// close any reporters for this registry first
closeReporters(registry, null);
// make sure we use a name with prefix
registry = enforcePrefix(registry);
if (isSharedRegistry(registry)) {
SharedMetricRegistries.remove(registry);
} else {
swapLock.lock();
try {
registries.remove(registry);
} finally {
swapLock.unlock();
}
}
}
/**
   * Swap registries. This is useful e.g. during
* {@link org.apache.solr.core.SolrCore} rename or swap operations. NOTE:
* this operation is not supported for shared registries.
*
* @param registry1 source registry
* @param registry2 target registry. Note: when used after core rename the target registry doesn't
* exist, so the swap operation will only rename the existing registry without creating
* an empty one under the previous name.
*/
public void swapRegistries(String registry1, String registry2) {
registry1 = enforcePrefix(registry1);
registry2 = enforcePrefix(registry2);
if (isSharedRegistry(registry1) || isSharedRegistry(registry2)) {
throw new UnsupportedOperationException("Cannot swap shared registry: " + registry1 + ", " + registry2);
}
swapLock.lock();
try {
MetricRegistry from = registries.get(registry1);
MetricRegistry to = registries.get(registry2);
if (from == to) {
return;
}
MetricRegistry reg1 = registries.remove(registry1);
MetricRegistry reg2 = registries.remove(registry2);
if (reg2 != null) {
registries.put(registry1, reg2);
}
if (reg1 != null) {
registries.put(registry2, reg1);
}
} finally {
swapLock.unlock();
}
}
/**
* Potential conflict resolution strategies when attempting to register a new metric that already exists
*/
public enum ResolutionStrategy {
/**
* The existing metric will be kept and the new metric will be ignored. If no metric exists, then the new metric
* will be registered.
*/
IGNORE,
/**
* The existing metric will be removed and replaced with the new metric
*/
REPLACE,
/**
* An exception will be thrown. This is the default implementation behavior.
*/
ERROR
}
/**
* Register all metrics in the provided {@link MetricSet}, optionally skipping those that
* already exist.
*
* @param registry registry name
* @param metrics metric set to register
* @param strategy the conflict resolution strategy to use if the named metric already exists.
* @param metricPath (optional) additional top-most metric name path elements
* @throws Exception if a metric with this name already exists.
*/
public void registerAll(String registry, MetricSet metrics, ResolutionStrategy strategy, String... metricPath) throws Exception {
MetricRegistry metricRegistry = registry(registry);
synchronized (metricRegistry) {
Map<String, Metric> existingMetrics = metricRegistry.getMetrics();
for (Map.Entry<String, Metric> entry : metrics.getMetrics().entrySet()) {
String fullName = mkName(entry.getKey(), metricPath);
if (strategy == ResolutionStrategy.REPLACE) {
metricRegistry.remove(fullName);
} else if (strategy == ResolutionStrategy.IGNORE && existingMetrics.containsKey(fullName)) {
continue;
} // strategy == ERROR will fail when we try to register
metricRegistry.register(fullName, entry.getValue());
}
}
}
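// Illustrative usage (not part of the original file; the manager variable, registry name,
// metric set and path element are hypothetical):
//   metricManager.registerAll("solr.node", nodeMetricSet, ResolutionStrategy.IGNORE, "ADMIN");
// re-registers a whole set idempotently, skipping entries that already exist.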
/**
* Remove all metrics from a specified registry.
*
* @param registry registry name
*/
public void clearRegistry(String registry) {
registry(registry).removeMatching(MetricFilter.ALL);
}
/**
* Remove some metrics from a named registry
*
* @param registry registry name
* @param metricPath (optional) top-most metric name path elements. If empty then
* this is equivalent to calling {@link #clearRegistry(String)},
* otherwise non-empty elements will be joined using dotted notation
* to form a fully-qualified prefix. Metrics with names that start
* with the prefix will be removed.
* @return set of metrics names that have been removed.
*/
public Set<String> clearMetrics(String registry, String... metricPath) {
PrefixFilter filter;
if (metricPath == null || metricPath.length == 0) {
filter = new PrefixFilter("");
} else {
String prefix = MetricRegistry.name("", metricPath);
filter = new PrefixFilter(prefix);
}
registry(registry).removeMatching(filter);
return filter.getMatched();
}
/**
* Retrieve matching metrics and their names.
*
* @param registry registry name.
* @param metricFilter filter (null is equivalent to {@link MetricFilter#ALL}).
* @return map of matching names and metrics
*/
public Map<String, Metric> getMetrics(String registry, MetricFilter metricFilter) {
if (metricFilter == null || metricFilter == MetricFilter.ALL) {
return registry(registry).getMetrics();
}
return registry(registry).getMetrics().entrySet().stream()
.filter(entry -> metricFilter.matches(entry.getKey(), entry.getValue()))
.collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue()));
}
/**
* Create or get an existing named {@link Meter}
*
* @param registry registry name
* @param metricName metric name, either final name or a fully-qualified name
* using dotted notation
* @param metricPath (optional) additional top-most metric name path elements
* @return existing or a newly created {@link Meter}
*/
public Meter meter(SolrMetricsContext context, String registry, String metricName, String... metricPath) {
final String name = mkName(metricName, metricPath);
if (context != null) {
context.registerMetricName(name);
}
return registry(registry).meter(name, meterSupplier);
}
/**
* Create or get an existing named {@link Timer}
*
* @param registry registry name
* @param metricName metric name, either final name or a fully-qualified name
* using dotted notation
* @param metricPath (optional) additional top-most metric name path elements
* @return existing or a newly created {@link Timer}
*/
public Timer timer(SolrMetricsContext context, String registry, String metricName, String... metricPath) {
final String name = mkName(metricName, metricPath);
if (context != null) {
context.registerMetricName(name);
}
return registry(registry).timer(name, timerSupplier);
}
/**
* Create or get an existing named {@link Counter}
*
* @param registry registry name
* @param metricName metric name, either final name or a fully-qualified name
* using dotted notation
* @param metricPath (optional) additional top-most metric name path elements
* @return existing or a newly created {@link Counter}
*/
public Counter counter(SolrMetricsContext context, String registry, String metricName, String... metricPath) {
final String name = mkName(metricName, metricPath);
if (context != null) {
context.registerMetricName(name);
}
return registry(registry).counter(name, counterSupplier);
}
/**
* Create or get an existing named {@link Histogram}
*
* @param registry registry name
* @param metricName metric name, either final name or a fully-qualified name
* using dotted notation
* @param metricPath (optional) additional top-most metric name path elements
* @return existing or a newly created {@link Histogram}
*/
public Histogram histogram(SolrMetricsContext context, String registry, String metricName, String... metricPath) {
final String name = mkName(metricName, metricPath);
if (context != null) {
context.registerMetricName(name);
}
return registry(registry).histogram(name, histogramSupplier);
}
/**
* @deprecated use {@link #registerMetric(SolrMetricsContext, String, Metric, ResolutionStrategy, String, String...)}
*/
@Deprecated
public void registerMetric(SolrMetricsContext context, String registry, Metric metric, boolean force, String metricName, String... metricPath) {
registerMetric(context, registry, metric, force ? ResolutionStrategy.REPLACE : ResolutionStrategy.IGNORE, metricName, metricPath);
}
/**
* Register an instance of {@link Metric}.
*
* @param registry registry name
* @param metric metric instance
* @param strategy the conflict resolution strategy to use if the named metric already exists.
* @param metricName metric name, either final name or a fully-qualified name
* using dotted notation
* @param metricPath (optional) additional top-most metric name path elements
*/
public void registerMetric(SolrMetricsContext context, String registry, Metric metric, ResolutionStrategy strategy, String metricName, String... metricPath) {
MetricRegistry metricRegistry = registry(registry);
String fullName = mkName(metricName, metricPath);
if (context != null) {
context.registerMetricName(fullName);
}
synchronized (metricRegistry) { // prevent race; register() throws if metric is already present
if (strategy == ResolutionStrategy.REPLACE) { // must remove any existing one if present
metricRegistry.remove(fullName);
} else if (strategy == ResolutionStrategy.IGNORE && metricRegistry.getMetrics().containsKey(fullName)) {
return;
} // strategy == ERROR will fail when we try to register
metricRegistry.register(fullName, metric);
}
}
/**
* This is a wrapper for {@link Gauge} metrics, which are usually implemented as
* lambdas that often keep a reference to their parent instance. In order to make sure that
* all such metrics are removed when their parent instance is removed / closed the
* metric is associated with an instance tag, which can be used then to remove
* wrappers with the matching tag using {@link #unregisterGauges(String, String)}.
*/
public static class GaugeWrapper<T> implements Gauge<T> {
private final Gauge<T> gauge;
private final String tag;
public GaugeWrapper(Gauge<T> gauge, String tag) {
this.gauge = gauge;
this.tag = tag;
}
@Override
public T getValue() {
return gauge.getValue();
}
public String getTag() {
return tag;
}
public Gauge<T> getGauge() {
return gauge;
}
}
/**
* @deprecated use {@link #registerGauge(SolrMetricsContext, String, Gauge, String, ResolutionStrategy, String, String...)}
*/
@Deprecated
public void registerGauge(SolrMetricsContext context, String registry, Gauge<?> gauge, String tag, boolean force, String metricName, String... metricPath) {
registerGauge(context, registry, gauge, tag, force ? ResolutionStrategy.REPLACE : ResolutionStrategy.ERROR, metricName, metricPath);
}
public <T> void registerGauge(SolrMetricsContext context, String registry, Gauge<T> gauge, String tag, ResolutionStrategy strategy, String metricName, String... metricPath) {
registerMetric(context, registry, new GaugeWrapper<>(gauge, tag), strategy, metricName, metricPath);
}
public int unregisterGauges(String registryName, String tagSegment) {
if (tagSegment == null) {
return 0;
}
MetricRegistry registry = registry(registryName);
if (registry == null) return 0;
AtomicInteger removed = new AtomicInteger();
registry.removeMatching((name, metric) -> {
if (metric instanceof GaugeWrapper) {
@SuppressWarnings({"rawtypes"})
GaugeWrapper wrapper = (GaugeWrapper) metric;
boolean toRemove = wrapper.getTag().contains(tagSegment);
if (toRemove) {
removed.incrementAndGet();
}
return toRemove;
}
return false;
});
return removed.get();
}
/**
* This method creates a hierarchical name with arbitrary levels of hierarchy
*
* @param name the final segment of the name, must not be null or empty.
* @param path optional path segments, starting from the top level. Empty or null
* segments will be skipped.
* @return fully-qualified name using dotted notation, with all valid hierarchy
* segments prepended to the name.
*/
public static String mkName(String name, String... path) {
return makeName(path == null || path.length == 0 ? Collections.emptyList() : Arrays.asList(path),
name);
}
public static String makeName(List<String> path, String name) {
if (name == null || name.isEmpty()) {
throw new IllegalArgumentException("name must not be empty");
}
if (path == null || path.size() == 0) {
return name;
} else {
StringBuilder sb = new StringBuilder();
for (String s : path) {
if (s == null || s.isEmpty()) {
continue;
}
if (sb.length() > 0) {
sb.append('.');
}
sb.append(s);
}
if (sb.length() > 0) {
sb.append('.');
}
sb.append(name);
return sb.toString();
}
}
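// Examples (derived from the logic above; null or empty path segments are skipped):
//   makeName(Arrays.asList("QUERY", "httpShardHandler"), "requests") -> "QUERY.httpShardHandler.requests"
//   makeName(Arrays.asList("", "QUERY"), "requests")                 -> "QUERY.requests"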
/**
* Enforces the leading {@link #REGISTRY_NAME_PREFIX} in a name.
*
* @param name input name, possibly without the prefix
* @return the original name if it already starts with the prefix, or the
* input name with the prefix prepended.
*/
public static String enforcePrefix(String name) {
if (name.startsWith(REGISTRY_NAME_PREFIX)) {
return name;
} else {
return new StringBuilder(REGISTRY_NAME_PREFIX).append(name).toString();
}
}
/**
* Helper method to construct a properly prefixed registry name based on the group.
*
* @param group reporting group
* @param names optional child elements of the registry name. If exactly one element is provided
* and it already contains the required prefix and group name then this value will be used,
* and the group parameter will be ignored.
* @return fully-qualified and prefixed registry name, with overrides applied.
*/
public static String getRegistryName(SolrInfoBean.Group group, String... names) {
String fullName;
String prefix = new StringBuilder(REGISTRY_NAME_PREFIX).append(group.name()).append('.').toString();
// check for existing prefix and group
if (names != null && names.length > 0 && names[0] != null && names[0].startsWith(prefix)) {
// assume the first segment was already expanded
if (names.length > 1) {
String[] newNames = new String[names.length - 1];
System.arraycopy(names, 1, newNames, 0, newNames.length);
fullName = MetricRegistry.name(names[0], newNames);
} else {
fullName = MetricRegistry.name(names[0]);
}
} else {
fullName = MetricRegistry.name(group.toString(), names);
}
return enforcePrefix(fullName);
}
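// Illustrative examples (assuming REGISTRY_NAME_PREFIX is "solr."):
//   getRegistryName(Group.core, "collection1")       -> "solr.core.collection1"
//   getRegistryName(Group.core, "solr.core.c1", "x") -> "solr.core.c1.x" (prefix detected, group ignored)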
// reporter management
/**
* Create and register {@link SolrMetricReporter}-s specific to a {@link org.apache.solr.core.SolrInfoBean.Group}.
* Note: reporters that specify neither "group" nor "registry" attributes are treated as universal -
* they will always be loaded for any group. These two attributes may also contain multiple comma- or
* whitespace-separated values, in which case the reporter will be loaded for any matching value from
* the list. If both attributes are present then only the "group" attribute will be processed.
*
* @param pluginInfos plugin configurations
* @param loader resource loader
* @param coreContainer core container
* @param solrCore optional solr core
* @param tag optional tag for the reporters, to distinguish reporters logically created for different parent
* component instances.
* @param group selected group, not null
* @param registryNames optional child registry name elements
*/
public void loadReporters(PluginInfo[] pluginInfos, SolrResourceLoader loader, CoreContainer coreContainer, SolrCore solrCore, String tag, SolrInfoBean.Group group, String... registryNames) {
if (pluginInfos == null || pluginInfos.length == 0) {
return;
}
String registryName = getRegistryName(group, registryNames);
for (PluginInfo info : pluginInfos) {
String target = info.attributes.get("group");
if (target == null) { // no "group"
target = info.attributes.get("registry");
if (target != null) {
String[] targets = target.split("[\\s,]+");
boolean found = false;
for (String t : targets) {
t = enforcePrefix(t);
if (registryName.equals(t)) {
found = true;
break;
}
}
if (!found) {
continue;
}
} else {
// neither group nor registry specified.
// always register this plugin for all groups and registries
}
} else { // check groups
String[] targets = target.split("[\\s,]+");
boolean found = false;
for (String t : targets) {
if (group.toString().equals(t)) {
found = true;
break;
}
}
if (!found) {
continue;
}
}
try {
loadReporter(registryName, loader, coreContainer, solrCore, info, tag);
} catch (Exception e) {
log.warn("Error loading metrics reporter, plugin info: {}", info, e);
}
}
}
/**
* Convenience wrapper for {@link SolrMetricManager#loadReporter(String, SolrResourceLoader, CoreContainer, SolrCore, PluginInfo, String)}
* passing {@link SolrCore#getResourceLoader()} and {@link SolrCore#getCoreContainer()} as the extra parameters.
*/
public void loadReporter(String registry, SolrCore solrCore, PluginInfo pluginInfo, String tag) throws Exception {
loadReporter(registry,
solrCore.getResourceLoader(),
solrCore.getCoreContainer(),
solrCore,
pluginInfo,
tag);
}
/**
* Convenience wrapper for {@link SolrMetricManager#loadReporter(String, SolrResourceLoader, CoreContainer, SolrCore, PluginInfo, String)}
* passing {@link CoreContainer#getResourceLoader()} and null solrCore and tag.
*/
public void loadReporter(String registry, CoreContainer coreContainer, PluginInfo pluginInfo) throws Exception {
loadReporter(registry,
coreContainer.getResourceLoader(),
coreContainer,
null,
pluginInfo,
null);
}
/**
* Create and register an instance of {@link SolrMetricReporter}.
*
* @param registry reporter is associated with this registry
* @param loader loader to use when creating an instance of the reporter
* @param coreContainer core container
* @param solrCore optional solr core
* @param pluginInfo plugin configuration. Plugin "name" and "class" attributes are required.
* @param tag optional tag for the reporter, to distinguish reporters logically created for different parent
* component instances.
* @throws Exception if any argument is missing or invalid
*/
@SuppressWarnings({"rawtypes"})
public void loadReporter(String registry, SolrResourceLoader loader, CoreContainer coreContainer, SolrCore solrCore, PluginInfo pluginInfo, String tag) throws Exception {
if (registry == null || pluginInfo == null || pluginInfo.name == null || pluginInfo.className == null) {
throw new IllegalArgumentException("loadReporter called with missing arguments: " +
"registry=" + registry + ", loader=" + loader + ", pluginInfo=" + pluginInfo);
}
// make sure we use a name with prefix
registry = enforcePrefix(registry);
SolrMetricReporter reporter = loader.newInstance(
pluginInfo.className,
SolrMetricReporter.class,
new String[0],
new Class[]{SolrMetricManager.class, String.class},
new Object[]{this, registry}
);
// prepare MDC for plugins that want to use its properties
MDCLoggingContext.setCoreDescriptor(coreContainer, solrCore == null ? null : solrCore.getCoreDescriptor());
if (tag != null) {
// add instance tag to MDC
MDC.put("tag", "t:" + tag);
}
try {
if (reporter instanceof SolrCoreReporter) {
((SolrCoreReporter) reporter).init(pluginInfo, solrCore);
} else if (reporter instanceof SolrCoreContainerReporter) {
((SolrCoreContainerReporter) reporter).init(pluginInfo, coreContainer);
} else {
reporter.init(pluginInfo);
}
} catch (IllegalStateException e) {
throw new IllegalArgumentException("reporter init failed: " + pluginInfo, e);
} finally {
MDCLoggingContext.clear();
MDC.remove("tag");
}
registerReporter(registry, pluginInfo.name, tag, reporter);
}
private void registerReporter(String registry, String name, String tag, SolrMetricReporter reporter) throws Exception {
try {
if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
throw new Exception("Could not obtain lock to modify reporters registry: " + registry);
}
} catch (InterruptedException e) {
throw new Exception("Interrupted while trying to obtain lock to modify reporters registry: " + registry);
}
try {
Map<String, SolrMetricReporter> perRegistry = reporters.get(registry);
if (perRegistry == null) {
perRegistry = new HashMap<>();
reporters.put(registry, perRegistry);
}
if (tag != null && !tag.isEmpty()) {
name = name + "@" + tag;
}
SolrMetricReporter oldReporter = perRegistry.get(name);
if (oldReporter != null) { // close it
log.info("Replacing existing reporter '{}' in registry'{}': {}", name, registry, oldReporter);
oldReporter.close();
}
perRegistry.put(name, reporter);
} finally {
reportersLock.unlock();
}
}
/**
* Close and unregister a named {@link SolrMetricReporter} for a registry.
*
* @param registry registry name
* @param name reporter name
* @param tag optional tag for the reporter, to distinguish reporters logically created for different parent
* component instances.
* @return true if a named reporter existed and was closed.
*/
public boolean closeReporter(String registry, String name, String tag) {
// make sure we use a name with prefix
registry = enforcePrefix(registry);
try {
if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
log.warn("Could not obtain lock to modify reporters registry: {}", registry);
return false;
}
} catch (InterruptedException e) {
log.warn("Interrupted while trying to obtain lock to modify reporters registry: {}", registry);
return false;
}
try {
Map<String, SolrMetricReporter> perRegistry = reporters.get(registry);
if (perRegistry == null) {
return false;
}
if (tag != null && !tag.isEmpty()) {
name = name + "@" + tag;
}
SolrMetricReporter reporter = perRegistry.remove(name);
if (reporter == null) {
return false;
}
try {
reporter.close();
} catch (Exception e) {
log.warn("Error closing metric reporter, registry={}, name={}", registry, name, e);
}
return true;
} finally {
reportersLock.unlock();
}
}
/**
* Close and unregister all {@link SolrMetricReporter}-s for a registry.
*
* @param registry registry name
* @return names of closed reporters
*/
public Set<String> closeReporters(String registry) {
return closeReporters(registry, null);
}
/**
* Close and unregister all {@link SolrMetricReporter}-s for a registry.
*
* @param registry registry name
* @param tag optional tag for the reporter, to distinguish reporters logically created for different parent
* component instances.
* @return names of closed reporters
*/
public Set<String> closeReporters(String registry, String tag) {
// make sure we use a name with prefix
registry = enforcePrefix(registry);
try {
if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
log.warn("Could not obtain lock to modify reporters registry: {}", registry);
return Collections.emptySet();
}
} catch (InterruptedException e) {
log.warn("Interrupted while trying to obtain lock to modify reporters registry: {}", registry);
return Collections.emptySet();
}
log.info("Closing metric reporters for registry={} tag={}", registry, tag);
try {
Map<String, SolrMetricReporter> perRegistry = reporters.get(registry);
if (perRegistry != null) {
Set<String> names = new HashSet<>(perRegistry.keySet());
Set<String> removed = new HashSet<>();
names.forEach(name -> {
if (tag != null && !tag.isEmpty() && !name.endsWith("@" + tag)) {
return;
}
SolrMetricReporter reporter = perRegistry.remove(name);
try {
reporter.close();
} catch (IOException ioe) {
log.warn("Exception closing reporter {}", reporter, ioe);
}
removed.add(name);
});
if (removed.size() == names.size()) {
reporters.remove(registry);
}
return removed;
} else {
return Collections.emptySet();
}
} finally {
reportersLock.unlock();
}
}
/**
* Get a map of reporters for a registry. Keys are reporter names, values are reporter instances.
*
* @param registry registry name
* @return map of reporters and their names, may be empty but never null
*/
public Map<String, SolrMetricReporter> getReporters(String registry) {
// make sure we use a name with prefix
registry = enforcePrefix(registry);
try {
if (!reportersLock.tryLock(10, TimeUnit.SECONDS)) {
log.warn("Could not obtain lock to modify reporters registry: {}", registry);
return Collections.emptyMap();
}
} catch (InterruptedException e) {
log.warn("Interrupted while trying to obtain lock to modify reporters registry: {}", registry);
return Collections.emptyMap();
}
try {
Map<String, SolrMetricReporter> perRegistry = reporters.get(registry);
if (perRegistry == null) {
return Collections.emptyMap();
} else {
// defensive copy - the original map may change after we release the lock
return Collections.unmodifiableMap(new HashMap<>(perRegistry));
}
} finally {
reportersLock.unlock();
}
}
private List<PluginInfo> prepareCloudPlugins(PluginInfo[] pluginInfos, String group,
Map<String, String> defaultAttributes,
Map<String, Object> defaultInitArgs) {
List<PluginInfo> result = new ArrayList<>();
if (pluginInfos == null) {
pluginInfos = new PluginInfo[0];
}
for (PluginInfo info : pluginInfos) {
String groupAttr = info.attributes.get("group");
if (!group.equals(groupAttr)) {
continue;
}
info = preparePlugin(info, defaultAttributes, defaultInitArgs);
if (info != null) {
result.add(info);
}
}
return result;
}
@SuppressWarnings({"unchecked", "rawtypes"})
private PluginInfo preparePlugin(PluginInfo info, Map<String, String> defaultAttributes,
Map<String, Object> defaultInitArgs) {
if (info == null) {
return null;
}
String classNameAttr = info.attributes.get("class");
Map<String, String> attrs = new HashMap<>(info.attributes);
defaultAttributes.forEach((k, v) -> {
if (!attrs.containsKey(k)) {
attrs.put(k, v);
}
});
attrs.put("class", classNameAttr);
Map<String, Object> initArgs = new HashMap<>();
if (info.initArgs != null) {
initArgs.putAll(info.initArgs.asMap(10));
}
defaultInitArgs.forEach((k, v) -> {
if (!initArgs.containsKey(k)) {
initArgs.put(k, v);
}
});
return new PluginInfo(info.type, attrs, new NamedList(initArgs), null);
}
public void loadShardReporters(PluginInfo[] pluginInfos, SolrCore core) {
// don't load for non-cloud cores
if (core.getCoreDescriptor().getCloudDescriptor() == null) {
return;
}
// prepare default plugin if none present in the config
Map<String, String> attrs = new HashMap<>();
attrs.put("name", "shardDefault");
attrs.put("group", SolrInfoBean.Group.shard.toString());
Map<String, Object> initArgs = new HashMap<>();
initArgs.put("period", DEFAULT_CLOUD_REPORTER_PERIOD);
String registryName = core.getCoreMetricManager().getRegistryName();
// collect infos and normalize
List<PluginInfo> infos = prepareCloudPlugins(pluginInfos, SolrInfoBean.Group.shard.toString(),
attrs, initArgs);
for (PluginInfo info : infos) {
try {
loadReporter(registryName, core, info, core.getMetricTag());
} catch (Exception e) {
log.warn("Could not load shard reporter, pluginInfo={}", info, e);
}
}
}
public void loadClusterReporters(PluginInfo[] pluginInfos, CoreContainer cc) {
// don't load for non-cloud instances
if (!cc.isZooKeeperAware()) {
return;
}
Map<String, String> attrs = new HashMap<>();
attrs.put("name", "clusterDefault");
attrs.put("group", SolrInfoBean.Group.cluster.toString());
Map<String, Object> initArgs = new HashMap<>();
initArgs.put("period", DEFAULT_CLOUD_REPORTER_PERIOD);
List<PluginInfo> infos = prepareCloudPlugins(pluginInfos, SolrInfoBean.Group.cluster.toString(),
attrs, initArgs);
String registryName = getRegistryName(SolrInfoBean.Group.cluster);
for (PluginInfo info : infos) {
try {
loadReporter(registryName, cc, info);
} catch (Exception e) {
log.warn("Could not load cluster reporter, pluginInfo={}", info, e);
}
}
}
}
| 1 | 37,713 | *NULL_DEREFERENCE:* object `null` is dereferenced by call to `meterSupplier(...)` at line 122. | apache-lucene-solr | java |
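On the null-dereference finding above: a minimal, self-contained sketch of the usual guard pattern, shown against the Dropwizard Metrics API this file uses. The class name, the suppliers map, the "meter" key, and the fallback are all invented for illustration; this is not the actual Solr fix.

import java.util.Map;

import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;

class SupplierGuardSketch {
  // Hypothetical lookup: a supplier may be absent, so fall back instead of
  // passing null into registry.meter(name, supplier).
  static Meter safeMeter(MetricRegistry registry, String name,
                         Map<String, MetricRegistry.MetricSupplier<Meter>> suppliers) {
    MetricRegistry.MetricSupplier<Meter> supplier = suppliers.get("meter");
    if (supplier == null) {
      supplier = Meter::new; // default supplier avoids the null dereference
    }
    return registry.meter(name, supplier);
  }
}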
@@ -60,12 +60,7 @@ function _command(server, ns, cmd, options, callback) {
finalCmd.$clusterTime = clusterTime;
}
- if (
- isSharded(server) &&
- !shouldUseOpMsg &&
- readPreference &&
- readPreference.preference !== 'primary'
- ) {
+ if (isSharded(server) && !shouldUseOpMsg && readPreference && readPreference.mode !== 'primary') {
finalCmd = {
$query: finalCmd,
$readPreference: readPreference.toJSON() | 1 | 'use strict';
const { Query, Msg } = require('../commands');
const { getReadPreference, isSharded } = require('./shared');
const { isTransactionCommand } = require('../../transactions');
const { applySession } = require('../../sessions');
const { maxWireVersion, databaseNamespace } = require('../../utils');
const { MongoError, MongoNetworkError } = require('../../error');
function isClientEncryptionEnabled(server) {
const wireVersion = maxWireVersion(server);
return wireVersion && server.autoEncrypter;
}
function command(server, ns, cmd, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
if (cmd == null) {
return callback(new MongoError(`command ${JSON.stringify(cmd)} does not return a cursor`));
}
if (!isClientEncryptionEnabled(server)) {
_command(server, ns, cmd, options, callback);
return;
}
const wireVersion = maxWireVersion(server);
if (typeof wireVersion !== 'number' || wireVersion < 8) {
callback(new MongoError('Auto-encryption requires a minimum MongoDB version of 4.2'));
return;
}
_cryptCommand(server, ns, cmd, options, callback);
}
function _command(server, ns, cmd, options, callback) {
const pool = server.s.pool;
const readPreference = getReadPreference(cmd, options);
const shouldUseOpMsg = supportsOpMsg(server);
const session = options.session;
let clusterTime = server.clusterTime;
let finalCmd = Object.assign({}, cmd);
if (hasSessionSupport(server) && session) {
if (
session.clusterTime &&
session.clusterTime.clusterTime.greaterThan(clusterTime.clusterTime)
) {
clusterTime = session.clusterTime;
}
const err = applySession(session, finalCmd, options);
if (err) {
return callback(err);
}
}
// if we have a known cluster time, gossip it
if (clusterTime) {
finalCmd.$clusterTime = clusterTime;
}
if (
isSharded(server) &&
!shouldUseOpMsg &&
readPreference &&
readPreference.preference !== 'primary'
) {
finalCmd = {
$query: finalCmd,
$readPreference: readPreference.toJSON()
};
}
const commandOptions = Object.assign(
{
command: true,
numberToSkip: 0,
numberToReturn: -1,
checkKeys: false
},
options
);
// This value is not overridable
commandOptions.slaveOk = readPreference.slaveOk();
const cmdNs = `${databaseNamespace(ns)}.$cmd`;
const message = shouldUseOpMsg
? new Msg(cmdNs, finalCmd, commandOptions)
: new Query(cmdNs, finalCmd, commandOptions);
const inTransaction = session && (session.inTransaction() || isTransactionCommand(finalCmd));
const commandResponseHandler = inTransaction
? function(err) {
// We need to add a TransientTransactionError errorLabel, as stated in the transaction spec.
if (
err &&
err instanceof MongoNetworkError &&
!err.hasErrorLabel('TransientTransactionError')
) {
err.addErrorLabel('TransientTransactionError');
}
if (
!cmd.commitTransaction &&
err &&
err instanceof MongoError &&
err.hasErrorLabel('TransientTransactionError')
) {
session.transaction.unpinServer();
}
return callback.apply(null, arguments);
}
: callback;
try {
pool.write(message, commandOptions, commandResponseHandler);
} catch (err) {
commandResponseHandler(err);
}
}
function hasSessionSupport(topology) {
if (topology == null) return false;
if (topology.description) {
return topology.description.maxWireVersion >= 6;
}
return topology.ismaster == null ? false : topology.ismaster.maxWireVersion >= 6;
}
function supportsOpMsg(topologyOrServer) {
const description = topologyOrServer.ismaster
? topologyOrServer.ismaster
: topologyOrServer.description;
if (description == null) {
return false;
}
return description.maxWireVersion >= 6 && description.__nodejs_mock_server__ == null;
}
function _cryptCommand(server, ns, cmd, options, callback) {
const autoEncrypter = server.autoEncrypter;
function commandResponseHandler(err, response) {
if (err || response == null) {
callback(err, response);
return;
}
autoEncrypter.decrypt(response.result, options, (err, decrypted) => {
if (err) {
callback(err, null);
return;
}
response.result = decrypted;
response.message.documents = [decrypted];
callback(null, response);
});
}
autoEncrypter.encrypt(ns, cmd, options, (err, encrypted) => {
if (err) {
callback(err, null);
return;
}
_command(server, ns, encrypted, options, commandResponseHandler);
});
}
module.exports = command;
| 1 | 17,720 | switch over to .mode | mongodb-node-mongodb-native | js |
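On the `.mode` suggestion above, a short illustration of why the property name matters. The stand-in class below mirrors only the relevant shape of the driver's `ReadPreference`; the real class lives in the driver.

'use strict';

// Minimal stand-in for the driver's ReadPreference; only the mode property matters here.
class ReadPreference {
  constructor(mode) {
    this.mode = mode; // e.g. 'primary', 'secondaryPreferred', 'nearest'
  }
}

const readPreference = new ReadPreference('primary');

// Wrong property: `preference` is undefined, so the guard is always truthy.
console.log(readPreference.preference !== 'primary'); // true -- would wrap even primary reads

// Correct property: compares the actual mode.
console.log(readPreference.mode !== 'primary'); // false -- primary reads skip the $readPreference wrapper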
@@ -125,7 +125,7 @@ describe('HiddenColumns', () => {
});
it('should return correct visual indexes when columns sequence is non-contiguous ' +
- '(force desync between physical and visual indexes)', () => {
+ '(force desync between physical and visual indexes)', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(1, 10),
colHeaders: true, | 1 | describe('HiddenColumns', () => {
const id = 'testContainer';
beforeEach(function() {
this.$container = $(`<div id="${id}"></div>`).appendTo('body');
});
afterEach(function() {
if (this.$container) {
destroy();
this.$container.remove();
}
});
describe('public API', () => {
describe('hideColumn()', () => {
it('should hide column by passing the visual column index', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(1, 3),
hiddenColumns: true,
});
expect(getCell(0, 1).innerText).toBe('B1');
getPlugin('hiddenColumns').hideColumn(1);
render();
expect(getCell(0, 1)).toBe(null);
});
});
describe('showColumn()', () => {
it('should show column by passing the visual column index', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(1, 3),
hiddenColumns: {
columns: [1],
},
});
expect(getCell(0, 1)).toBe(null);
getPlugin('hiddenColumns').showColumn(1);
render();
expect(getCell(0, 1).innerText).toBe('B1');
});
});
describe('showColumns', () => {
it('should update the table width, when calling `showColumns` after running `hideColumns` beforehand', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(5, 7),
colHeaders: true,
rowHeaders: true,
hiddenColumns: {
columns: [],
},
});
const initialHiderWidth = $(hot().view.wt.wtTable.hider).width();
getPlugin('hiddenColumns').hideColumns([2, 3, 4, 5]);
render();
getPlugin('hiddenColumns').showColumns([2, 3, 4, 5]);
render();
expect($(hot().view.wt.wtTable.hider).width()).toEqual(initialHiderWidth);
});
});
describe('isHidden()', () => {
it('should return `true` for hidden column', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(3, 3),
hiddenColumns: {
columns: [1],
},
});
const plugin = getPlugin('hiddenColumns');
expect(plugin.isHidden(0)).toBe(false);
expect(plugin.isHidden(1)).toBe(true);
expect(plugin.isHidden(2)).toBe(false);
getPlugin('hiddenColumns').showColumn(1);
render();
expect(plugin.isHidden(0)).toBe(false);
expect(plugin.isHidden(1)).toBe(false);
expect(plugin.isHidden(2)).toBe(false);
getPlugin('hiddenColumns').hideColumn(2);
render();
expect(plugin.isHidden(0)).toBe(false);
expect(plugin.isHidden(1)).toBe(false);
expect(plugin.isHidden(2)).toBe(true);
});
});
describe('getHiddenColumns()', () => {
it('should return collection of hidden visual column indexes', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(3, 3),
hiddenColumns: {
columns: [1],
},
});
const plugin = getPlugin('hiddenColumns');
expect(plugin.getHiddenColumns()).toEqual([1]);
getPlugin('hiddenColumns').showColumn(1);
render();
expect(plugin.getHiddenColumns()).toEqual([]);
getPlugin('hiddenColumns').hideColumns([0, 2]);
render();
expect(plugin.getHiddenColumns()).toEqual([0, 2]);
});
it('should return correct visual indexes when columns sequence is non-contiguous ' +
'(force desync between physical and visual indexes)', () => {
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(1, 10),
colHeaders: true,
hiddenColumns: {
columns: [1],
indicators: true,
},
});
hot.columnIndexMapper.setIndexesSequence([0, 9, 1, 2, 3, 4, 5, 6, 7, 8]);
const plugin = getPlugin('hiddenColumns');
expect(plugin.getHiddenColumns()).toEqual([2]);
getPlugin('hiddenColumns').showColumn(2);
render();
expect(plugin.getHiddenColumns()).toEqual([]);
getPlugin('hiddenColumns').hideColumns([3, 6]);
render();
expect(plugin.getHiddenColumns()).toEqual([3, 6]);
});
});
describe('isValidConfig()', () => {
it('should return `false` for columns passed as not a number', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(3, 3),
hiddenColumns: true,
});
const plugin = getPlugin('hiddenColumns');
expect(plugin.isValidConfig()).toBe(false);
expect(plugin.isValidConfig(null)).toBe(false);
expect(plugin.isValidConfig(void 0)).toBe(false);
expect(plugin.isValidConfig(1)).toBe(false);
expect(plugin.isValidConfig({ index: 1 })).toBe(false);
expect(plugin.isValidConfig([])).toBe(false);
expect(plugin.isValidConfig([[]])).toBe(false);
expect(plugin.isValidConfig([null])).toBe(false);
expect(plugin.isValidConfig([void 0])).toBe(false);
expect(plugin.isValidConfig(['1'])).toBe(false);
expect(plugin.isValidConfig([{ index: 1 }])).toBe(false);
});
it('should return `true` for columns, which are within the range of the table size', () => {
handsontable({
data: Handsontable.helper.createSpreadsheetData(3, 3),
hiddenColumns: true,
});
const plugin = getPlugin('hiddenColumns');
expect(plugin.isValidConfig([0])).toBe(true);
expect(plugin.isValidConfig([1, 2])).toBe(true);
expect(plugin.isValidConfig([0, 1, 2])).toBe(true);
expect(plugin.isValidConfig([-1])).toBe(false);
expect(plugin.isValidConfig([-1, 0])).toBe(false);
expect(plugin.isValidConfig([0, 1, 2, 3])).toBe(false);
expect(plugin.isValidConfig([3])).toBe(false);
});
});
describe('clear()', () => {
it('should clear the data from hidden column when hidden column is second last one', () => {
const col = 2;
const hot = handsontable({
data: Handsontable.helper.createSpreadsheetData(2, col),
hiddenColumns: {
columns: [col - 1], // hide penultimate column
}
});
hot.clear();
const emptyData = hot.getData();
const emptyDataComparison = [[null, null], [null, null]];
expect(emptyData).toEqual(emptyDataComparison);
});
});
});
});
| 1 | 17,330 | I guess your IDE did some auto-fixing here | handsontable-handsontable | js |
@@ -32,6 +32,7 @@ import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
+import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.TermQuery;
public class TestRamUsageEstimator extends LuceneTestCase { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.util;
import static org.apache.lucene.util.RamUsageEstimator.*;
import static org.apache.lucene.util.RamUsageTester.sizeOf;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.DisjunctionMaxQuery;
import org.apache.lucene.search.FuzzyQuery;
import org.apache.lucene.search.TermQuery;
public class TestRamUsageEstimator extends LuceneTestCase {
static final String[] strings = new String[] {
"test string",
"hollow",
"catchmaster"
};
public void testSanity() {
assertTrue(sizeOf("test string") > shallowSizeOfInstance(String.class));
Holder holder = new Holder();
holder.holder = new Holder("string2", 5000L);
assertTrue(sizeOf(holder) > shallowSizeOfInstance(Holder.class));
assertTrue(sizeOf(holder) > sizeOf(holder.holder));
assertTrue(
shallowSizeOfInstance(HolderSubclass.class) >= shallowSizeOfInstance(Holder.class));
assertTrue(
shallowSizeOfInstance(Holder.class) == shallowSizeOfInstance(HolderSubclass2.class));
assertTrue(sizeOf(strings) > shallowSizeOf(strings));
}
public void testStaticOverloads() {
Random rnd = random();
{
byte[] array = new byte[rnd.nextInt(1024)];
assertEquals(sizeOf(array), sizeOf((Object) array));
}
{
boolean[] array = new boolean[rnd.nextInt(1024)];
assertEquals(sizeOf(array), sizeOf((Object) array));
}
{
char[] array = new char[rnd.nextInt(1024)];
assertEquals(sizeOf(array), sizeOf((Object) array));
}
{
short[] array = new short[rnd.nextInt(1024)];
assertEquals(sizeOf(array), sizeOf((Object) array));
}
{
int[] array = new int[rnd.nextInt(1024)];
assertEquals(sizeOf(array), sizeOf((Object) array));
}
{
float[] array = new float[rnd.nextInt(1024)];
assertEquals(sizeOf(array), sizeOf((Object) array));
}
{
long[] array = new long[rnd.nextInt(1024)];
assertEquals(sizeOf(array), sizeOf((Object) array));
}
{
double[] array = new double[rnd.nextInt(1024)];
assertEquals(sizeOf(array), sizeOf((Object) array));
}
}
public void testStrings() {
long actual = sizeOf(strings);
long estimated = RamUsageEstimator.sizeOf(strings);
assertEquals(actual, estimated);
}
public void testBytesRefHash() {
BytesRefHash bytes = new BytesRefHash();
for (int i = 0; i < 100; i++) {
bytes.add(new BytesRef("foo bar " + i));
bytes.add(new BytesRef("baz bam " + i));
}
long actual = sizeOf(bytes);
long estimated = RamUsageEstimator.sizeOf(bytes);
assertEquals((double)actual, (double)estimated, (double)actual * 0.1);
}
//@AwaitsFix(bugUrl="https://issues.apache.org/jira/browse/LUCENE-8898")
public void testMap() {
Map<String, Object> map = new HashMap<>();
map.put("primitive", 1234L);
map.put("string", "string");
for (int i = 0; i < 100; i++) {
map.put("complex " + i, new Term("foo " + i, "bar " + i));
}
double errorFactor = COMPRESSED_REFS_ENABLED ? 0.2 : 0.3;
long actual = sizeOf(map);
long estimated = RamUsageEstimator.sizeOfObject(map);
assertEquals((double)actual, (double)estimated, (double)actual * errorFactor);
// test recursion
map.put("self", map);
actual = sizeOf(map);
estimated = RamUsageEstimator.sizeOfObject(map);
assertEquals((double)actual, (double)estimated, (double)actual * errorFactor);
}
public void testCollection() {
List<Object> list = new ArrayList<>();
list.add(1234L);
list.add("string");
for (int i = 0; i < 100; i++) {
list.add(new Term("foo " + i, "term " + i));
}
long actual = sizeOf(list);
long estimated = RamUsageEstimator.sizeOfObject(list);
assertEquals((double)actual, (double)estimated, (double)actual * 0.1);
// test recursion
list.add(list);
actual = sizeOf(list);
estimated = RamUsageEstimator.sizeOfObject(list);
assertEquals((double)actual, (double)estimated, (double)actual * 0.1);
}
public void testQuery() {
DisjunctionMaxQuery dismax = new DisjunctionMaxQuery(
Arrays.asList(new TermQuery(new Term("foo1", "bar1")), new TermQuery(new Term("baz1", "bam1"))), 1.0f);
BooleanQuery bq = new BooleanQuery.Builder()
.add(new TermQuery(new Term("foo2", "bar2")), BooleanClause.Occur.SHOULD)
.add(new FuzzyQuery(new Term("foo3", "baz3")), BooleanClause.Occur.MUST_NOT)
.add(dismax, BooleanClause.Occur.MUST)
.build();
long actual = sizeOf(bq);
long estimated = RamUsageEstimator.sizeOfObject(bq);
// sizeOfObject uses much lower default size estimate than we normally use
// but the query-specific default is so large that the comparison becomes meaningless.
assertEquals((double)actual, (double)estimated, (double)actual * 0.5);
}
public void testReferenceSize() {
assertTrue(NUM_BYTES_OBJECT_REF == 4 || NUM_BYTES_OBJECT_REF == 8);
if (Constants.JRE_IS_64BIT) {
assertEquals("For 64 bit JVMs, reference size must be 8, unless compressed references are enabled",
COMPRESSED_REFS_ENABLED ? 4 : 8, NUM_BYTES_OBJECT_REF);
} else {
assertEquals("For 32bit JVMs, reference size must always be 4", 4, NUM_BYTES_OBJECT_REF);
assertFalse("For 32bit JVMs, compressed references can never be enabled", COMPRESSED_REFS_ENABLED);
}
}
public void testHotspotBean() {
assumeTrue("testHotspotBean only works on 64bit JVMs.", Constants.JRE_IS_64BIT);
try {
Class.forName(MANAGEMENT_FACTORY_CLASS);
} catch (ClassNotFoundException e) {
assumeNoException("testHotspotBean does not work on Java 8+ compact profile.", e);
}
try {
Class.forName(HOTSPOT_BEAN_CLASS);
} catch (ClassNotFoundException e) {
assumeNoException("testHotspotBean only works on Hotspot (OpenJDK, Oracle) virtual machines.", e);
}
assertTrue("We should have been able to detect Hotspot's internal settings from the management bean.", JVM_IS_HOTSPOT_64BIT);
}
/** Helper to print out current settings for debugging {@code -Dtests.verbose=true} */
public void testPrintValues() {
assumeTrue("Specify -Dtests.verbose=true to print constants of RamUsageEstimator.", VERBOSE);
System.out.println("JVM_IS_HOTSPOT_64BIT = " + JVM_IS_HOTSPOT_64BIT);
System.out.println("COMPRESSED_REFS_ENABLED = " + COMPRESSED_REFS_ENABLED);
System.out.println("NUM_BYTES_OBJECT_ALIGNMENT = " + NUM_BYTES_OBJECT_ALIGNMENT);
System.out.println("NUM_BYTES_OBJECT_REF = " + NUM_BYTES_OBJECT_REF);
System.out.println("NUM_BYTES_OBJECT_HEADER = " + NUM_BYTES_OBJECT_HEADER);
System.out.println("NUM_BYTES_ARRAY_HEADER = " + NUM_BYTES_ARRAY_HEADER);
System.out.println("LONG_SIZE = " + LONG_SIZE);
System.out.println("LONG_CACHE_MIN_VALUE = " + LONG_CACHE_MIN_VALUE);
System.out.println("LONG_CACHE_MAX_VALUE = " + LONG_CACHE_MAX_VALUE);
}
@SuppressWarnings("unused")
private static class Holder {
long field1 = 5000L;
String name = "name";
Holder holder;
long field2, field3, field4;
Holder() {}
Holder(String name, long field1) {
this.name = name;
this.field1 = field1;
}
}
@SuppressWarnings("unused")
private static class HolderSubclass extends Holder {
byte foo;
int bar;
}
private static class HolderSubclass2 extends Holder {
// empty, only inherits all fields -> size should be identical to superclass
}
}
| 1 | 31,342 | This is an unused import failing precommit still. | apache-lucene-solr | java |
@@ -133,8 +133,12 @@ module Beaker
#move to the host
logger.debug "Using scp to transfer #{source_path} to #{target_path}"
scp_to host, source_path, target_module_dir, {:ignore => ignore_list}
+
#rename to the selected module name, if not correct
cur_path = File.join(target_module_dir, source_name)
+ if host.is_powershell? #make sure our slashes are correct
+ cur_path = cur_path.gsub(/\//,'\\')
+ end
host.mv cur_path, target_path unless cur_path == target_path
when 'rsync'
logger.debug "Using rsync to transfer #{source_path} to #{target_path}" | 1 | module Beaker
module DSL
module InstallUtils
#
# This module contains methods to help install puppet modules
#
# To mix this is into a class you need the following:
# * a method *hosts* that yields any hosts implementing
# {Beaker::Host}'s interface to act upon.
# * a method *options* that provides an options hash, see {Beaker::Options::OptionsHash}
# * the module {Beaker::DSL::Roles} that provides access to the various hosts implementing
# {Beaker::Host}'s interface to act upon
# * the module {Beaker::DSL::Wrappers} the provides convenience methods for {Beaker::DSL::Command} creation
module ModuleUtils
# The directories in the module directory that will not be scp-ed to the test system when using
# `copy_module_to`
PUPPET_MODULE_INSTALL_IGNORE = ['.bundle', '.git', '.idea', '.vagrant', '.vendor', 'vendor', 'acceptance',
'bundle', 'spec', 'tests', 'log']
# Install the desired module on all hosts using either the PMT or a
# staging forge
#
# @see install_dev_puppet_module
def install_dev_puppet_module_on( host, opts )
if options[:forge_host]
with_forge_stubbed_on( host ) do
install_puppet_module_via_pmt_on( host, opts )
end
else
copy_module_to( host, opts )
end
end
alias :puppet_module_install_on :install_dev_puppet_module_on
# Install the desired module on all hosts using either the PMT or a
# staging forge
#
# Passes options through to either `install_puppet_module_via_pmt_on`
# or `copy_module_to`
#
# @param opts [Hash]
#
# @example Installing a module from the local directory
# install_dev_puppet_module( :source => './', :module_name => 'concat' )
#
# @example Installing a module from a staging forge
# options[:forge_host] = 'my-forge-api.example.com'
# install_dev_puppet_module( :source => './', :module_name => 'concat' )
#
# @see install_puppet_module_via_pmt
# @see copy_module_to
def install_dev_puppet_module( opts )
block_on( hosts ) {|h| install_dev_puppet_module_on( h, opts ) }
end
alias :puppet_module_install :install_dev_puppet_module
# Install the desired module with the PMT on a given host
#
# @param opts [Hash]
# @option opts [String] :module_name The short name of the module to be installed
# @option opts [String] :version The version of the module to be installed
def install_puppet_module_via_pmt_on( host, opts = {} )
block_on host do |h|
version_info = opts[:version] ? "-v #{opts[:version]}" : ""
if opts[:source]
author_name, module_name = parse_for_modulename( opts[:source] )
modname = "#{author_name}-#{module_name}"
else
modname = opts[:module_name]
end
puppet_opts = {}
if host[:default_module_install_opts].respond_to? :merge
puppet_opts = host[:default_module_install_opts].merge( puppet_opts )
end
on h, puppet("module install #{modname} #{version_info}", puppet_opts)
end
end
# Install the desired module with the PMT on all known hosts
# @see #install_puppet_module_via_pmt_on
def install_puppet_module_via_pmt( opts = {} )
install_puppet_module_via_pmt_on(hosts, opts)
end
# Install local module for acceptance testing
# should be used as a presuite to ensure local module is copied to the hosts you want, particularly masters
# @param [Host, Array<Host>, String, Symbol] one_or_more_hosts
# One or more hosts to act upon,
# or a role (String or Symbol) that identifies one or more hosts.
# @option opts [String] :source ('./')
# The current directory where the module sits, otherwise will try
# and walk the tree to figure out
# @option opts [String] :module_name (nil)
# Name which the module should be installed under, please do not include author,
# if none is provided it will attempt to parse the metadata.json and then the Module file to determine
# the name of the module
# @option opts [String] :target_module_path (host['distmoduledir']/modules)
# Location where the module should be installed, will default
# to host['distmoduledir']/modules
# @option opts [Array] :ignore_list
# @option opts [String] :protocol
# Name of the underlying transfer method. Valid options are 'scp' or 'rsync'.
# @raise [ArgumentError] if no host is provided or module_name is not provided and cannot be found in the Modulefile
#
def copy_module_to(one_or_more_hosts, opts = {})
block_on one_or_more_hosts do |host|
opts = {:source => './',
:target_module_path => host['distmoduledir'],
:ignore_list => PUPPET_MODULE_INSTALL_IGNORE}.merge(opts)
ignore_list = build_ignore_list(opts)
target_module_dir = on( host, "echo #{opts[:target_module_path]}" ).stdout.chomp
source_path = File.expand_path( opts[:source] )
source_dir = File.dirname(source_path)
source_name = File.basename(source_path)
if opts.has_key?(:module_name)
module_name = opts[:module_name]
else
_, module_name = parse_for_modulename( source_path )
end
target_path = File.join(target_module_dir, module_name)
if host.is_powershell? #make sure our slashes are correct
target_path = target_path.gsub(/\//,'\\')
end
opts[:protocol] ||= 'scp'
case opts[:protocol]
when 'scp'
#move to the host
logger.debug "Using scp to transfer #{source_path} to #{target_path}"
scp_to host, source_path, target_module_dir, {:ignore => ignore_list}
#rename to the selected module name, if not correct
cur_path = File.join(target_module_dir, source_name)
host.mv cur_path, target_path unless cur_path == target_path
when 'rsync'
logger.debug "Using rsync to transfer #{source_path} to #{target_path}"
rsync_to host, source_path, target_path, {:ignore => ignore_list}
else
logger.debug "Unsupported transfer protocol, returning nil"
nil
end
end
end
alias :copy_root_module_to :copy_module_to
#Recursive method for finding the module root
# Assumes that a Modulefile exists
# @param [String] possible_module_directory
# will look for Modulefile and if none found go up one level and try again until root is reached
#
# @return [String,nil]
def parse_for_moduleroot(possible_module_directory)
if File.exists?("#{possible_module_directory}/Modulefile") || File.exists?("#{possible_module_directory}/metadata.json")
possible_module_directory
elsif possible_module_directory === '/'
logger.error "At root, can't parse for another directory"
nil
else
logger.debug "No Modulefile or metadata.json found at #{possible_module_directory}, moving up"
parse_for_moduleroot File.expand_path(File.join(possible_module_directory,'..'))
end
end
#Parse root directory of a module for module name
# Searches for metadata.json and then if none found, Modulefile and parses for the Name attribute
# @param [String] root_module_dir
# @return [String] module name
def parse_for_modulename(root_module_dir)
author_name, module_name = nil, nil
if File.exists?("#{root_module_dir}/metadata.json")
logger.debug "Attempting to parse Modulename from metadata.json"
module_json = JSON.parse(File.read "#{root_module_dir}/metadata.json")
if(module_json.has_key?('name'))
author_name, module_name = get_module_name(module_json['name'])
end
end
if !module_name && File.exists?("#{root_module_dir}/Modulefile")
logger.debug "Attempting to parse Modulename from Modulefile"
if /^name\s+'?(\w+-\w+)'?\s*$/i.match(File.read("#{root_module_dir}/Modulefile"))
author_name, module_name = get_module_name(Regexp.last_match[1])
end
end
if !module_name && !author_name
logger.debug "Unable to determine name, returning null"
end
return author_name, module_name
end
#Parse module name from the pattern 'Author-ModuleName'
#
# @param [String] author_module_name <Author>-<ModuleName> pattern
#
# @return [String,nil]
#
def get_module_name(author_module_name)
split_name = split_author_modulename(author_module_name)
if split_name
return split_name[:author], split_name[:module]
end
end
#Split the Author-Name into a hash
# @param [String] author_module_attr
#
# @return [Hash<Symbol,String>,nil] :author and :module symbols will be returned
#
def split_author_modulename(author_module_attr)
result = /(\w+)-(\w+)/.match(author_module_attr)
if result
{:author => result[1], :module => result[2]}
else
nil
end
end
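# Illustrative examples (derived from the regex above):
#   split_author_modulename('puppetlabs-stdlib') #=> {:author => 'puppetlabs', :module => 'stdlib'}
#   split_author_modulename('stdlib')            #=> nil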
# Build an array list of files/directories to ignore when pushing to remote host
# Automatically adds '..' and '.' to the array. If no opts[:ignore_list] is provided
# it will use the PUPPET_MODULE_INSTALL_IGNORE constant
#
# @param opts [Hash]
# @option opts [Array] :ignore_list A list of files/directories to ignore
def build_ignore_list(opts = {})
ignore_list = opts[:ignore_list] || PUPPET_MODULE_INSTALL_IGNORE
if !ignore_list.kind_of?(Array) || ignore_list.nil?
raise ArgumentError, "Ignore list must be an Array"
end
ignore_list << '.' unless ignore_list.include? '.'
ignore_list << '..' unless ignore_list.include? '..'
ignore_list
end
end
end
end
end
| 1 | 11,605 | I'm concerned that we're conflating multiple things here. There are 2 things that Beaker should really care about: - Network transport - i.e. `ssh` vs `winrm` - Interpreter - i.e. `bash`, `cmd`, `powershell`, etc The problem is that @cowofevil is running Bitvise SSH, and he assumed we should be setting `is_cygwin: false` in node definitions. But that doesn't really accurately convey what we care about, and since Beaker doesn't appear to track network transport apart from interpreter, we end up in a strange state. `is_cygwin: false` appears to end up setting `is_powershell?` to `true`, which IMHO is not accurate or appropriate. Bitvise templates use `ssh` (like Cygwin), but the interpreter used is `cmd`, not `powershell`. | voxpupuli-beaker | rb |
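A sketch of the separation argued for above: track network transport and command interpreter as independent host properties instead of inferring both from `is_cygwin`. The `transport` and `interpreter` keys are hypothetical, not existing Beaker settings.

# Hypothetical host definition -- the transport/interpreter keys are not real Beaker options.
bitvise_host = {
  'platform'    => 'windows-2012r2-64',
  'transport'   => 'ssh',   # how Beaker reaches the host: ssh or winrm
  'interpreter' => 'cmd',   # what runs the command: bash, cmd, powershell, ...
}

# A predicate can then ask the precise question instead of overloading is_cygwin:
def powershell?(host)
  host['interpreter'] == 'powershell'
end

puts powershell?(bitvise_host) #=> false: Bitvise is ssh transport + cmd interpreter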
@@ -52,6 +52,6 @@
<%= tinymce :content_css => asset_path('application.css') %>
<!-- alert for the default template-->
-<div id="edit_guidance_alert_dialog" class="modal" style="display:none">
+<div id="edit_guidance_alert_dialog" class="modal" role="dialog" aria-label="<%=_("Missing Fields Alert")%>" style="display:none">
<ul id="missing_fields_edit_guidance"></ul>
</div> | 1 | <% javascript 'views/guidances/admin_edit.js' %>
<h1>
<%= _('Guidance') %>
</h1>
<div class="content">
<%= form_for(@guidance, url: admin_update_guidance_path(@guidance), html: { method: :put , id: 'edit_guidance_form', class: 'roadmap-form bordered'}) do |f| %>
<fieldset class="side-by-side">
<div class="form-input">
<%= f.label _('Text'), for: @guidance.text %>
<%= text_area_tag("guidance-text", @guidance.text, class: "tinymce") %>
<div class="inline">
<a href="#" data-toggle="popover" rel: "popover" data-html: "true"
data-content="<%= _('Enter your guidance here. You can include links where needed.') %>">
<span class="fa fa-question-circle"></span></a>
</div>
</div>
<div class="form-input">
<%= f.label _('Themes'), for: :theme_ids %>
<%= f.collection_select(:theme_ids, @themes, :id, :title,
{prompt: false, include_blank: 'None'}, {multiple: true})%>
<div class="inline">
<a href="#" data-toggle="popover" rel: "popover" data-html: "true"
data-content="<%= _('Select which theme(s) this guidance relates to.') %>">
<span class="fa fa-question-circle"></span></a>
</div>
</div>
<div class="form-input">
<%= f.label :published %>
<%= f.check_box :published , as: :check_boxes%>
</div>
<div class="form-input">
<%= f.label _('Guidance group'), for: :guidance_group_id %>
<%= f.collection_select(:guidance_group_id, @guidance_groups,
:id, :name, {prompt: false, include_blank: 'None'}, {multiple: false})%>
<div class="inline">
<a href="#" data-toggle="popover" rel: "popover" data-html: "true"
data-content="<%= _('Select which group this guidance relates to.') %>">
<span class="fa fa-question-circle"></span></a>
</div>
</div>
<br />
<div class="form-input">
<div class="button-spacer"> </div>
<input type="submit" id="edit_guidance_submit" href="#edit_guidance_alert_dialog" class="btn btn-primary" value="<%= _('Save')%>" />
</div>
</fieldset>
<%end%>
<%= tinymce :content_css => asset_path('application.css') %>
<!-- alert for the default template-->
<div id="edit_guidance_alert_dialog" class="modal" style="display:none">
<ul id="missing_fields_edit_guidance"></ul>
</div> | 1 | 16,740 | I think this is ok for now. This ties into the larger issue of the site not having a consistent method for relaying form input errors. Please make sure the focus gets set on the close button when the dialog opens. | DMPRoadmap-roadmap | rb |
@@ -44,9 +44,9 @@ const COLUMN_SIZE_MAP_NAME = 'autoColumnSize';
* autoColumnSize: {syncLimit: '40%'},
* ```
*
- * The plugin uses {@link GhostTable} and {@link SamplesGenerator} for calculations.
- * First, {@link SamplesGenerator} prepares samples of data with its coordinates.
- * Next {@link GhostTable} uses coordinates to get cells' renderers and append all to the DOM through DocumentFragment.
+ * The plugin uses GhostTable and SamplesGenerator for calculations.
+ * First, SamplesGenerator prepares samples of data with its coordinates.
+ * Next GhostTable uses coordinates to get cells' renderers and append all to the DOM through DocumentFragment.
*
* Sampling accepts additional options:
* - *samplingRatio* - Defines how many samples for the same length will be used to calculate. Default is `3`. | 1 | import { BasePlugin } from '../base';
import { arrayEach, arrayFilter, arrayReduce, arrayMap } from '../../helpers/array';
import { cancelAnimationFrame, requestAnimationFrame } from '../../helpers/feature';
import GhostTable from '../../utils/ghostTable';
import Hooks from '../../pluginHooks';
import { isObject, hasOwnProperty } from '../../helpers/object';
import { valueAccordingPercent, rangeEach } from '../../helpers/number';
import SamplesGenerator from '../../utils/samplesGenerator';
import { isPercentValue } from '../../helpers/string';
import { ViewportColumnsCalculator } from '../../3rdparty/walkontable/src';
import { PhysicalIndexToValueMap as IndexToValueMap } from '../../translations';
Hooks.getSingleton().register('modifyAutoColumnSizeSeed');
export const PLUGIN_KEY = 'autoColumnSize';
export const PLUGIN_PRIORITY = 10;
const privatePool = new WeakMap();
const COLUMN_SIZE_MAP_NAME = 'autoColumnSize';
/* eslint-disable jsdoc/require-description-complete-sentence */
/**
* @plugin AutoColumnSize
* @class AutoColumnSize
*
* @description
* This plugin allows setting column widths based on their widest cells.
*
* By default, the plugin is declared as `undefined`, which makes it enabled (same as if it was declared as `true`).
* Enabling this plugin may decrease the overall table performance, as it needs to calculate the widths of all cells to
* resize the columns accordingly.
* If you experience problems with the performance, try turning this feature off and declaring the column widths manually.
*
 * Column width calculations are divided into a sync and an async part. Each of these parts has its own advantages and
* disadvantages. Synchronous calculations are faster but they block the browser UI, while the slower asynchronous
* operations don't block the browser UI.
*
* To configure the sync/async distribution, you can pass an absolute value (number of columns) or a percentage value to a config object:
*
* ```js
* // as a number (300 columns in sync, rest async)
 * autoColumnSize: {syncLimit: 300},
*
* // as a string (percent)
* autoColumnSize: {syncLimit: '40%'},
* ```
*
* The plugin uses {@link GhostTable} and {@link SamplesGenerator} for calculations.
* First, {@link SamplesGenerator} prepares samples of data with its coordinates.
* Next {@link GhostTable} uses coordinates to get cells' renderers and append all to the DOM through DocumentFragment.
*
* Sampling accepts additional options:
* - *samplingRatio* - Defines how many samples for the same length will be used to calculate. Default is `3`.
*
* ```js
* autoColumnSize: {
* samplingRatio: 10,
* }
* ```
*
* - *allowSampleDuplicates* - Defines if duplicated values might be used in sampling. Default is `false`.
*
* ```js
* autoColumnSize: {
* allowSampleDuplicates: true,
* }
* ```
*
* To configure this plugin see {@link options#autocolumnsize Options#autoColumnSize}.
*
* @example
*
* ```js
* const hot = new Handsontable(document.getElementById('example'), {
* data: getData(),
* autoColumnSize: true
* });
* // Access to plugin instance:
* const plugin = hot.getPlugin('autoColumnSize');
*
* plugin.getColumnWidth(4);
*
* if (plugin.isEnabled()) {
* // code...
* }
* ```
*/
/* eslint-enable jsdoc/require-description-complete-sentence */
export class AutoColumnSize extends BasePlugin {
static get PLUGIN_KEY() {
return PLUGIN_KEY;
}
static get PLUGIN_PRIORITY() {
return PLUGIN_PRIORITY;
}
static get CALCULATION_STEP() {
return 50;
}
static get SYNC_CALCULATION_LIMIT() {
return 50;
}
constructor(hotInstance) {
super(hotInstance);
privatePool.set(this, {
/**
       * Cached column header names. It is used to diff current column headers with the previous state and to detect
       * which column widths should be updated.
*
* @private
* @type {Array}
*/
cachedColumnHeaders: [],
});
/**
* Instance of {@link GhostTable} for rows and columns size calculations.
*
* @private
* @type {GhostTable}
*/
this.ghostTable = new GhostTable(this.hot);
/**
* Instance of {@link SamplesGenerator} for generating samples necessary for columns width calculations.
*
* @private
* @type {SamplesGenerator}
* @fires Hooks#modifyAutoColumnSizeSeed
*/
this.samplesGenerator = new SamplesGenerator((row, column) => {
const cellMeta = this.hot.getCellMeta(row, column);
let cellValue = '';
if (!cellMeta.spanned) {
cellValue = this.hot.getDataAtCell(row, column);
}
let bundleSeed = '';
if (this.hot.hasHook('modifyAutoColumnSizeSeed')) {
bundleSeed = this.hot.runHooks('modifyAutoColumnSizeSeed', bundleSeed, cellMeta, cellValue);
}
return { value: cellValue, bundleSeed };
});
/**
* `true` only if the first calculation was performed.
*
* @private
* @type {boolean}
*/
this.firstCalculation = true;
/**
* `true` if the size calculation is in progress.
*
* @type {boolean}
*/
this.inProgress = false;
/**
* Number of already measured columns (we already know their sizes).
*
* @type {number}
*/
this.measuredColumns = 0;
/**
* PhysicalIndexToValueMap to keep and track widths for physical column indexes.
*
* @private
* @type {PhysicalIndexToValueMap}
*/
this.columnWidthsMap = new IndexToValueMap();
this.hot.columnIndexMapper.registerMap(COLUMN_SIZE_MAP_NAME, this.columnWidthsMap);
// Leave the listener active to allow auto-sizing the columns when the plugin is disabled.
    // This is necessary for width recalculation for resize handler doubleclick (ManualColumnResize).
this.addHook('beforeColumnResize',
(size, column, isDblClick) => this.onBeforeColumnResize(size, column, isDblClick));
}
/**
* Checks if the plugin is enabled in the handsontable settings. This method is executed in {@link hooks#beforeinit Hooks#beforeInit}
   * hook and if it returns `true` then the {@link auto-column-size#enableplugin #enablePlugin} method is called.
*
* @returns {boolean}
*/
isEnabled() {
return this.hot.getSettings()[PLUGIN_KEY] !== false && !this.hot.getSettings().colWidths;
}
/**
* Enables the plugin functionality for this Handsontable instance.
*/
enablePlugin() {
if (this.enabled) {
return;
}
const setting = this.hot.getSettings()[PLUGIN_KEY];
if (setting && setting.useHeaders !== null && setting.useHeaders !== void 0) {
this.ghostTable.setSetting('useHeaders', setting.useHeaders);
}
this.setSamplingOptions();
this.addHook('afterLoadData', () => this.onAfterLoadData());
this.addHook('beforeChange', changes => this.onBeforeChange(changes));
this.addHook('beforeRender', force => this.onBeforeRender(force));
this.addHook('modifyColWidth', (width, col) => this.getColumnWidth(col, width));
this.addHook('afterInit', () => this.onAfterInit());
super.enablePlugin();
}
/**
* Updates the plugin state. This method is executed when {@link core#updatesettings Core#updateSettings} is invoked.
*/
updatePlugin() {
const changedColumns = this.findColumnsWhereHeaderWasChanged();
if (changedColumns.length) {
this.clearCache(changedColumns);
this.calculateVisibleColumnsWidth();
}
super.updatePlugin();
}
/**
* Disables the plugin functionality for this Handsontable instance.
*/
disablePlugin() {
super.disablePlugin();
// Leave the listener active to allow auto-sizing the columns when the plugin is disabled.
    // This is necessary for width recalculation for resize handler doubleclick (ManualColumnResize).
this.addHook('beforeColumnResize',
(size, column, isDblClick) => this.onBeforeColumnResize(size, column, isDblClick));
}
/**
* Calculates visible columns width.
*/
calculateVisibleColumnsWidth() {
const rowsCount = this.hot.countRows();
    // Keep last column widths unchanged for the situation when all rows were deleted or trimmed (pro #6)
if (!rowsCount) {
return;
}
const force = this.hot.renderCall;
const firstVisibleColumn = this.getFirstVisibleColumn();
const lastVisibleColumn = this.getLastVisibleColumn();
if (firstVisibleColumn === -1 || lastVisibleColumn === -1) {
return;
}
this.calculateColumnsWidth({ from: firstVisibleColumn, to: lastVisibleColumn }, void 0, force);
}
/**
   * Calculates the widths of the given columns.
*
* @param {number|object} colRange Visual column index or an object with `from` and `to` visual indexes as a range.
* @param {number|object} rowRange Visual row index or an object with `from` and `to` visual indexes as a range.
* @param {boolean} [force=false] If `true` the calculation will be processed regardless of whether the width exists in the cache.
*/
calculateColumnsWidth(colRange = { from: 0, to: this.hot.countCols() - 1 }, rowRange = { from: 0, to: this.hot.countRows() - 1 }, force = false) { // eslint-disable-line max-len
const columnsRange = typeof colRange === 'number' ? { from: colRange, to: colRange } : colRange;
const rowsRange = typeof rowRange === 'number' ? { from: rowRange, to: rowRange } : rowRange;
rangeEach(columnsRange.from, columnsRange.to, (visualColumn) => {
let physicalColumn = this.hot.toPhysicalColumn(visualColumn);
if (physicalColumn === null) {
physicalColumn = visualColumn;
}
if (force || (this.columnWidthsMap.getValueAtIndex(physicalColumn) === null &&
!this.hot._getColWidthFromSettings(physicalColumn))) {
const samples = this.samplesGenerator.generateColumnSamples(visualColumn, rowsRange);
arrayEach(samples, ([column, sample]) => this.ghostTable.addColumn(column, sample));
}
});
if (this.ghostTable.columns.length) {
this.hot.batchExecution(() => {
this.ghostTable.getWidths((visualColumn, width) => {
const physicalColumn = this.hot.toPhysicalColumn(visualColumn);
this.columnWidthsMap.setValueAtIndex(physicalColumn, width);
});
}, true);
this.measuredColumns = columnsRange.to + 1;
this.ghostTable.clean();
}
}
/**
   * Calculates the width of all columns. The calculated widths will be cached in the {@link auto-column-size#widths AutoColumnSize#widths} property.
   * To retrieve the width of a specified column, use the {@link auto-column-size#getcolumnwidth AutoColumnSize#getColumnWidth} method.
*
* @param {object|number} rowRange Row index or an object with `from` and `to` properties which define row range.
*/
calculateAllColumnsWidth(rowRange = { from: 0, to: this.hot.countRows() - 1 }) {
let current = 0;
const length = this.hot.countCols() - 1;
let timer = null;
this.inProgress = true;
const loop = () => {
      // When hot was destroyed after calculation finished, cancel the frame
if (!this.hot) {
cancelAnimationFrame(timer);
this.inProgress = false;
return;
}
this.calculateColumnsWidth({
from: current,
to: Math.min(current + AutoColumnSize.CALCULATION_STEP, length)
}, rowRange);
current = current + AutoColumnSize.CALCULATION_STEP + 1;
if (current < length) {
timer = requestAnimationFrame(loop);
} else {
cancelAnimationFrame(timer);
this.inProgress = false;
// @TODO Should call once per render cycle, currently fired separately in different plugins
this.hot.view.adjustElementsSize();
}
};
const syncLimit = this.getSyncCalculationLimit();
// sync
if (this.firstCalculation && syncLimit >= 0) {
this.calculateColumnsWidth({ from: 0, to: syncLimit }, rowRange);
this.firstCalculation = false;
current = syncLimit + 1;
}
// async
if (current < length) {
loop();
} else {
this.inProgress = false;
}
}
/**
* Sets the sampling options.
*
* @private
*/
setSamplingOptions() {
const setting = this.hot.getSettings()[PLUGIN_KEY];
const samplingRatio = setting && hasOwnProperty(setting, 'samplingRatio') ?
setting.samplingRatio : void 0;
const allowSampleDuplicates = setting && hasOwnProperty(setting, 'allowSampleDuplicates') ?
setting.allowSampleDuplicates : void 0;
if (samplingRatio && !isNaN(samplingRatio)) {
this.samplesGenerator.setSampleCount(parseInt(samplingRatio, 10));
}
if (allowSampleDuplicates) {
this.samplesGenerator.setAllowDuplicates(allowSampleDuplicates);
}
}
/**
* Recalculates all columns width (overwrite cache values).
*/
recalculateAllColumnsWidth() {
if (this.hot.view && this.hot.view.wt.wtTable.isVisible()) {
this.clearCache();
this.calculateAllColumnsWidth();
}
}
/**
   * Gets the value which tells how many columns should be calculated synchronously (the rest of the columns will be
   * calculated asynchronously). The limit is calculated based on the `syncLimit` set in the `autoColumnSize` option (see {@link options#autocolumnsize Options#autoColumnSize}).
*
* @returns {number}
*/
getSyncCalculationLimit() {
const settings = this.hot.getSettings()[PLUGIN_KEY];
/* eslint-disable no-bitwise */
let limit = AutoColumnSize.SYNC_CALCULATION_LIMIT;
const colsLimit = this.hot.countCols() - 1;
if (isObject(settings)) {
limit = settings.syncLimit;
if (isPercentValue(limit)) {
limit = valueAccordingPercent(colsLimit, limit);
} else {
// Force to Number
limit >>= 0;
}
}
return Math.min(limit, colsLimit);
}
/**
* Gets the calculated column width.
*
* @param {number} column Visual column index.
* @param {number} [defaultWidth] Default column width. It will be picked up if no calculated width found.
   * @param {boolean} [keepMinimum=true] If `true` then the returned value won't be smaller than 50 (default column width).
* @returns {number}
*/
getColumnWidth(column, defaultWidth = void 0, keepMinimum = true) {
let width = defaultWidth;
if (width === void 0) {
width = this.columnWidthsMap.getValueAtIndex(this.hot.toPhysicalColumn(column));
if (keepMinimum && typeof width === 'number') {
width = Math.max(width, ViewportColumnsCalculator.DEFAULT_WIDTH);
}
}
return width;
}
/**
* Gets the first visible column.
*
   * @returns {number} Returns visual column index, -1 if table is not rendered or if there are no columns to base the calculations on.
*/
getFirstVisibleColumn() {
const wot = this.hot.view.wt;
if (wot.wtViewport.columnsVisibleCalculator) {
      // First fully visible column is stored as renderable index.
const firstFullyVisibleColumn = wot.wtTable.getFirstVisibleColumn();
if (firstFullyVisibleColumn !== -1) {
return this.hot.columnIndexMapper.getVisualFromRenderableIndex(firstFullyVisibleColumn);
}
}
if (wot.wtViewport.columnsRenderCalculator) {
const firstRenderedColumn = wot.wtTable.getFirstRenderedColumn();
      // There are no rendered columns.
if (firstRenderedColumn !== -1) {
return this.hot.columnIndexMapper.getVisualFromRenderableIndex(firstRenderedColumn);
}
}
return -1;
}
/**
* Gets the last visible column.
*
* @returns {number} Returns visual column index or -1 if table is not rendered.
*/
getLastVisibleColumn() {
const wot = this.hot.view.wt;
if (wot.wtViewport.columnsVisibleCalculator) {
// Last fully visible column is stored as renderable index.
const lastFullyVisibleColumn = wot.wtTable.getLastVisibleColumn();
if (lastFullyVisibleColumn !== -1) {
return this.hot.columnIndexMapper.getVisualFromRenderableIndex(lastFullyVisibleColumn);
}
}
if (wot.wtViewport.columnsRenderCalculator) {
// Last fully visible column is stored as renderable index.
const lastRenderedColumn = wot.wtTable.getLastRenderedColumn();
// There are no rendered columns.
if (lastRenderedColumn !== -1) {
return this.hot.columnIndexMapper.getVisualFromRenderableIndex(lastRenderedColumn);
}
}
return -1;
}
/**
   * Collects all columns whose titles have been changed in comparison to the previous state.
*
* @private
* @returns {Array} It returns an array of physical column indexes.
*/
findColumnsWhereHeaderWasChanged() {
const columnHeaders = this.hot.getColHeader();
const { cachedColumnHeaders } = privatePool.get(this);
const changedColumns = arrayReduce(columnHeaders, (acc, columnTitle, physicalColumn) => {
const cachedColumnsLength = cachedColumnHeaders.length;
if (cachedColumnsLength - 1 < physicalColumn || cachedColumnHeaders[physicalColumn] !== columnTitle) {
acc.push(physicalColumn);
}
if (cachedColumnsLength - 1 < physicalColumn) {
cachedColumnHeaders.push(columnTitle);
} else {
cachedColumnHeaders[physicalColumn] = columnTitle;
}
return acc;
}, []);
return changedColumns;
}
/**
   * Clears the cache of calculated column widths. If you want to clear only selected columns, pass an array with their indexes.
   * Otherwise the whole cache will be cleared.
*
* @param {number[]} [columns] List of physical column indexes to clear.
*/
clearCache(columns = []) {
if (columns.length) {
this.hot.batchExecution(() => {
arrayEach(columns, (physicalIndex) => {
this.columnWidthsMap.setValueAtIndex(physicalIndex, null);
});
}, true);
} else {
this.columnWidthsMap.clear();
}
}
/**
   * Checks if all widths were calculated. If not, returns `true` (recalculation is needed).
*
* @returns {boolean}
*/
isNeedRecalculate() {
return !!arrayFilter(this.columnWidthsMap.getValues()
.slice(0, this.measuredColumns), item => (item === null)).length;
}
/**
* On before render listener.
*
* @private
*/
onBeforeRender() {
this.calculateVisibleColumnsWidth();
if (this.isNeedRecalculate() && !this.inProgress) {
this.calculateAllColumnsWidth();
}
}
/**
* On after load data listener.
*
* @private
*/
onAfterLoadData() {
if (this.hot.view) {
this.recalculateAllColumnsWidth();
} else {
// first load - initialization
setTimeout(() => {
if (this.hot) {
this.recalculateAllColumnsWidth();
}
}, 0);
}
}
/**
* On before change listener.
*
* @private
* @param {Array} changes An array of modified data.
*/
onBeforeChange(changes) {
const changedColumns = arrayMap(changes, ([, columnProperty]) =>
this.hot.toPhysicalColumn(this.hot.propToCol(columnProperty)));
this.clearCache(Array.from(new Set(changedColumns)));
}
/**
* On before column resize listener.
*
* @private
* @param {number} size Calculated new column width.
* @param {number} column Visual index of the resized column.
* @param {boolean} isDblClick Flag that determines whether there was a double-click.
* @returns {number}
*/
onBeforeColumnResize(size, column, isDblClick) {
let newSize = size;
if (isDblClick) {
this.calculateColumnsWidth(column, void 0, true);
newSize = this.getColumnWidth(column, void 0, false);
}
return newSize;
}
/**
* On after Handsontable init fill plugin with all necessary values.
*
* @private
*/
onAfterInit() {
privatePool.get(this).cachedColumnHeaders = this.hot.getColHeader();
}
/**
* Destroys the plugin instance.
*/
destroy() {
this.ghostTable.clean();
super.destroy();
}
}
| 1 | 19,174 | Should these links be removed? | handsontable-handsontable | js |
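Context for the question above — a general JSDoc behaviour, not a claim about this project's docs pipeline: `{@link X}` renders as a hyperlink only when `X` is itself part of the generated documentation; when the target class is private or excluded, the tag yields a dead link or leaks verbatim, which is the usual reason to fall back to plain text:

/**
 * Renders as a hyperlink only if GhostTable has its own generated page:
 * The plugin uses {@link GhostTable} for calculations.
 *
 * Plain text avoids a broken link when GhostTable is undocumented:
 * The plugin uses GhostTable for calculations.
 */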
@@ -217,11 +217,12 @@ class TabbedBrowser(tabwidget.TabWidget):
for tab in self.widgets():
self._remove_tab(tab)
- def close_tab(self, tab):
+ def close_tab(self, tab, add_undo=True):
"""Close a tab.
Args:
tab: The QWebView to be closed.
+ add_undo: Whether the tab close can be undone.
"""
last_close = config.get('tabs', 'last-close')
count = self.count() | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""The main tabbed browser widget."""
import functools
import collections
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QTimer, QUrl
from PyQt5.QtGui import QIcon
from qutebrowser.config import config
from qutebrowser.keyinput import modeman
from qutebrowser.mainwindow import tabwidget
from qutebrowser.browser import signalfilter, browsertab
from qutebrowser.utils import (log, usertypes, utils, qtutils, objreg,
urlutils, message)
UndoEntry = collections.namedtuple('UndoEntry', ['url', 'history', 'index'])
class TabDeletedError(Exception):
"""Exception raised when _tab_index is called for a deleted tab."""
class TabbedBrowser(tabwidget.TabWidget):
"""A TabWidget with QWebViews inside.
Provides methods to manage tabs, convenience methods to interact with the
current tab (cur_*) and filters signals to re-emit them when they occurred
in the currently visible tab.
For all tab-specific signals (cur_*) emitted by a tab, this happens:
- the signal gets filtered with _filter_signals and self.cur_* gets
emitted if the signal occurred in the current tab.
Attributes:
search_text/search_options: Search parameters which are shared between
all tabs.
_win_id: The window ID this tabbedbrowser is associated with.
_filter: A SignalFilter instance.
_now_focused: The tab which is focused now.
_tab_insert_idx_left: Where to insert a new tab with
tabbar -> new-tab-position set to 'left'.
_tab_insert_idx_right: Same as above, for 'right'.
_undo_stack: List of UndoEntry namedtuples of closed tabs.
shutting_down: Whether we're currently shutting down.
_local_marks: Jump markers local to each page
_global_marks: Jump markers used across all pages
default_window_icon: The qutebrowser window icon
Signals:
cur_progress: Progress of the current tab changed (load_progress).
cur_load_started: Current tab started loading (load_started)
cur_load_finished: Current tab finished loading (load_finished)
cur_url_changed: Current URL changed.
cur_link_hovered: Link hovered in current tab (link_hovered)
cur_scroll_perc_changed: Scroll percentage of current tab changed.
arg 1: x-position in %.
arg 2: y-position in %.
cur_load_status_changed: Loading status of current tab changed.
close_window: The last tab was closed, close this window.
resized: Emitted when the browser window has resized, so the completion
widget can adjust its size to it.
arg: The new size.
current_tab_changed: The current tab changed to the emitted tab.
new_tab: Emits the new WebView and its index when a new tab is opened.
"""
cur_progress = pyqtSignal(int)
cur_load_started = pyqtSignal()
cur_load_finished = pyqtSignal(bool)
cur_url_changed = pyqtSignal(QUrl)
cur_link_hovered = pyqtSignal(str)
cur_scroll_perc_changed = pyqtSignal(int, int)
cur_load_status_changed = pyqtSignal(str)
close_window = pyqtSignal()
resized = pyqtSignal('QRect')
current_tab_changed = pyqtSignal(browsertab.AbstractTab)
new_tab = pyqtSignal(browsertab.AbstractTab, int)
def __init__(self, win_id, parent=None):
super().__init__(win_id, parent)
self._win_id = win_id
self._tab_insert_idx_left = 0
self._tab_insert_idx_right = -1
self.shutting_down = False
self.tabCloseRequested.connect(self.on_tab_close_requested)
self.currentChanged.connect(self.on_current_changed)
self.cur_load_started.connect(self.on_cur_load_started)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self._undo_stack = []
self._filter = signalfilter.SignalFilter(win_id, self)
self._now_focused = None
self.search_text = None
self.search_options = {}
self._local_marks = {}
self._global_marks = {}
self.default_window_icon = self.window().windowIcon()
objreg.get('config').changed.connect(self.update_favicons)
objreg.get('config').changed.connect(self.update_window_title)
objreg.get('config').changed.connect(self.update_tab_titles)
def __repr__(self):
return utils.get_repr(self, count=self.count())
def _tab_index(self, tab):
"""Get the index of a given tab.
Raises TabDeletedError if the tab doesn't exist anymore.
"""
try:
idx = self.indexOf(tab)
except RuntimeError as e:
log.webview.debug("Got invalid tab ({})!".format(e))
raise TabDeletedError(e)
if idx == -1:
log.webview.debug("Got invalid tab (index is -1)!")
raise TabDeletedError("index is -1!")
return idx
def widgets(self):
"""Get a list of open tab widgets.
We don't implement this as generator so we can delete tabs while
iterating over the list.
"""
w = []
for i in range(self.count()):
w.append(self.widget(i))
return w
@config.change_filter('ui', 'window-title-format')
def update_window_title(self):
"""Change the window title to match the current tab."""
idx = self.currentIndex()
if idx == -1:
# (e.g. last tab removed)
log.webview.debug("Not updating window title because index is -1")
return
fields = self.get_tab_fields(idx)
fields['id'] = self._win_id
fmt = config.get('ui', 'window-title-format')
self.window().setWindowTitle(fmt.format(**fields))
def _connect_tab_signals(self, tab):
"""Set up the needed signals for tab."""
# filtered signals
tab.link_hovered.connect(
self._filter.create(self.cur_link_hovered, tab))
tab.load_progress.connect(
self._filter.create(self.cur_progress, tab))
tab.load_finished.connect(
self._filter.create(self.cur_load_finished, tab))
tab.load_started.connect(
self._filter.create(self.cur_load_started, tab))
tab.scroller.perc_changed.connect(
self._filter.create(self.cur_scroll_perc_changed, tab))
tab.scroller.perc_changed.connect(self.on_scroll_pos_changed)
tab.url_changed.connect(
self._filter.create(self.cur_url_changed, tab))
tab.load_status_changed.connect(
self._filter.create(self.cur_load_status_changed, tab))
tab.url_changed.connect(
functools.partial(self.on_url_changed, tab))
# misc
tab.title_changed.connect(
functools.partial(self.on_title_changed, tab))
tab.icon_changed.connect(
functools.partial(self.on_icon_changed, tab))
tab.load_progress.connect(
functools.partial(self.on_load_progress, tab))
tab.load_finished.connect(
functools.partial(self.on_load_finished, tab))
tab.load_started.connect(
functools.partial(self.on_load_started, tab))
tab.window_close_requested.connect(
functools.partial(self.on_window_close_requested, tab))
tab.new_tab_requested.connect(self.tabopen)
tab.add_history_item.connect(objreg.get('web-history').add_from_tab)
def current_url(self):
"""Get the URL of the current tab.
Intended to be used from command handlers.
Return:
The current URL as QUrl.
"""
idx = self.currentIndex()
return super().tab_url(idx)
def shutdown(self):
"""Try to shut down all tabs cleanly."""
self.shutting_down = True
for tab in self.widgets():
self._remove_tab(tab)
def close_tab(self, tab):
"""Close a tab.
Args:
tab: The QWebView to be closed.
"""
last_close = config.get('tabs', 'last-close')
count = self.count()
if last_close == 'ignore' and count == 1:
return
self._remove_tab(tab)
if count == 1: # We just closed the last tab above.
if last_close == 'close':
self.close_window.emit()
elif last_close == 'blank':
self.openurl(QUrl('about:blank'), newtab=True)
elif last_close == 'startpage':
url = QUrl(config.get('general', 'startpage')[0])
self.openurl(url, newtab=True)
elif last_close == 'default-page':
url = config.get('general', 'default-page')
self.openurl(url, newtab=True)
def _remove_tab(self, tab):
"""Remove a tab from the tab list and delete it properly.
Args:
tab: The QWebView to be closed.
"""
idx = self.indexOf(tab)
if idx == -1:
raise TabDeletedError("tab {} is not contained in "
"TabbedWidget!".format(tab))
if tab is self._now_focused:
self._now_focused = None
if tab is objreg.get('last-focused-tab', None, scope='window',
window=self._win_id):
objreg.delete('last-focused-tab', scope='window',
window=self._win_id)
if tab.url().isValid():
history_data = tab.history.serialize()
entry = UndoEntry(tab.url(), history_data, idx)
self._undo_stack.append(entry)
elif tab.url().isEmpty():
# There are some good reasons why a URL could be empty
# (target="_blank" with a download, see [1]), so we silently ignore
# this.
# [1] https://github.com/The-Compiler/qutebrowser/issues/163
pass
else:
            # We display a warning for URLs which are not empty but invalid -
# but we don't return here because we want the tab to close either
# way.
urlutils.invalid_url_error(self._win_id, tab.url(), "saving tab")
tab.shutdown()
self.removeTab(idx)
tab.deleteLater()
def undo(self):
"""Undo removing of a tab."""
# Remove unused tab which may be created after the last tab is closed
last_close = config.get('tabs', 'last-close')
use_current_tab = False
if last_close in ['blank', 'startpage', 'default-page']:
only_one_tab_open = self.count() == 1
no_history = len(self.widget(0).history) == 1
urls = {
'blank': QUrl('about:blank'),
'startpage': QUrl(config.get('general', 'startpage')[0]),
'default-page': config.get('general', 'default-page'),
}
first_tab_url = self.widget(0).url()
last_close_urlstr = urls[last_close].toString().rstrip('/')
first_tab_urlstr = first_tab_url.toString().rstrip('/')
last_close_url_used = first_tab_urlstr == last_close_urlstr
use_current_tab = (only_one_tab_open and no_history and
last_close_url_used)
url, history_data, idx = self._undo_stack.pop()
if use_current_tab:
self.openurl(url, newtab=False)
newtab = self.widget(0)
else:
newtab = self.tabopen(url, background=False, idx=idx)
newtab.history.deserialize(history_data)
@pyqtSlot('QUrl', bool)
def openurl(self, url, newtab):
"""Open a URL, used as a slot.
Args:
url: The URL to open as QUrl.
newtab: True to open URL in a new tab, False otherwise.
"""
qtutils.ensure_valid(url)
if newtab or self.currentWidget() is None:
self.tabopen(url, background=False)
else:
self.currentWidget().openurl(url)
@pyqtSlot(int)
def on_tab_close_requested(self, idx):
"""Close a tab via an index."""
tab = self.widget(idx)
if tab is None:
log.webview.debug("Got invalid tab {} for index {}!".format(
tab, idx))
return
self.close_tab(tab)
@pyqtSlot(browsertab.AbstractTab)
def on_window_close_requested(self, widget):
"""Close a tab with a widget given."""
try:
self.close_tab(widget)
except TabDeletedError:
log.webview.debug("Requested to close {!r} which does not "
"exist!".format(widget))
@pyqtSlot('QUrl')
@pyqtSlot('QUrl', bool)
def tabopen(self, url=None, background=None, explicit=False, idx=None):
"""Open a new tab with a given URL.
Inner logic for open-tab and open-tab-bg.
Also connect all the signals we need to _filter_signals.
Args:
url: The URL to open as QUrl or None for an empty tab.
background: Whether to open the tab in the background.
if None, the background-tabs setting decides.
explicit: Whether the tab was opened explicitly.
If this is set, the new position might be different. With
the default settings we handle it like Chromium does:
- Tabs from clicked links etc. are to the right of
the current.
- Explicitly opened tabs are at the very right.
idx: The index where the new tab should be opened.
Return:
The opened WebView instance.
"""
if url is not None:
qtutils.ensure_valid(url)
log.webview.debug("Creating new tab with URL {}".format(url))
if config.get('tabs', 'tabs-are-windows') and self.count() > 0:
from qutebrowser.mainwindow import mainwindow
window = mainwindow.MainWindow()
window.show()
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=window.win_id)
return tabbed_browser.tabopen(url, background, explicit)
tab = browsertab.create(win_id=self._win_id, parent=self)
self._connect_tab_signals(tab)
if idx is None:
idx = self._get_new_tab_idx(explicit)
self.insertTab(idx, tab, "")
if url is not None:
tab.openurl(url)
if background is None:
background = config.get('tabs', 'background-tabs')
if background:
self.tab_index_changed.emit(self.currentIndex(), self.count())
else:
self.setCurrentWidget(tab)
tab.show()
self.new_tab.emit(tab, idx)
return tab
def _get_new_tab_idx(self, explicit):
"""Get the index of a tab to insert.
Args:
explicit: Whether the tab was opened explicitly.
Return:
The index of the new tab.
"""
if explicit:
pos = config.get('tabs', 'new-tab-position-explicit')
else:
pos = config.get('tabs', 'new-tab-position')
if pos == 'left':
idx = self._tab_insert_idx_left
            # At first sight, we'd think we have to decrement
# self._tab_insert_idx_left here, as we want the next tab to be
# *before* the one we just opened. However, since we opened a tab
# *to the left* of the currently focused tab, indices will shift by
# 1 automatically.
elif pos == 'right':
idx = self._tab_insert_idx_right
self._tab_insert_idx_right += 1
elif pos == 'first':
idx = 0
elif pos == 'last':
idx = -1
else:
raise ValueError("Invalid new-tab-position '{}'.".format(pos))
log.webview.debug("new-tab-position {} -> opening new tab at {}, "
"next left: {} / right: {}".format(
pos, idx, self._tab_insert_idx_left,
self._tab_insert_idx_right))
return idx
@config.change_filter('tabs', 'show-favicons')
def update_favicons(self):
"""Update favicons when config was changed."""
show = config.get('tabs', 'show-favicons')
tabs_are_wins = config.get('tabs', 'tabs-are-windows')
for i, tab in enumerate(self.widgets()):
if show:
self.setTabIcon(i, tab.icon())
if tabs_are_wins:
self.window().setWindowIcon(tab.icon())
else:
self.setTabIcon(i, QIcon())
if tabs_are_wins:
self.window().setWindowIcon(self.default_window_icon)
@pyqtSlot()
def on_load_started(self, tab):
"""Clear icon and update title when a tab started loading.
Args:
tab: The tab where the signal belongs to.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.update_tab_title(idx)
if tab.data.keep_icon:
tab.data.keep_icon = False
else:
self.setTabIcon(idx, QIcon())
if (config.get('tabs', 'tabs-are-windows') and
config.get('tabs', 'show-favicons')):
self.window().setWindowIcon(self.default_window_icon)
if idx == self.currentIndex():
self.update_window_title()
@pyqtSlot()
def on_cur_load_started(self):
"""Leave insert/hint mode when loading started."""
modeman.maybe_leave(self._win_id, usertypes.KeyMode.insert,
'load started')
modeman.maybe_leave(self._win_id, usertypes.KeyMode.hint,
'load started')
@pyqtSlot(browsertab.AbstractTab, str)
def on_title_changed(self, tab, text):
"""Set the title of a tab.
Slot for the title_changed signal of any tab.
Args:
tab: The WebView where the title was changed.
text: The text to set.
"""
if not text:
log.webview.debug("Ignoring title change to '{}'.".format(text))
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
log.webview.debug("Changing title for idx {} to '{}'".format(
idx, text))
self.set_page_title(idx, text)
if idx == self.currentIndex():
self.update_window_title()
@pyqtSlot(browsertab.AbstractTab, QUrl)
def on_url_changed(self, tab, url):
"""Set the new URL as title if there's no title yet.
Args:
tab: The WebView where the title was changed.
url: The new URL.
"""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if not self.page_title(idx):
self.set_page_title(idx, url.toDisplayString())
@pyqtSlot(browsertab.AbstractTab, QIcon)
def on_icon_changed(self, tab, icon):
"""Set the icon of a tab.
Slot for the iconChanged signal of any tab.
Args:
tab: The WebView where the title was changed.
icon: The new icon
"""
if not config.get('tabs', 'show-favicons'):
return
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
self.setTabIcon(idx, icon)
if config.get('tabs', 'tabs-are-windows'):
self.window().setWindowIcon(icon)
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode):
"""Give focus to current tab if command mode was left."""
if mode in [usertypes.KeyMode.command, usertypes.KeyMode.prompt,
usertypes.KeyMode.yesno]:
widget = self.currentWidget()
log.modes.debug("Left status-input mode, focusing {!r}".format(
widget))
if widget is None:
return
widget.setFocus()
@pyqtSlot(int)
def on_current_changed(self, idx):
"""Set last-focused-tab and leave hinting mode when focus changed."""
if idx == -1 or self.shutting_down:
# closing the last tab (before quitting) or shutting down
return
tab = self.widget(idx)
log.modes.debug("Current tab changed, focusing {!r}".format(tab))
tab.setFocus()
for mode in [usertypes.KeyMode.hint, usertypes.KeyMode.insert,
usertypes.KeyMode.caret, usertypes.KeyMode.passthrough]:
modeman.maybe_leave(self._win_id, mode, 'tab changed')
if self._now_focused is not None:
objreg.register('last-focused-tab', self._now_focused, update=True,
scope='window', window=self._win_id)
self._now_focused = tab
self.current_tab_changed.emit(tab)
QTimer.singleShot(0, self.update_window_title)
self._tab_insert_idx_left = self.currentIndex()
self._tab_insert_idx_right = self.currentIndex() + 1
@pyqtSlot()
def on_cmd_return_pressed(self):
"""Set focus when the commandline closes."""
log.modes.debug("Commandline closed, focusing {!r}".format(self))
def on_load_progress(self, tab, perc):
"""Adjust tab indicator on load progress."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
start = config.get('colors', 'tabs.indicator.start')
stop = config.get('colors', 'tabs.indicator.stop')
system = config.get('colors', 'tabs.indicator.system')
color = utils.interpolate_color(start, stop, perc, system)
self.set_tab_indicator_color(idx, color)
self.update_tab_title(idx)
if idx == self.currentIndex():
self.update_window_title()
def on_load_finished(self, tab, ok):
"""Adjust tab indicator when loading finished."""
try:
idx = self._tab_index(tab)
except TabDeletedError:
# We can get signals for tabs we already deleted...
return
if ok:
start = config.get('colors', 'tabs.indicator.start')
stop = config.get('colors', 'tabs.indicator.stop')
system = config.get('colors', 'tabs.indicator.system')
color = utils.interpolate_color(start, stop, 100, system)
else:
color = config.get('colors', 'tabs.indicator.error')
self.set_tab_indicator_color(idx, color)
self.update_tab_title(idx)
if idx == self.currentIndex():
self.update_window_title()
@pyqtSlot()
def on_scroll_pos_changed(self):
"""Update tab and window title when scroll position changed."""
self.update_window_title()
self.update_tab_title(self.currentIndex())
def resizeEvent(self, e):
"""Extend resizeEvent of QWidget to emit a resized signal afterwards.
Args:
e: The QResizeEvent
"""
super().resizeEvent(e)
self.resized.emit(self.geometry())
def wheelEvent(self, e):
"""Override wheelEvent of QWidget to forward it to the focused tab.
Args:
e: The QWheelEvent
"""
if self._now_focused is not None:
self._now_focused.wheelEvent(e)
else:
e.ignore()
def set_mark(self, key):
"""Set a mark at the current scroll position in the current tab.
Args:
key: mark identifier; capital indicates a global mark
"""
# strip the fragment as it may interfere with scrolling
try:
url = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
# show an error only if the mark is not automatically set
if key != "'":
message.error(self._win_id, "Failed to set mark: url invalid")
return
point = self.currentWidget().scroller.pos_px()
if key.isupper():
self._global_marks[key] = point, url
else:
if url not in self._local_marks:
self._local_marks[url] = {}
self._local_marks[url][key] = point
def jump_mark(self, key):
"""Jump to the mark named by `key`.
Args:
key: mark identifier; capital indicates a global mark
"""
try:
# consider urls that differ only in fragment to be identical
urlkey = self.current_url().adjusted(QUrl.RemoveFragment)
except qtutils.QtValueError:
urlkey = None
tab = self.currentWidget()
if key.isupper():
if key in self._global_marks:
point, url = self._global_marks[key]
def callback(ok):
if ok:
self.cur_load_finished.disconnect(callback)
tab.scroller.to_point(point)
self.openurl(url, newtab=False)
self.cur_load_finished.connect(callback)
else:
message.error(self._win_id, "Mark {} is not set".format(key))
elif urlkey is None:
message.error(self._win_id, "Current URL is invalid!")
elif urlkey in self._local_marks and key in self._local_marks[urlkey]:
point = self._local_marks[urlkey][key]
# save the pre-jump position in the special ' mark
# this has to happen after we read the mark, otherwise jump_mark
# "'" would just jump to the current position every time
self.set_mark("'")
tab.scroller.to_point(point)
else:
message.error(self._win_id, "Mark {} is not set".format(key))
| 1 | 15,910 | Please make this a keyword-only argument by adding a `*` argument before `add_undo`. | qutebrowser-qutebrowser | py |
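The suggestion above refers to Python's keyword-only arguments (PEP 3102); a minimal sketch of the requested signature, with the body elided:

# The bare `*` makes every parameter after it keyword-only, so callers must
# write close_tab(tab, add_undo=False); the positional form
# close_tab(tab, False) raises a TypeError.
def close_tab(self, tab, *, add_undo=True):
    ...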
@@ -201,6 +201,13 @@ var eventStreamWriterTestTmpl = template.Must(
{{- end }}
}
+ var marshalers request.HandlerList
+ marshalers.PushBackNamed({{ $.API.ProtocolPackage }}.BuildHandler)
+ payloadMarshaler := protocol.HandlerPayloadMarshal{
+ Marshalers: marshalers,
+ }
+ _ = payloadMarshaler
+
eventMsgs := []eventstream.Message{
{{- range $idx, $event := $.InputStream.Events }}
{{- template "set event message" Map "idx" $idx "parentShape" $event.Shape "eventName" $event.Name }} | 1 | // +build codegen
package api
import (
"text/template"
)
var eventStreamWriterTestTmpl = template.Must(
template.New("eventStreamWriterTestTmpl").Funcs(template.FuncMap{
"ValueForType": valueForType,
"HasNonBlobPayloadMembers": eventHasNonBlobPayloadMembers,
"EventHeaderValueForType": setEventHeaderValueForType,
"Map": templateMap,
"OptionalAddInt": func(do bool, a, b int) int {
if !do {
return a
}
return a + b
},
"HasNonEventStreamMember": func(s *Shape) bool {
for _, ref := range s.MemberRefs {
if !ref.Shape.IsEventStream {
return true
}
}
return false
},
}).Parse(`
{{ range $opName, $op := $.Operations }}
{{ if $op.EventStreamAPI }}
{{ if $op.EventStreamAPI.InputStream }}
{{ template "event stream inputStream tests" $op.EventStreamAPI }}
{{ end }}
{{ end }}
{{ end }}
{{ define "event stream inputStream tests" }}
func Test{{ $.Operation.ExportedName }}_Write(t *testing.T) {
clientEvents, expectedClientEvents := mock{{ $.Operation.ExportedName }}WriteEvents()
sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t,
&eventstreamtest.ServeEventStream{
T: t,
ClientEvents: expectedClientEvents,
BiDirectional: true,
},
true)
defer cleanupFn()
svc := New(sess)
resp, err := svc.{{ $.Operation.ExportedName }}(nil)
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
stream := resp.GetStream()
for _, event := range clientEvents {
err = stream.Send(context.Background(), event)
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
}
if err := stream.Close(); err != nil {
t.Errorf("expect no error, got %v", err)
}
}
func Test{{ $.Operation.ExportedName }}_WriteClose(t *testing.T) {
sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t,
eventstreamtest.ServeEventStream{T: t, BiDirectional: true},
true,
)
if err != nil {
t.Fatalf("expect no error, %v", err)
}
defer cleanupFn()
svc := New(sess)
resp, err := svc.{{ $.Operation.ExportedName }}(nil)
if err != nil {
t.Fatalf("expect no error got, %v", err)
}
// Assert calling Err before close does not close the stream.
resp.GetStream().Err()
{{ $eventShape := index $.InputStream.Events 0 }}
err = resp.GetStream().Send(context.Background(), &{{ $eventShape.Shape.ShapeName }}{})
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
resp.GetStream().Close()
if err := resp.GetStream().Err(); err != nil {
t.Errorf("expect no error, %v", err)
}
}
func Test{{ $.Operation.ExportedName }}_WriteError(t *testing.T) {
sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t,
eventstreamtest.ServeEventStream{
T: t,
BiDirectional: true,
ForceCloseAfter: time.Millisecond * 500,
},
true,
)
if err != nil {
t.Fatalf("expect no error, %v", err)
}
defer cleanupFn()
svc := New(sess)
resp, err := svc.{{ $.Operation.ExportedName }}(nil)
if err != nil {
t.Fatalf("expect no error got, %v", err)
}
defer resp.GetStream().Close()
{{ $eventShape := index $.InputStream.Events 0 }}
for {
err = resp.GetStream().Send(context.Background(), &{{ $eventShape.Shape.ShapeName }}{})
if err != nil {
if strings.Contains("unable to send event", err.Error()) {
t.Errorf("expected stream closed error, got %v", err)
}
break
}
}
}
func Test{{ $.Operation.ExportedName }}_ReadWrite(t *testing.T) {
expectedServiceEvents, serviceEvents := mock{{ $.Operation.ExportedName }}ReadEvents()
clientEvents, expectedClientEvents := mock{{ $.Operation.ExportedName }}WriteEvents()
sess, cleanupFn, err := eventstreamtest.SetupEventStreamSession(t,
&eventstreamtest.ServeEventStream{
T: t,
ClientEvents: expectedClientEvents,
Events: serviceEvents,
BiDirectional: true,
},
true)
defer cleanupFn()
svc := New(sess)
resp, err := svc.{{ $.Operation.ExportedName }}(nil)
if err != nil {
t.Fatalf("expect no error, got %v", err)
}
stream := resp.GetStream()
defer stream.Close()
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
var i int
for event := range resp.GetStream().Events() {
if event == nil {
t.Errorf("%d, expect event, got nil", i)
}
if e, a := expectedServiceEvents[i], event; !reflect.DeepEqual(e, a) {
t.Errorf("%d, expect %T %v, got %T %v", i, e, e, a, a)
}
i++
}
}()
for _, event := range clientEvents {
err = stream.Send(context.Background(), event)
if err != nil {
t.Errorf("expect no error, got %v", err)
}
}
resp.GetStream().Close()
wg.Wait()
if err := resp.GetStream().Err(); err != nil {
t.Errorf("expect no error, %v", err)
}
}
func mock{{ $.Operation.ExportedName }}WriteEvents() (
[]{{ $.InputStream.Name }}Event,
[]eventstream.Message,
) {
inputEvents := []{{ $.InputStream.Name }}Event {
{{- if eq $.Operation.API.Metadata.Protocol "json" }}
{{- template "set event type" $.Operation.InputRef.Shape }}
{{- end }}
{{- range $_, $event := $.InputStream.Events }}
{{- template "set event type" $event.Shape }}
{{- end }}
}
eventMsgs := []eventstream.Message{
{{- range $idx, $event := $.InputStream.Events }}
{{- template "set event message" Map "idx" $idx "parentShape" $event.Shape "eventName" $event.Name }}
{{- end }}
}
return inputEvents, eventMsgs
}
{{ end }}
{{/* Params: *Shape */}}
{{ define "set event type" }}
&{{ $.ShapeName }}{
{{- range $memName, $memRef := $.MemberRefs }}
{{- if not $memRef.Shape.IsEventStream }}
{{ $memName }}: {{ ValueForType $memRef.Shape nil }},
{{- end }}
{{- end }}
},
{{- end }}
{{/* Params: idx:int, parentShape:*Shape, eventName:string */}}
{{ define "set event message" }}
{
Headers: eventstream.Headers{
eventstreamtest.EventMessageTypeHeader,
{{- range $memName, $memRef := $.parentShape.MemberRefs }}
{{- template "set event message header" Map "idx" $.idx "parentShape" $.parentShape "memName" $memName "memRef" $memRef }}
{{- end }}
{
Name: eventstreamapi.EventTypeHeader,
Value: eventstream.StringValue("{{ $.eventName }}"),
},
},
{{- template "set event message payload" Map "idx" $.idx "parentShape" $.parentShape }}
},
{{- end }}
{{/* Params: idx:int, parentShape:*Shape, memName:string, memRef:*ShapeRef */}}
{{ define "set event message header" }}
{{- if (and ($.memRef.IsEventPayload) (eq $.memRef.Shape.Type "blob")) }}
{
Name: ":content-type",
Value: eventstream.StringValue("application/octet-stream"),
},
{{- else if $.memRef.IsEventHeader }}
{
Name: "{{ $.memName }}",
{{- $shapeValueVar := printf "inputEvents[%d].(%s).%s" $.idx $.parentShape.GoType $.memName }}
Value: {{ EventHeaderValueForType $.memRef.Shape $shapeValueVar }},
},
{{- end }}
{{- end }}
{{/* Params: idx:int, parentShape:*Shape, memName:string, memRef:*ShapeRef */}}
{{ define "set event message payload" }}
{{- $payloadMemName := $.parentShape.PayloadRefName }}
{{- if HasNonBlobPayloadMembers $.parentShape }}
Payload: eventstreamtest.MarshalEventPayload(payloadMarshaler, inputEvents[{{ $.idx }}]),
{{- else if $payloadMemName }}
{{- $shapeType := (index $.parentShape.MemberRefs $payloadMemName).Shape.Type }}
{{- if eq $shapeType "blob" }}
Payload: inputEvents[{{ $.idx }}].({{ $.parentShape.GoType }}).{{ $payloadMemName }},
{{- else if eq $shapeType "string" }}
Payload: []byte(*inputEvents[{{ $.idx }}].({{ $.parentShape.GoType }}).{{ $payloadMemName }}),
{{- end }}
{{- end }}
{{- end }}
`))
| 1 | 10,229 | didn't quite follow what this code block is doing. | aws-aws-sdk-go | go |
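A hedged reading of the generated block the comment above asks about; the protocol package (`restjson`) and the helper name below are illustrative assumptions, not the template's literal output. The idea is that the test reuses the service protocol's request Build handler, so expected event payloads are serialized exactly as the client itself would serialize them:

// Sketch: wrap the protocol's Build handler in a payload marshaler and use
// it to produce the expected wire payload for a modeled (non-blob) event.
package sketch

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream"
	"github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamtest"
	"github.com/aws/aws-sdk-go/private/protocol/restjson"
)

func expectedMessage(event interface{}) eventstream.Message {
	// Same shape as the generated code: a handler list holding only the
	// protocol's Build handler, wrapped in a HandlerPayloadMarshal.
	var marshalers request.HandlerList
	marshalers.PushBackNamed(restjson.BuildHandler)

	payloadMarshaler := protocol.HandlerPayloadMarshal{
		Marshalers: marshalers,
	}

	// MarshalEventPayload runs the Build handler over `event` and returns
	// the serialized bytes used as the message's expected payload.
	return eventstream.Message{
		Payload: eventstreamtest.MarshalEventPayload(payloadMarshaler, event),
	}
}

The `_ = payloadMarshaler` line in the template simply keeps the generated file compiling when no event in the stream has a non-blob payload, because `MarshalEventPayload(payloadMarshaler, ...)` is only emitted for events that do.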
@@ -1071,6 +1071,12 @@ define(["playbackManager", "dom", "inputManager", "datetime", "itemHelper", "med
}
function onWindowKeyDown(e) {
+ // FIXME: Conflicts with keyboard navigation
+ // FIXME: Then the keyboard is completely ignored. Need another solution.
+ if (layoutManager.tv) {
+ return void showOsd();
+ }
+
if (!currentVisibleMenu && 32 === e.keyCode) {
playbackManager.playPause(currentPlayer);
return void showOsd(); | 1 | define(["playbackManager", "dom", "inputManager", "datetime", "itemHelper", "mediaInfo", "focusManager", "imageLoader", "scrollHelper", "events", "connectionManager", "browser", "globalize", "apphost", "layoutManager", "userSettings", "scrollStyles", "emby-slider", "paper-icon-button-light", "css!css/videoosd"], function (playbackManager, dom, inputManager, datetime, itemHelper, mediaInfo, focusManager, imageLoader, scrollHelper, events, connectionManager, browser, globalize, appHost, layoutManager, userSettings) {
"use strict";
function seriesImageUrl(item, options) {
if ("Episode" !== item.Type) {
return null;
}
options = options || {};
options.type = options.type || "Primary";
if ("Primary" === options.type && item.SeriesPrimaryImageTag) {
options.tag = item.SeriesPrimaryImageTag;
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options);
}
if ("Thumb" === options.type) {
if (item.SeriesThumbImageTag) {
options.tag = item.SeriesThumbImageTag;
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.SeriesId, options);
}
if (item.ParentThumbImageTag) {
options.tag = item.ParentThumbImageTag;
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.ParentThumbItemId, options);
}
}
return null;
}
function imageUrl(item, options) {
options = options || {};
options.type = options.type || "Primary";
if (item.ImageTags && item.ImageTags[options.type]) {
options.tag = item.ImageTags[options.type];
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.PrimaryImageItemId || item.Id, options);
}
if ("Primary" === options.type && item.AlbumId && item.AlbumPrimaryImageTag) {
options.tag = item.AlbumPrimaryImageTag;
return connectionManager.getApiClient(item.ServerId).getScaledImageUrl(item.AlbumId, options);
}
return null;
}
function logoImageUrl(item, apiClient, options) {
options = options || {};
options.type = "Logo";
if (item.ImageTags && item.ImageTags.Logo) {
options.tag = item.ImageTags.Logo;
return apiClient.getScaledImageUrl(item.Id, options);
}
if (item.ParentLogoImageTag) {
options.tag = item.ParentLogoImageTag;
return apiClient.getScaledImageUrl(item.ParentLogoItemId, options);
}
return null;
}
return function (view, params) {
function onVerticalSwipe(e, elem, data) {
var player = currentPlayer;
if (player) {
var deltaY = data.currentDeltaY;
var windowSize = dom.getWindowSize();
if (supportsBrightnessChange && data.clientX < windowSize.innerWidth / 2) {
return void doBrightnessTouch(deltaY, player, windowSize.innerHeight);
}
doVolumeTouch(deltaY, player, windowSize.innerHeight);
}
}
function doBrightnessTouch(deltaY, player, viewHeight) {
var delta = -deltaY / viewHeight * 100;
var newValue = playbackManager.getBrightness(player) + delta;
newValue = Math.min(newValue, 100);
newValue = Math.max(newValue, 0);
playbackManager.setBrightness(newValue, player);
}
function doVolumeTouch(deltaY, player, viewHeight) {
var delta = -deltaY / viewHeight * 100;
var newValue = playbackManager.getVolume(player) + delta;
newValue = Math.min(newValue, 100);
newValue = Math.max(newValue, 0);
playbackManager.setVolume(newValue, player);
}
function onDoubleClick(e) {
var clientX = e.clientX;
if (null != clientX) {
if (clientX < dom.getWindowSize().innerWidth / 2) {
playbackManager.rewind(currentPlayer);
} else {
playbackManager.fastForward(currentPlayer);
}
e.preventDefault();
e.stopPropagation();
}
}
function getDisplayItem(item) {
if ("TvChannel" === item.Type) {
var apiClient = connectionManager.getApiClient(item.ServerId);
return apiClient.getItem(apiClient.getCurrentUserId(), item.Id).then(function (refreshedItem) {
return {
originalItem: refreshedItem,
displayItem: refreshedItem.CurrentProgram
};
});
}
return Promise.resolve({
originalItem: item
});
}
function updateRecordingButton(item) {
if (!item || "Program" !== item.Type) {
if (recordingButtonManager) {
recordingButtonManager.destroy();
recordingButtonManager = null;
}
return void view.querySelector(".btnRecord").classList.add("hide");
}
connectionManager.getApiClient(item.ServerId).getCurrentUser().then(function (user) {
if (user.Policy.EnableLiveTvManagement) {
require(["recordingButton"], function (RecordingButton) {
if (recordingButtonManager) {
return void recordingButtonManager.refreshItem(item);
}
recordingButtonManager = new RecordingButton({
item: item,
button: view.querySelector(".btnRecord")
});
view.querySelector(".btnRecord").classList.remove("hide");
});
}
});
}
function updateDisplayItem(itemInfo) {
var item = itemInfo.originalItem;
currentItem = item;
var displayItem = itemInfo.displayItem || item;
updateRecordingButton(displayItem);
setPoster(displayItem, item);
var parentName = displayItem.SeriesName || displayItem.Album;
if (displayItem.EpisodeTitle || displayItem.IsSeries) {
parentName = displayItem.Name;
}
setTitle(displayItem, parentName);
var titleElement;
var osdTitle = view.querySelector(".osdTitle");
titleElement = osdTitle;
var displayName = itemHelper.getDisplayName(displayItem, {
includeParentInfo: "Program" !== displayItem.Type,
includeIndexNumber: "Program" !== displayItem.Type
});
            if (!displayName) {
                displayName = displayItem.Type; // fall back to the item type when nothing else is available
            }
titleElement.innerHTML = displayName;
if (displayName) {
titleElement.classList.remove("hide");
} else {
titleElement.classList.add("hide");
}
var mediaInfoHtml = mediaInfo.getPrimaryMediaInfoHtml(displayItem, {
runtime: false,
subtitles: false,
tomatoes: false,
endsAt: false,
episodeTitle: false,
originalAirDate: "Program" !== displayItem.Type,
episodeTitleIndexNumber: "Program" !== displayItem.Type,
programIndicator: false
});
var osdMediaInfo = view.querySelector(".osdMediaInfo");
osdMediaInfo.innerHTML = mediaInfoHtml;
if (mediaInfoHtml) {
osdMediaInfo.classList.remove("hide");
} else {
osdMediaInfo.classList.add("hide");
}
var secondaryMediaInfo = view.querySelector(".osdSecondaryMediaInfo");
var secondaryMediaInfoHtml = mediaInfo.getSecondaryMediaInfoHtml(displayItem, {
startDate: false,
programTime: false
});
secondaryMediaInfo.innerHTML = secondaryMediaInfoHtml;
if (secondaryMediaInfoHtml) {
secondaryMediaInfo.classList.remove("hide");
} else {
secondaryMediaInfo.classList.add("hide");
}
if (displayName) {
view.querySelector(".osdMainTextContainer").classList.remove("hide");
} else {
view.querySelector(".osdMainTextContainer").classList.add("hide");
}
if (enableProgressByTimeOfDay) {
setDisplayTime(startTimeText, displayItem.StartDate);
setDisplayTime(endTimeText, displayItem.EndDate);
startTimeText.classList.remove("hide");
endTimeText.classList.remove("hide");
programStartDateMs = displayItem.StartDate ? datetime.parseISO8601Date(displayItem.StartDate).getTime() : 0;
programEndDateMs = displayItem.EndDate ? datetime.parseISO8601Date(displayItem.EndDate).getTime() : 0;
} else {
startTimeText.classList.add("hide");
endTimeText.classList.add("hide");
startTimeText.innerHTML = "";
endTimeText.innerHTML = "";
programStartDateMs = 0;
programEndDateMs = 0;
}
}
function getDisplayTimeWithoutAmPm(date, showSeconds) {
if (showSeconds) {
return datetime.toLocaleTimeString(date, {
hour: "numeric",
minute: "2-digit",
second: "2-digit"
}).toLowerCase().replace("am", "").replace("pm", "").trim();
}
return datetime.getDisplayTime(date).toLowerCase().replace("am", "").replace("pm", "").trim();
}
function setDisplayTime(elem, date) {
var html;
if (date) {
date = datetime.parseISO8601Date(date);
html = getDisplayTimeWithoutAmPm(date);
}
elem.innerHTML = html || "";
}
function shouldEnableProgressByTimeOfDay(item) {
return !("TvChannel" !== item.Type || !item.CurrentProgram);
}
function updateNowPlayingInfo(player, state) {
var item = state.NowPlayingItem;
currentItem = item;
if (!item) {
setPoster(null);
updateRecordingButton(null);
Emby.Page.setTitle("");
nowPlayingVolumeSlider.disabled = true;
nowPlayingPositionSlider.disabled = true;
btnFastForward.disabled = true;
btnRewind.disabled = true;
view.querySelector(".btnSubtitles").classList.add("hide");
view.querySelector(".btnAudio").classList.add("hide");
view.querySelector(".osdTitle").innerHTML = "";
view.querySelector(".osdMediaInfo").innerHTML = "";
return;
}
enableProgressByTimeOfDay = shouldEnableProgressByTimeOfDay(item);
getDisplayItem(item).then(updateDisplayItem);
nowPlayingVolumeSlider.disabled = false;
nowPlayingPositionSlider.disabled = false;
btnFastForward.disabled = false;
btnRewind.disabled = false;
if (playbackManager.subtitleTracks(player).length) {
view.querySelector(".btnSubtitles").classList.remove("hide");
toggleSubtitleSync();
} else {
view.querySelector(".btnSubtitles").classList.add("hide");
toggleSubtitleSync("forceToHide");
}
if (playbackManager.audioTracks(player).length > 1) {
view.querySelector(".btnAudio").classList.remove("hide");
} else {
view.querySelector(".btnAudio").classList.add("hide");
}
}
function setTitle(item, parentName) {
var url = logoImageUrl(item, connectionManager.getApiClient(item.ServerId), {});
if (url) {
Emby.Page.setTitle("");
var pageTitle = document.querySelector(".pageTitle");
pageTitle.style.backgroundImage = "url('" + url + "')";
pageTitle.classList.add("pageTitleWithLogo");
pageTitle.classList.remove("pageTitleWithDefaultLogo");
pageTitle.innerHTML = "";
} else {
Emby.Page.setTitle(parentName || "");
}
var documentTitle = parentName || (item ? item.Name : null);
if (documentTitle) {
document.title = documentTitle;
}
}
function setPoster(item, secondaryItem) {
var osdPoster = view.querySelector(".osdPoster");
if (item) {
var imgUrl = seriesImageUrl(item, {
type: "Primary"
}) || seriesImageUrl(item, {
type: "Thumb"
}) || imageUrl(item, {
type: "Primary"
});
if (!imgUrl && secondaryItem && (imgUrl = seriesImageUrl(secondaryItem, {
type: "Primary"
}) || seriesImageUrl(secondaryItem, {
type: "Thumb"
}) || imageUrl(secondaryItem, {
type: "Primary"
})), imgUrl) {
return void (osdPoster.innerHTML = '<img src="' + imgUrl + '" />');
}
}
osdPoster.innerHTML = "";
}
function showOsd() {
slideDownToShow(headerElement);
showMainOsdControls();
startOsdHideTimer();
}
function hideOsd() {
slideUpToHide(headerElement);
hideMainOsdControls();
}
function toggleOsd() {
if ("osd" === currentVisibleMenu) {
hideOsd();
} else if (!currentVisibleMenu) {
showOsd();
}
}
function startOsdHideTimer() {
stopOsdHideTimer();
osdHideTimeout = setTimeout(hideOsd, 5e3);
}
function stopOsdHideTimer() {
if (osdHideTimeout) {
clearTimeout(osdHideTimeout);
osdHideTimeout = null;
}
}
function slideDownToShow(elem) {
elem.classList.remove("osdHeader-hidden");
}
function slideUpToHide(elem) {
elem.classList.add("osdHeader-hidden");
}
function clearHideAnimationEventListeners(elem) {
dom.removeEventListener(elem, transitionEndEventName, onHideAnimationComplete, {
once: true
});
}
function onHideAnimationComplete(e) {
var elem = e.target;
elem.classList.add("hide");
dom.removeEventListener(elem, transitionEndEventName, onHideAnimationComplete, {
once: true
});
}
function showMainOsdControls() {
if (!currentVisibleMenu) {
var elem = osdBottomElement;
currentVisibleMenu = "osd";
clearHideAnimationEventListeners(elem);
elem.classList.remove("hide");
elem.classList.remove("videoOsdBottom-hidden");
if (!layoutManager.mobile) {
setTimeout(function () {
focusManager.focus(elem.querySelector(".btnPause"));
}, 50);
}
toggleSubtitleSync();
}
}
function hideMainOsdControls() {
if ("osd" === currentVisibleMenu) {
var elem = osdBottomElement;
clearHideAnimationEventListeners(elem);
elem.classList.add("videoOsdBottom-hidden");
dom.addEventListener(elem, transitionEndEventName, onHideAnimationComplete, {
once: true
});
currentVisibleMenu = null;
toggleSubtitleSync("hide");
}
}
function onPointerMove(e) {
if ("mouse" === (e.pointerType || (layoutManager.mobile ? "touch" : "mouse"))) {
var eventX = e.screenX || 0;
var eventY = e.screenY || 0;
var obj = lastPointerMoveData;
if (!obj) {
lastPointerMoveData = {
x: eventX,
y: eventY
};
return;
}
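            // Ignore movements under 10px on both axes so pointer jitter
            // doesn't keep re-showing the OSD.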
if (Math.abs(eventX - obj.x) < 10 && Math.abs(eventY - obj.y) < 10) {
return;
}
obj.x = eventX;
obj.y = eventY;
showOsd();
}
}
function onInputCommand(e) {
var player = currentPlayer;
switch (e.detail.command) {
case "left":
if ("osd" === currentVisibleMenu) {
showOsd();
} else {
if (!currentVisibleMenu) {
e.preventDefault();
playbackManager.rewind(player);
}
}
break;
case "right":
if ("osd" === currentVisibleMenu) {
showOsd();
} else if (!currentVisibleMenu) {
e.preventDefault();
playbackManager.fastForward(player);
}
break;
case "pageup":
playbackManager.nextChapter(player);
break;
case "pagedown":
playbackManager.previousChapter(player);
break;
case "up":
case "down":
case "select":
case "menu":
case "info":
case "play":
case "playpause":
case "pause":
case "fastforward":
case "rewind":
case "next":
case "previous":
showOsd();
break;
case "record":
onRecordingCommand();
showOsd();
break;
case "togglestats":
toggleStats();
}
}
function onRecordingCommand() {
var btnRecord = view.querySelector(".btnRecord");
if (!btnRecord.classList.contains("hide")) {
btnRecord.click();
}
}
function updateFullscreenIcon() {
if (playbackManager.isFullscreen(currentPlayer)) {
view.querySelector(".btnFullscreen").setAttribute("title", globalize.translate("ExitFullscreen"));
view.querySelector(".btnFullscreen i").innerHTML = "";
} else {
view.querySelector(".btnFullscreen").setAttribute("title", globalize.translate("Fullscreen") + " (f)");
view.querySelector(".btnFullscreen i").innerHTML = "";
}
}
function onPlayerChange() {
bindToPlayer(playbackManager.getCurrentPlayer());
}
function onStateChanged(event, state) {
var player = this;
if (state.NowPlayingItem) {
isEnabled = true;
updatePlayerStateInternal(event, player, state);
updatePlaylist(player);
enableStopOnBack(true);
}
}
function onPlayPauseStateChanged(e) {
if (isEnabled) {
updatePlayPauseState(this.paused());
}
}
function onVolumeChanged(e) {
if (isEnabled) {
var player = this;
updatePlayerVolumeState(player, player.isMuted(), player.getVolume());
}
}
function onPlaybackStart(e, state) {
console.log("nowplaying event: " + e.type);
var player = this;
onStateChanged.call(player, e, state);
resetUpNextDialog();
}
function resetUpNextDialog() {
comingUpNextDisplayed = false;
var dlg = currentUpNextDialog;
if (dlg) {
dlg.destroy();
currentUpNextDialog = null;
}
}
function onPlaybackStopped(e, state) {
currentRuntimeTicks = null;
resetUpNextDialog();
console.log("nowplaying event: " + e.type);
if ("Video" !== state.NextMediaType) {
view.removeEventListener("viewbeforehide", onViewHideStopPlayback);
Emby.Page.back();
}
}
function onMediaStreamsChanged(e) {
var player = this;
var state = playbackManager.getPlayerState(player);
onStateChanged.call(player, {
type: "init"
}, state);
}
function onBeginFetch() {
document.querySelector(".osdMediaStatus").classList.remove("hide");
}
function onEndFetch() {
document.querySelector(".osdMediaStatus").classList.add("hide");
}
function bindToPlayer(player) {
if (player !== currentPlayer) {
releaseCurrentPlayer();
currentPlayer = player;
if (!player) return;
}
var state = playbackManager.getPlayerState(player);
onStateChanged.call(player, {
type: "init"
}, state);
events.on(player, "playbackstart", onPlaybackStart);
events.on(player, "playbackstop", onPlaybackStopped);
events.on(player, "volumechange", onVolumeChanged);
events.on(player, "pause", onPlayPauseStateChanged);
events.on(player, "unpause", onPlayPauseStateChanged);
events.on(player, "timeupdate", onTimeUpdate);
events.on(player, "fullscreenchange", updateFullscreenIcon);
events.on(player, "mediastreamschange", onMediaStreamsChanged);
events.on(player, "beginFetch", onBeginFetch);
events.on(player, "endFetch", onEndFetch);
resetUpNextDialog();
if (player.isFetching) {
onBeginFetch();
}
}
function releaseCurrentPlayer() {
destroyStats();
destroySubtitleSync();
resetUpNextDialog();
var player = currentPlayer;
if (player) {
events.off(player, "playbackstart", onPlaybackStart);
events.off(player, "playbackstop", onPlaybackStopped);
events.off(player, "volumechange", onVolumeChanged);
events.off(player, "pause", onPlayPauseStateChanged);
events.off(player, "unpause", onPlayPauseStateChanged);
events.off(player, "timeupdate", onTimeUpdate);
events.off(player, "fullscreenchange", updateFullscreenIcon);
events.off(player, "mediastreamschange", onMediaStreamsChanged);
currentPlayer = null;
}
}
function onTimeUpdate(e) {
if (isEnabled) {
var now = new Date().getTime();
if (!(now - lastUpdateTime < 700)) {
lastUpdateTime = now;
var player = this;
currentRuntimeTicks = playbackManager.duration(player);
var currentTime = playbackManager.currentTime(player);
updateTimeDisplay(currentTime, currentRuntimeTicks, playbackManager.playbackStartTime(player), playbackManager.getBufferedRanges(player));
var item = currentItem;
refreshProgramInfoIfNeeded(player, item);
showComingUpNextIfNeeded(player, item, currentTime, currentRuntimeTicks);
}
}
}
function showComingUpNextIfNeeded(player, currentItem, currentTimeTicks, runtimeTicks) {
if (runtimeTicks && currentTimeTicks && !comingUpNextDisplayed && !currentVisibleMenu && "Episode" === currentItem.Type && userSettings.enableNextVideoInfoOverlay()) {
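                // Durations are in 100ns ticks (1e7 per second): prompt 40s
                // before the end for items of 50+ minutes, 35s for 40+
                // minutes, otherwise 30s - and only for items of at least
                // 10 minutes with at least 20s remaining.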
var showAtSecondsLeft = runtimeTicks >= 3e10 ? 40 : runtimeTicks >= 24e9 ? 35 : 30;
var showAtTicks = runtimeTicks - 1e3 * showAtSecondsLeft * 1e4;
var timeRemainingTicks = runtimeTicks - currentTimeTicks;
if (currentTimeTicks >= showAtTicks && runtimeTicks >= 6e9 && timeRemainingTicks >= 2e8) {
showComingUpNext(player);
}
}
}
function onUpNextHidden() {
if ("upnext" === currentVisibleMenu) {
currentVisibleMenu = null;
}
}
function showComingUpNext(player) {
require(["upNextDialog"], function (UpNextDialog) {
if (!(currentVisibleMenu || currentUpNextDialog)) {
currentVisibleMenu = "upnext";
comingUpNextDisplayed = true;
playbackManager.nextItem(player).then(function (nextItem) {
currentUpNextDialog = new UpNextDialog({
parent: view.querySelector(".upNextContainer"),
player: player,
nextItem: nextItem
});
events.on(currentUpNextDialog, "hide", onUpNextHidden);
}, onUpNextHidden);
}
});
}
function refreshProgramInfoIfNeeded(player, item) {
if ("TvChannel" === item.Type) {
var program = item.CurrentProgram;
if (program && program.EndDate) {
try {
var endDate = datetime.parseISO8601Date(program.EndDate);
if (new Date().getTime() >= endDate.getTime()) {
console.log("program info needs to be refreshed");
var state = playbackManager.getPlayerState(player);
onStateChanged.call(player, {
type: "init"
}, state);
}
} catch (e) {
console.log("Error parsing date: " + program.EndDate);
}
}
}
}
function updatePlayPauseState(isPaused) {
var button = view.querySelector(".btnPause i");
if (isPaused) {
button.innerHTML = "";
button.setAttribute("title", globalize.translate("ButtonPlay") + " (k)");
} else {
button.innerHTML = "";
button.setAttribute("title", globalize.translate("ButtonPause") + " (k)");
}
}
function updatePlayerStateInternal(event, player, state) {
var playState = state.PlayState || {};
updatePlayPauseState(playState.IsPaused);
var supportedCommands = playbackManager.getSupportedCommands(player);
currentPlayerSupportedCommands = supportedCommands;
supportsBrightnessChange = -1 !== supportedCommands.indexOf("SetBrightness");
updatePlayerVolumeState(player, playState.IsMuted, playState.VolumeLevel);
if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) {
nowPlayingPositionSlider.disabled = !playState.CanSeek;
}
btnFastForward.disabled = !playState.CanSeek;
btnRewind.disabled = !playState.CanSeek;
var nowPlayingItem = state.NowPlayingItem || {};
playbackStartTimeTicks = playState.PlaybackStartTimeTicks;
updateTimeDisplay(playState.PositionTicks, nowPlayingItem.RunTimeTicks, playState.PlaybackStartTimeTicks, playState.BufferedRanges || []);
updateNowPlayingInfo(player, state);
if (state.MediaSource && state.MediaSource.SupportsTranscoding && -1 !== supportedCommands.indexOf("SetMaxStreamingBitrate")) {
view.querySelector(".btnVideoOsdSettings").classList.remove("hide");
} else {
view.querySelector(".btnVideoOsdSettings").classList.add("hide");
}
var isProgressClear = state.MediaSource && null == state.MediaSource.RunTimeTicks;
nowPlayingPositionSlider.setIsClear(isProgressClear);
if (-1 === supportedCommands.indexOf("ToggleFullscreen") || player.isLocalPlayer && layoutManager.tv && playbackManager.isFullscreen(player)) {
view.querySelector(".btnFullscreen").classList.add("hide");
} else {
view.querySelector(".btnFullscreen").classList.remove("hide");
}
if (-1 === supportedCommands.indexOf("PictureInPicture")) {
view.querySelector(".btnPip").classList.add("hide");
} else {
view.querySelector(".btnPip").classList.remove("hide");
}
updateFullscreenIcon();
}
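        // Percentage of the program's air window that has elapsed, e.g. 15
        // minutes into a 60-minute program yields 25.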
function getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, currentTimeMs) {
return (currentTimeMs - programStartDateMs) / programRuntimeMs * 100;
}
function updateTimeDisplay(positionTicks, runtimeTicks, playbackStartTimeTicks, bufferedRanges) {
if (enableProgressByTimeOfDay) {
if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) {
if (programStartDateMs && programEndDateMs) {
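                        // Ticks are 100ns units; dividing by 1e4 converts to ms.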
var currentTimeMs = (playbackStartTimeTicks + (positionTicks || 0)) / 1e4;
var programRuntimeMs = programEndDateMs - programStartDateMs;
                        nowPlayingPositionSlider.value = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, currentTimeMs);
                        if (bufferedRanges.length) {
var rangeStart = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, (playbackStartTimeTicks + (bufferedRanges[0].start || 0)) / 1e4);
var rangeEnd = getDisplayPercentByTimeOfDay(programStartDateMs, programRuntimeMs, (playbackStartTimeTicks + (bufferedRanges[0].end || 0)) / 1e4);
nowPlayingPositionSlider.setBufferedRanges([{
start: rangeStart,
end: rangeEnd
}]);
} else {
nowPlayingPositionSlider.setBufferedRanges([]);
}
} else {
nowPlayingPositionSlider.value = 0;
nowPlayingPositionSlider.setBufferedRanges([]);
}
}
nowPlayingPositionText.innerHTML = "";
nowPlayingDurationText.innerHTML = "";
} else {
if (nowPlayingPositionSlider && !nowPlayingPositionSlider.dragging) {
if (runtimeTicks) {
var pct = positionTicks / runtimeTicks;
pct *= 100;
nowPlayingPositionSlider.value = pct;
} else {
nowPlayingPositionSlider.value = 0;
}
if (runtimeTicks && null != positionTicks && currentRuntimeTicks && !enableProgressByTimeOfDay && currentItem.RunTimeTicks && "Recording" !== currentItem.Type) {
endsAtText.innerHTML = " - " + mediaInfo.getEndsAtFromPosition(runtimeTicks, positionTicks, true);
} else {
endsAtText.innerHTML = "";
}
}
if (nowPlayingPositionSlider) {
nowPlayingPositionSlider.setBufferedRanges(bufferedRanges, runtimeTicks, positionTicks);
}
updateTimeText(nowPlayingPositionText, positionTicks);
updateTimeText(nowPlayingDurationText, runtimeTicks, true);
}
}
function updatePlayerVolumeState(player, isMuted, volumeLevel) {
var supportedCommands = currentPlayerSupportedCommands;
var showMuteButton = true;
var showVolumeSlider = true;
var volumeSlider = view.querySelector('.osdVolumeSliderContainer');
var progressElement = volumeSlider.querySelector('.mdl-slider-background-lower');
if (-1 === supportedCommands.indexOf("Mute")) {
showMuteButton = false;
}
if (-1 === supportedCommands.indexOf("SetVolume")) {
showVolumeSlider = false;
}
if (player.isLocalPlayer && appHost.supports("physicalvolumecontrol")) {
showMuteButton = false;
showVolumeSlider = false;
}
if (isMuted) {
view.querySelector(".buttonMute").setAttribute("title", globalize.translate("Unmute") + " (m)");
view.querySelector(".buttonMute i").innerHTML = "";
} else {
view.querySelector(".buttonMute").setAttribute("title", globalize.translate("Mute") + " (m)");
view.querySelector(".buttonMute i").innerHTML = "";
}
if (progressElement) {
progressElement.style.width = (volumeLevel || 0) + '%';
}
if (showMuteButton) {
view.querySelector(".buttonMute").classList.remove("hide");
} else {
view.querySelector(".buttonMute").classList.add("hide");
}
if (nowPlayingVolumeSlider) {
if (showVolumeSlider) {
nowPlayingVolumeSliderContainer.classList.remove("hide");
} else {
nowPlayingVolumeSliderContainer.classList.add("hide");
}
if (!nowPlayingVolumeSlider.dragging) {
nowPlayingVolumeSlider.value = volumeLevel || 0;
}
}
}
function updatePlaylist(player) {
var btnPreviousTrack = view.querySelector(".btnPreviousTrack");
var btnNextTrack = view.querySelector(".btnNextTrack");
btnPreviousTrack.classList.remove("hide");
btnNextTrack.classList.remove("hide");
btnNextTrack.disabled = false;
btnPreviousTrack.disabled = false;
}
function updateTimeText(elem, ticks, divider) {
if (null == ticks) {
elem.innerHTML = "";
return;
}
var html = datetime.getDisplayRunningTime(ticks);
if (divider) {
html = " / " + html;
}
elem.innerHTML = html;
}
function onSettingsButtonClick(e) {
var btn = this;
require(["playerSettingsMenu"], function (playerSettingsMenu) {
var player = currentPlayer;
if (player) {
// show subtitle offset feature only if player and media support it
var showSubOffset = playbackManager.supportSubtitleOffset(player) &&
playbackManager.canHandleOffsetOnCurrentSubtitle(player);
playerSettingsMenu.show({
mediaType: "Video",
player: player,
positionTo: btn,
stats: true,
suboffset: showSubOffset,
onOption: onSettingsOption
});
}
});
}
function onSettingsOption(selectedOption) {
if ("stats" === selectedOption) {
toggleStats();
} else if ("suboffset" === selectedOption) {
var player = currentPlayer;
if (player) {
playbackManager.enableShowingSubtitleOffset(player);
toggleSubtitleSync();
}
}
}
function toggleStats() {
require(["playerStats"], function (PlayerStats) {
var player = currentPlayer;
if (player) {
if (statsOverlay) {
statsOverlay.toggle();
} else {
statsOverlay = new PlayerStats({
player: player
});
}
}
});
}
function destroyStats() {
if (statsOverlay) {
statsOverlay.destroy();
statsOverlay = null;
}
}
function showAudioTrackSelection() {
var player = currentPlayer;
var audioTracks = playbackManager.audioTracks(player);
var currentIndex = playbackManager.getAudioStreamIndex(player);
var menuItems = audioTracks.map(function (stream) {
var opt = {
name: stream.DisplayTitle,
id: stream.Index
};
if (stream.Index === currentIndex) {
opt.selected = true;
}
return opt;
});
var positionTo = this;
require(["actionsheet"], function (actionsheet) {
actionsheet.show({
items: menuItems,
title: globalize.translate("Audio"),
positionTo: positionTo
}).then(function (id) {
var index = parseInt(id);
if (index !== currentIndex) {
playbackManager.setAudioStreamIndex(index, player);
}
});
});
}
function showSubtitleTrackSelection() {
var player = currentPlayer;
var streams = playbackManager.subtitleTracks(player);
var currentIndex = playbackManager.getSubtitleStreamIndex(player);
if (null == currentIndex) {
currentIndex = -1;
}
streams.unshift({
Index: -1,
DisplayTitle: globalize.translate("Off")
});
var menuItems = streams.map(function (stream) {
var opt = {
name: stream.DisplayTitle,
id: stream.Index
};
if (stream.Index === currentIndex) {
opt.selected = true;
}
return opt;
});
var positionTo = this;
require(["actionsheet"], function (actionsheet) {
actionsheet.show({
title: globalize.translate("Subtitles"),
items: menuItems,
positionTo: positionTo
}).then(function (id) {
var index = parseInt(id);
if (index !== currentIndex) {
playbackManager.setSubtitleStreamIndex(index, player);
}
toggleSubtitleSync();
});
});
}
function toggleSubtitleSync(action) {
require(["subtitleSync"], function (SubtitleSync) {
var player = currentPlayer;
if (subtitleSyncOverlay) {
subtitleSyncOverlay.toggle(action);
} else if(player){
subtitleSyncOverlay = new SubtitleSync(player);
}
});
}
function destroySubtitleSync() {
if (subtitleSyncOverlay) {
subtitleSyncOverlay.destroy();
subtitleSyncOverlay = null;
}
}
function onWindowKeyDown(e) {
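            // keyCode 32 is the spacebar: toggle play/pause when no menu is open.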
if (!currentVisibleMenu && 32 === e.keyCode) {
playbackManager.playPause(currentPlayer);
return void showOsd();
}
switch (e.key) {
case "k":
playbackManager.playPause(currentPlayer);
showOsd();
break;
case "l":
case "ArrowRight":
case "Right":
playbackManager.fastForward(currentPlayer);
showOsd();
break;
case "j":
case "ArrowLeft":
case "Left":
playbackManager.rewind(currentPlayer);
showOsd();
break;
case "f":
if (!e.ctrlKey && !e.metaKey) {
playbackManager.toggleFullscreen(currentPlayer);
showOsd();
}
break;
case "m":
playbackManager.toggleMute(currentPlayer);
showOsd();
break;
case "NavigationLeft":
case "GamepadDPadLeft":
case "GamepadLeftThumbstickLeft":
// Ignores gamepad events that are always triggered, even when not focused.
if (document.hasFocus()) {
playbackManager.rewind(currentPlayer);
showOsd();
}
break;
case "NavigationRight":
case "GamepadDPadRight":
case "GamepadLeftThumbstickRight":
// Ignores gamepad events that are always triggered, even when not focused.
if (document.hasFocus()) {
playbackManager.fastForward(currentPlayer);
showOsd();
}
}
}
function getImgUrl(item, chapter, index, maxWidth, apiClient) {
if (chapter.ImageTag) {
return apiClient.getScaledImageUrl(item.Id, {
maxWidth: maxWidth,
tag: chapter.ImageTag,
type: "Chapter",
index: index
});
}
return null;
}
function getChapterBubbleHtml(apiClient, item, chapters, positionTicks) {
var chapter;
var index = -1;
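            // Find the last chapter that starts at or before the current position.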
for (var i = 0, length = chapters.length; i < length; i++) {
var currentChapter = chapters[i];
if (positionTicks >= currentChapter.StartPositionTicks) {
chapter = currentChapter;
index = i;
}
}
if (!chapter) {
return null;
}
var src = getImgUrl(item, chapter, index, 400, apiClient);
if (src) {
var html = '<div class="chapterThumbContainer">';
html += '<img class="chapterThumb" src="' + src + '" />';
html += '<div class="chapterThumbTextContainer">';
html += '<div class="chapterThumbText chapterThumbText-dim">';
html += chapter.Name;
html += "</div>";
html += '<h2 class="chapterThumbText">';
html += datetime.getDisplayRunningTime(positionTicks);
html += "</h2>";
html += "</div>";
return html + "</div>";
}
return null;
}
function onViewHideStopPlayback() {
if (playbackManager.isPlayingVideo()) {
require(['shell'], function (shell) {
shell.disableFullscreen();
});
var player = currentPlayer;
view.removeEventListener("viewbeforehide", onViewHideStopPlayback);
releaseCurrentPlayer();
playbackManager.stop(player);
}
}
function enableStopOnBack(enabled) {
view.removeEventListener("viewbeforehide", onViewHideStopPlayback);
if (enabled && playbackManager.isPlayingVideo(currentPlayer)) {
view.addEventListener("viewbeforehide", onViewHideStopPlayback);
}
}
require(['shell'], function (shell) {
shell.enableFullscreen();
});
var currentPlayer;
var comingUpNextDisplayed;
var currentUpNextDialog;
var isEnabled;
var currentItem;
var recordingButtonManager;
var enableProgressByTimeOfDay;
var supportsBrightnessChange;
var currentVisibleMenu;
var statsOverlay;
var osdHideTimeout;
var lastPointerMoveData;
var self = this;
var currentPlayerSupportedCommands = [];
var currentRuntimeTicks = 0;
var lastUpdateTime = 0;
var programStartDateMs = 0;
var programEndDateMs = 0;
var playbackStartTimeTicks = 0;
var subtitleSyncOverlay;
var volumeSliderTimer;
var nowPlayingVolumeSlider = view.querySelector(".osdVolumeSlider");
var nowPlayingVolumeSliderContainer = view.querySelector(".osdVolumeSliderContainer");
var nowPlayingPositionSlider = view.querySelector(".osdPositionSlider");
var nowPlayingPositionText = view.querySelector(".osdPositionText");
var nowPlayingDurationText = view.querySelector(".osdDurationText");
var startTimeText = view.querySelector(".startTimeText");
var endTimeText = view.querySelector(".endTimeText");
var endsAtText = view.querySelector(".endsAtText");
var btnRewind = view.querySelector(".btnRewind");
var btnFastForward = view.querySelector(".btnFastForward");
var transitionEndEventName = dom.whichTransitionEvent();
var headerElement = document.querySelector(".skinHeader");
var osdBottomElement = document.querySelector(".videoOsdBottom-maincontrols");
view.addEventListener("viewbeforeshow", function (e) {
headerElement.classList.add("osdHeader");
Emby.Page.setTransparency("full");
});
view.addEventListener("viewshow", function (e) {
try {
events.on(playbackManager, "playerchange", onPlayerChange);
bindToPlayer(playbackManager.getCurrentPlayer());
dom.addEventListener(document, window.PointerEvent ? "pointermove" : "mousemove", onPointerMove, {
passive: true
});
showOsd();
inputManager.on(window, onInputCommand);
dom.addEventListener(window, "keydown", onWindowKeyDown, {
passive: true
});
} catch(e) {
require(['appRouter'], function(appRouter) {
appRouter.showDirect('/');
});
}
});
view.addEventListener("viewbeforehide", function () {
if (statsOverlay) {
statsOverlay.enabled(false);
}
dom.removeEventListener(window, "keydown", onWindowKeyDown, {
passive: true
});
stopOsdHideTimer();
headerElement.classList.remove("osdHeader");
headerElement.classList.remove("osdHeader-hidden");
dom.removeEventListener(document, window.PointerEvent ? "pointermove" : "mousemove", onPointerMove, {
passive: true
});
inputManager.off(window, onInputCommand);
events.off(playbackManager, "playerchange", onPlayerChange);
releaseCurrentPlayer();
});
view.querySelector(".btnFullscreen").addEventListener("click", function () {
playbackManager.toggleFullscreen(currentPlayer);
});
view.querySelector(".btnPip").addEventListener("click", function () {
playbackManager.togglePictureInPicture(currentPlayer);
});
view.querySelector(".btnVideoOsdSettings").addEventListener("click", onSettingsButtonClick);
view.addEventListener("viewhide", function () {
headerElement.classList.remove("hide");
});
view.addEventListener("viewdestroy", function () {
if (self.touchHelper) {
self.touchHelper.destroy();
self.touchHelper = null;
}
if (recordingButtonManager) {
recordingButtonManager.destroy();
recordingButtonManager = null;
}
destroyStats();
destroySubtitleSync();
});
var lastPointerDown = 0;
dom.addEventListener(view, window.PointerEvent ? "pointerdown" : "click", function (e) {
if (dom.parentWithClass(e.target, ["videoOsdBottom", "upNextContainer"])) {
return void showOsd();
}
var pointerType = e.pointerType || (layoutManager.mobile ? "touch" : "mouse");
var now = new Date().getTime();
switch (pointerType) {
case "touch":
if (now - lastPointerDown > 300) {
lastPointerDown = now;
toggleOsd();
}
break;
case "mouse":
if (!e.button) {
playbackManager.playPause(currentPlayer);
showOsd();
}
break;
default:
playbackManager.playPause(currentPlayer);
showOsd();
}
}, {
passive: true
});
if (browser.touch) {
dom.addEventListener(view, "dblclick", onDoubleClick, {});
} else {
var options = { passive: true };
dom.addEventListener(view, "dblclick", function () { playbackManager.toggleFullscreen(currentPlayer); }, options);
}
view.querySelector(".buttonMute").addEventListener("click", function () {
playbackManager.toggleMute(currentPlayer);
});
nowPlayingVolumeSlider.addEventListener("change", function () {
if(volumeSliderTimer){
                // interrupt and remove the existing timer
clearTimeout(volumeSliderTimer);
volumeSliderTimer = null;
}
playbackManager.setVolume(this.value, currentPlayer);
});
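        // While dragging, throttle live volume updates to one setVolume call
        // per 700ms; the "change" handler above commits the final value.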
nowPlayingVolumeSlider.addEventListener("mousemove", function () {
if(!volumeSliderTimer){
var that = this;
// register new timer
volumeSliderTimer = setTimeout(function(){
playbackManager.setVolume(that.value, currentPlayer);
// delete timer after completion
volumeSliderTimer = null;
}, 700);
}
});
nowPlayingVolumeSlider.addEventListener("touchmove", function () {
if(!volumeSliderTimer){
var that = this;
// register new timer
volumeSliderTimer = setTimeout(function(){
playbackManager.setVolume(that.value, currentPlayer);
// delete timer after completion
volumeSliderTimer = null;
}, 700);
}
});
nowPlayingPositionSlider.addEventListener("change", function () {
var player = currentPlayer;
if (player) {
var newPercent = parseFloat(this.value);
if (enableProgressByTimeOfDay) {
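                    // Map the slider percentage of the air window back to
                    // stream ticks: ms * 1e4 gives ticks, minus the
                    // wall-clock tick offset at which playback started.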
var seekAirTimeTicks = newPercent / 100 * (programEndDateMs - programStartDateMs) * 1e4;
seekAirTimeTicks += 1e4 * programStartDateMs;
seekAirTimeTicks -= playbackStartTimeTicks;
playbackManager.seek(seekAirTimeTicks, player);
} else {
playbackManager.seekPercent(newPercent, player);
}
}
});
nowPlayingPositionSlider.getBubbleHtml = function (value) {
showOsd();
if (enableProgressByTimeOfDay) {
if (programStartDateMs && programEndDateMs) {
var ms = programEndDateMs - programStartDateMs;
ms /= 100;
ms *= value;
ms += programStartDateMs;
return '<h1 class="sliderBubbleText">' + getDisplayTimeWithoutAmPm(new Date(parseInt(ms)), true) + "</h1>";
}
return "--:--";
}
if (!currentRuntimeTicks) {
return "--:--";
}
var ticks = currentRuntimeTicks;
ticks /= 100;
ticks *= value;
var item = currentItem;
if (item && item.Chapters && item.Chapters.length && item.Chapters[0].ImageTag) {
var html = getChapterBubbleHtml(connectionManager.getApiClient(item.ServerId), item, item.Chapters, ticks);
if (html) {
return html;
}
}
return '<h1 class="sliderBubbleText">' + datetime.getDisplayRunningTime(ticks) + "</h1>";
};
view.querySelector(".btnPreviousTrack").addEventListener("click", function () {
playbackManager.previousTrack(currentPlayer);
});
view.querySelector(".btnPause").addEventListener("click", function () {
playbackManager.playPause(currentPlayer);
});
view.querySelector(".btnNextTrack").addEventListener("click", function () {
playbackManager.nextTrack(currentPlayer);
});
btnRewind.addEventListener("click", function () {
playbackManager.rewind(currentPlayer);
});
btnFastForward.addEventListener("click", function () {
playbackManager.fastForward(currentPlayer);
});
view.querySelector(".btnAudio").addEventListener("click", showAudioTrackSelection);
view.querySelector(".btnSubtitles").addEventListener("click", showSubtitleTrackSelection);
if (browser.touch) {
(function () {
require(["touchHelper"], function (TouchHelper) {
self.touchHelper = new TouchHelper(view, {
swipeYThreshold: 30,
triggerOnMove: true,
preventDefaultOnMove: true,
ignoreTagNames: ["BUTTON", "INPUT", "TEXTAREA"]
});
events.on(self.touchHelper, "swipeup", onVerticalSwipe);
events.on(self.touchHelper, "swipedown", onVerticalSwipe);
});
})();
}
};
});
| 1 | 11,997 | this doesn't sound pretty... I would rather we have a proper navigation for all modes - AFAIK TV remote navigation is very similar to normal keyboard, but maybe it just uses another key codes - those should be extracted in a single file and defined there depending on context then | jellyfin-jellyfin-web | js |
@@ -205,4 +205,17 @@ function patchDOMElement(dom, newVNode, internal, globalContext, commitQueue) {
dom.firstChild
);
}
+
+ if ('value' in newProps && dom._isControlled) {
+ dom._prevValue = newProps.value;
+
+ if (newProps.value !== dom.value) {
+ setProperty(dom, 'value', i, oldProps.value, 0);
+ }
+ } else if ('checked' in newProps && dom._isControlled) {
+ dom._prevValue = newProps.checked;
+ if (newProps.checked !== dom.checked) {
+ setProperty(dom, 'checked', i, oldProps.checked, 0);
+ }
+ }
} | 1 | import { diffChildren } from './children';
import { setProperty } from './props';
import options from '../options';
import { renderComponent } from './component';
import {
RESET_MODE,
TYPE_TEXT,
TYPE_ELEMENT,
MODE_SUSPENDED,
MODE_ERRORED,
TYPE_ROOT,
MODE_SVG,
UNDEFINED
} from '../constants';
import { getChildDom, getDomSibling } from '../tree';
/**
* Diff two virtual nodes and apply proper changes to the DOM
* @param {import('../internal').PreactElement} parentDom The parent of the DOM element
* @param {import('../internal').VNode | string} newVNode The new virtual node
* @param {import('../internal').Internal} internal The Internal node to patch
* @param {object} globalContext The current context object. Modified by getChildContext
* @param {import('../internal').CommitQueue} commitQueue List of components
* which have callbacks to invoke in commitRoot
* @param {import('../internal').PreactNode} startDom
*/
export function patch(
parentDom,
newVNode,
internal,
globalContext,
commitQueue,
startDom
) {
let dom = internal._dom;
let flags = internal._flags;
if (flags & TYPE_TEXT) {
if (newVNode !== internal.props) {
dom.data = newVNode;
internal.props = newVNode;
}
return dom.nextSibling;
}
	// createElement assigns undefined as the vnode's constructor; objects
	// parsed from JSON keep Object as theirs, so this check rejects them
	// and prevents JSON injection.
if (newVNode.constructor !== UNDEFINED) return null;
if (options._diff) options._diff(internal, newVNode);
if (flags & TYPE_ELEMENT) {
if (newVNode._vnodeId !== internal._vnodeId) {
// @ts-ignore dom is a PreactElement here
patchDOMElement(dom, newVNode, internal, globalContext, commitQueue);
			// Once we have successfully rendered the new VNode, copy its ID over
internal._vnodeId = newVNode._vnodeId;
}
if (options.diffed) options.diffed(internal);
// We successfully rendered this VNode, unset any stored hydration/bailout state:
internal._flags &= RESET_MODE;
return dom.nextSibling;
}
/** @type {import('../internal').PreactNode} */
let nextDomSibling;
	// Root nodes signal an attempt to render into a specific DOM node on the
	// page. They can occur anywhere in the tree, not just at the top.
let prevStartDom = startDom;
let prevParentDom = parentDom;
if (flags & TYPE_ROOT) {
parentDom = newVNode.props._parentDom;
if (parentDom !== prevParentDom) {
startDom = getChildDom(internal) || startDom;
// The `startDom` variable might point to a node from another
// tree from a previous render
if (startDom != null && startDom.parentNode !== parentDom) {
startDom = null;
}
}
}
try {
nextDomSibling = renderComponent(
parentDom,
/** @type {import('../internal').VNode} */
(newVNode),
internal,
globalContext,
commitQueue,
startDom
);
} catch (e) {
// @TODO: assign a new VNode ID here? Or NaN?
// newVNode._vnodeId = 0;
internal._flags |= e.then ? MODE_SUSPENDED : MODE_ERRORED;
options._catchError(e, internal);
return nextDomSibling;
}
if (prevParentDom !== parentDom) {
		// If this is a root node/Portal, and it changed the parentDom of its
		// children, then we need to determine which dom node the diff should
// continue with.
if (prevStartDom == null || prevStartDom.parentNode == prevParentDom) {
// If prevStartDom == null, then we are diffing a root node that
// didn't have a startDom to begin with, so we can just return null.
//
// Or, if the previous value for start dom still has the same parent
			// DOM as the root node's parent tree, then we can use it. This case
// assumes the root node rendered its children into a new parent.
nextDomSibling = prevStartDom;
} else {
// Here, if the parentDoms are different and prevStartDom has moved into
// a new parentDom, we'll assume the root node moved prevStartDom under
// the new parentDom. Because of this change, we need to search the
// internal tree for the next DOM sibling the tree should begin with
nextDomSibling = getDomSibling(internal);
}
}
if (options.diffed) options.diffed(internal);
// We successfully rendered this VNode, unset any stored hydration/bailout state:
internal._flags &= RESET_MODE;
	// Once we have successfully rendered the new VNode, copy its ID over
internal._vnodeId = newVNode._vnodeId;
return nextDomSibling;
}
/**
* Diff two virtual nodes representing DOM element
* @param {import('../internal').PreactElement} dom The DOM element representing
* the virtual nodes being diffed
* @param {import('../internal').VNode} newVNode The new virtual node
* @param {import('../internal').Internal} internal The Internal node to patch
* @param {object} globalContext The current context object
* @param {import('../internal').CommitQueue} commitQueue List of components
* which have callbacks to invoke in commitRoot
*/
function patchDOMElement(dom, newVNode, internal, globalContext, commitQueue) {
let oldProps = internal.props,
newProps = (internal.props = newVNode.props),
isSvg = internal._flags & MODE_SVG,
i,
value,
tmp,
newHtml,
oldHtml,
newChildren;
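	// First pass: unset old props that are absent from the new vnode. key and
	// children are skipped; dangerouslySetInnerHTML is stashed for the diff below.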
for (i in oldProps) {
value = oldProps[i];
if (i === 'key') {
} else if (i === 'children') {
} else if (i === 'dangerouslySetInnerHTML') {
oldHtml = value;
} else if (!(i in newProps)) {
setProperty(dom, i, null, value, isSvg);
}
}
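	// Second pass: apply new or changed props. value/checked are also rewritten
	// whenever the live DOM value has drifted from the prop, keeping controlled
	// form fields in sync.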
for (i in newProps) {
value = newProps[i];
if (i === 'key') {
} else if (i === 'children') {
newChildren = value;
} else if (i === 'dangerouslySetInnerHTML') {
newHtml = value;
} else if (
value !== (tmp = oldProps[i]) ||
((i === 'checked' || i === 'value') && value != null && value !== dom[i])
) {
setProperty(dom, i, value, tmp, isSvg);
}
}
// If the new vnode didn't have dangerouslySetInnerHTML, diff its children
if (newHtml) {
value = newHtml.__html;
		// Avoid re-applying the same '__html' if it did not change between re-renders
if (!oldHtml || (value !== oldHtml.__html && value !== dom.innerHTML)) {
dom.innerHTML = value;
}
internal._children = null;
} else {
if (oldHtml) dom.innerHTML = '';
diffChildren(
dom,
newChildren && Array.isArray(newChildren) ? newChildren : [newChildren],
internal,
globalContext,
commitQueue,
dom.firstChild
);
}
}
| 1 | 17,294 | After diffing the children we check whether the value got out of sync, if it did we update it. We also update the `_prevValue` to prepare for the next event hitting our controlled component | preactjs-preact | js |
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// Package node provides the glue-code needed in order
+// to start a Bee node.
package node
import ( | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package node
import (
"context"
"crypto/ecdsa"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"path/filepath"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/addressbook"
"github.com/ethersphere/bee/pkg/api"
"github.com/ethersphere/bee/pkg/crypto"
"github.com/ethersphere/bee/pkg/debugapi"
"github.com/ethersphere/bee/pkg/hive"
"github.com/ethersphere/bee/pkg/kademlia"
"github.com/ethersphere/bee/pkg/localstore"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/metrics"
"github.com/ethersphere/bee/pkg/netstore"
"github.com/ethersphere/bee/pkg/p2p/libp2p"
"github.com/ethersphere/bee/pkg/pingpong"
"github.com/ethersphere/bee/pkg/pricing"
"github.com/ethersphere/bee/pkg/pss"
"github.com/ethersphere/bee/pkg/puller"
"github.com/ethersphere/bee/pkg/pullsync"
"github.com/ethersphere/bee/pkg/pullsync/pullstorage"
"github.com/ethersphere/bee/pkg/pusher"
"github.com/ethersphere/bee/pkg/pushsync"
"github.com/ethersphere/bee/pkg/recovery"
"github.com/ethersphere/bee/pkg/resolver/multiresolver"
"github.com/ethersphere/bee/pkg/retrieval"
settlement "github.com/ethersphere/bee/pkg/settlement"
"github.com/ethersphere/bee/pkg/settlement/pseudosettle"
"github.com/ethersphere/bee/pkg/settlement/swap"
"github.com/ethersphere/bee/pkg/settlement/swap/chequebook"
"github.com/ethersphere/bee/pkg/settlement/swap/swapprotocol"
"github.com/ethersphere/bee/pkg/settlement/swap/transaction"
"github.com/ethersphere/bee/pkg/statestore/leveldb"
mockinmem "github.com/ethersphere/bee/pkg/statestore/mock"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/tags"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/ethersphere/bee/pkg/traversal"
ma "github.com/multiformats/go-multiaddr"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type Bee struct {
p2pService io.Closer
p2pCancel context.CancelFunc
apiCloser io.Closer
apiServer *http.Server
debugAPIServer *http.Server
resolverCloser io.Closer
errorLogWriter *io.PipeWriter
tracerCloser io.Closer
tagsCloser io.Closer
stateStoreCloser io.Closer
localstoreCloser io.Closer
topologyCloser io.Closer
pusherCloser io.Closer
pullerCloser io.Closer
pullSyncCloser io.Closer
pssCloser io.Closer
recoveryHandleCleanup func()
}
type Options struct {
DataDir string
DBCapacity uint64
APIAddr string
DebugAPIAddr string
Addr string
NATAddr string
EnableWS bool
EnableQUIC bool
WelcomeMessage string
Bootnodes []string
CORSAllowedOrigins []string
Logger logging.Logger
Standalone bool
TracingEnabled bool
TracingEndpoint string
TracingServiceName string
GlobalPinningEnabled bool
PaymentThreshold uint64
PaymentTolerance uint64
PaymentEarly uint64
ResolverConnectionCfgs []multiresolver.ConnectionConfig
GatewayMode bool
SwapEndpoint string
SwapFactoryAddress string
SwapInitialDeposit uint64
SwapEnable bool
}
func NewBee(addr string, swarmAddress swarm.Address, publicKey ecdsa.PublicKey, signer crypto.Signer, networkID uint64, logger logging.Logger, libp2pPrivateKey, pssPrivateKey *ecdsa.PrivateKey, o Options) (*Bee, error) {
tracer, tracerCloser, err := tracing.NewTracer(&tracing.Options{
Enabled: o.TracingEnabled,
Endpoint: o.TracingEndpoint,
ServiceName: o.TracingServiceName,
})
if err != nil {
return nil, fmt.Errorf("tracer: %w", err)
}
p2pCtx, p2pCancel := context.WithCancel(context.Background())
b := &Bee{
p2pCancel: p2pCancel,
errorLogWriter: logger.WriterLevel(logrus.ErrorLevel),
tracerCloser: tracerCloser,
}
var stateStore storage.StateStorer
if o.DataDir == "" {
stateStore = mockinmem.NewStateStore()
logger.Warning("using in-mem state store. no node state will be persisted")
} else {
stateStore, err = leveldb.NewStateStore(filepath.Join(o.DataDir, "statestore"))
if err != nil {
return nil, fmt.Errorf("statestore: %w", err)
}
}
b.stateStoreCloser = stateStore
addressbook := addressbook.New(stateStore)
var chequebookService chequebook.Service
var chequeStore chequebook.ChequeStore
var cashoutService chequebook.CashoutService
var overlayEthAddress common.Address
if o.SwapEnable {
swapBackend, err := ethclient.Dial(o.SwapEndpoint)
if err != nil {
return nil, err
}
transactionService, err := transaction.NewService(logger, swapBackend, signer)
if err != nil {
return nil, err
}
overlayEthAddress, err = signer.EthereumAddress()
if err != nil {
return nil, err
}
chainID, err := swapBackend.ChainID(p2pCtx)
if err != nil {
logger.Infof("could not connect to backend at %v. In a swap-enabled network a working blockchain node (for goerli network in production) is required. Check your node or specify another node using --swap-endpoint.", o.SwapEndpoint)
return nil, fmt.Errorf("could not get chain id from ethereum backend: %w", err)
}
var factoryAddress common.Address
if o.SwapFactoryAddress == "" {
var found bool
factoryAddress, found = chequebook.DiscoverFactoryAddress(chainID.Int64())
if !found {
return nil, errors.New("no known factory address for this network")
}
logger.Infof("using default factory address for chain id %d: %x", chainID, factoryAddress)
} else if !common.IsHexAddress(o.SwapFactoryAddress) {
return nil, errors.New("malformed factory address")
} else {
factoryAddress = common.HexToAddress(o.SwapFactoryAddress)
logger.Infof("using custom factory address: %x", factoryAddress)
}
chequebookFactory, err := chequebook.NewFactory(swapBackend, transactionService, factoryAddress, chequebook.NewSimpleSwapFactoryBindingFunc)
if err != nil {
return nil, err
}
chequeSigner := chequebook.NewChequeSigner(signer, chainID.Int64())
// initialize chequebook logic
chequebookService, err = chequebook.Init(p2pCtx,
chequebookFactory,
stateStore,
logger,
o.SwapInitialDeposit,
transactionService,
swapBackend,
chainID.Int64(),
overlayEthAddress,
chequeSigner,
chequebook.NewSimpleSwapBindings,
chequebook.NewERC20Bindings)
if err != nil {
return nil, err
}
chequeStore = chequebook.NewChequeStore(stateStore, swapBackend, chequebookFactory, chainID.Int64(), overlayEthAddress, chequebook.NewSimpleSwapBindings, chequebook.RecoverCheque)
cashoutService, err = chequebook.NewCashoutService(stateStore, chequebook.NewSimpleSwapBindings, swapBackend, transactionService, chequeStore)
if err != nil {
return nil, err
}
}
p2ps, err := libp2p.New(p2pCtx, signer, networkID, swarmAddress, addr, addressbook, stateStore, logger, tracer, libp2p.Options{
PrivateKey: libp2pPrivateKey,
NATAddr: o.NATAddr,
EnableWS: o.EnableWS,
EnableQUIC: o.EnableQUIC,
Standalone: o.Standalone,
WelcomeMessage: o.WelcomeMessage,
})
if err != nil {
return nil, fmt.Errorf("p2p service: %w", err)
}
b.p2pService = p2ps
if !o.Standalone {
if natManager := p2ps.NATManager(); natManager != nil {
// wait for nat manager to init
logger.Debug("initializing NAT manager")
select {
case <-natManager.Ready():
				// this is a magic sleep to give the NAT time to sync the mappings;
				// it is a hack, kind of alchemy, and should be improved
time.Sleep(3 * time.Second)
logger.Debug("NAT manager initialized")
case <-time.After(10 * time.Second):
logger.Warning("NAT manager init timeout")
}
}
}
// Construct protocols.
pingPong := pingpong.New(p2ps, logger, tracer)
if err = p2ps.AddProtocol(pingPong.Protocol()); err != nil {
return nil, fmt.Errorf("pingpong service: %w", err)
}
hive := hive.New(p2ps, addressbook, networkID, logger)
if err = p2ps.AddProtocol(hive.Protocol()); err != nil {
return nil, fmt.Errorf("hive service: %w", err)
}
var bootnodes []ma.Multiaddr
if o.Standalone {
logger.Info("Starting node in standalone mode, no p2p connections will be made or accepted")
} else {
for _, a := range o.Bootnodes {
addr, err := ma.NewMultiaddr(a)
if err != nil {
logger.Debugf("multiaddress fail %s: %v", a, err)
logger.Warningf("invalid bootnode address %s", a)
continue
}
bootnodes = append(bootnodes, addr)
}
}
var settlement settlement.Interface
var swapService *swap.Service
if o.SwapEnable {
swapProtocol := swapprotocol.New(p2ps, logger, overlayEthAddress)
swapAddressBook := swap.NewAddressbook(stateStore)
swapService = swap.New(swapProtocol, logger, stateStore, chequebookService, chequeStore, swapAddressBook, networkID, cashoutService, p2ps)
swapProtocol.SetSwap(swapService)
if err = p2ps.AddProtocol(swapProtocol.Protocol()); err != nil {
return nil, fmt.Errorf("swap protocol: %w", err)
}
settlement = swapService
} else {
pseudosettleService := pseudosettle.New(p2ps, logger, stateStore)
if err = p2ps.AddProtocol(pseudosettleService.Protocol()); err != nil {
return nil, fmt.Errorf("pseudosettle service: %w", err)
}
settlement = pseudosettleService
}
pricing := pricing.New(p2ps, logger, o.PaymentThreshold)
if err = p2ps.AddProtocol(pricing.Protocol()); err != nil {
return nil, fmt.Errorf("pricing service: %w", err)
}
acc, err := accounting.NewAccounting(o.PaymentThreshold, o.PaymentTolerance, o.PaymentEarly, logger, stateStore, settlement, pricing)
if err != nil {
return nil, fmt.Errorf("accounting: %w", err)
}
settlement.SetNotifyPaymentFunc(acc.AsyncNotifyPayment)
pricing.SetPaymentThresholdObserver(acc)
kad := kademlia.New(swarmAddress, addressbook, hive, p2ps, logger, kademlia.Options{Bootnodes: bootnodes, Standalone: o.Standalone})
b.topologyCloser = kad
hive.SetAddPeersHandler(kad.AddPeers)
p2ps.SetNotifier(kad)
addrs, err := p2ps.Addresses()
if err != nil {
return nil, fmt.Errorf("get server addresses: %w", err)
}
for _, addr := range addrs {
logger.Debugf("p2p address: %s", addr)
}
var path string
if o.DataDir != "" {
path = filepath.Join(o.DataDir, "localstore")
}
lo := &localstore.Options{
Capacity: o.DBCapacity,
}
storer, err := localstore.New(path, swarmAddress.Bytes(), lo, logger)
if err != nil {
return nil, fmt.Errorf("localstore: %w", err)
}
b.localstoreCloser = storer
retrieve := retrieval.New(swarmAddress, storer, p2ps, kad, logger, acc, accounting.NewFixedPricer(swarmAddress, 10), tracer)
tagService := tags.NewTags(stateStore, logger)
b.tagsCloser = tagService
if err = p2ps.AddProtocol(retrieve.Protocol()); err != nil {
return nil, fmt.Errorf("retrieval service: %w", err)
}
pssService := pss.New(pssPrivateKey, logger)
b.pssCloser = pssService
var ns storage.Storer
if o.GlobalPinningEnabled {
// create recovery callback for content repair
recoverFunc := recovery.NewCallback(pssService)
ns = netstore.New(storer, recoverFunc, retrieve, logger)
} else {
ns = netstore.New(storer, nil, retrieve, logger)
}
traversalService := traversal.NewService(ns)
pushSyncProtocol := pushsync.New(p2ps, storer, kad, tagService, pssService.TryUnwrap, logger, acc, accounting.NewFixedPricer(swarmAddress, 10), tracer)
// set the pushSyncer in the PSS
pssService.SetPushSyncer(pushSyncProtocol)
if err = p2ps.AddProtocol(pushSyncProtocol.Protocol()); err != nil {
return nil, fmt.Errorf("pushsync service: %w", err)
}
if o.GlobalPinningEnabled {
// register function for chunk repair upon receiving a trojan message
chunkRepairHandler := recovery.NewRepairHandler(ns, logger, pushSyncProtocol)
b.recoveryHandleCleanup = pssService.Register(recovery.Topic, chunkRepairHandler)
}
pushSyncPusher := pusher.New(storer, kad, pushSyncProtocol, tagService, logger, tracer)
b.pusherCloser = pushSyncPusher
pullStorage := pullstorage.New(storer)
pullSync := pullsync.New(p2ps, pullStorage, pssService.TryUnwrap, logger)
b.pullSyncCloser = pullSync
if err = p2ps.AddProtocol(pullSync.Protocol()); err != nil {
return nil, fmt.Errorf("pullsync protocol: %w", err)
}
puller := puller.New(stateStore, kad, pullSync, logger, puller.Options{})
b.pullerCloser = puller
multiResolver := multiresolver.NewMultiResolver(
multiresolver.WithConnectionConfigs(o.ResolverConnectionCfgs),
multiresolver.WithLogger(o.Logger),
)
b.resolverCloser = multiResolver
var apiService api.Service
if o.APIAddr != "" {
// API server
apiService = api.New(tagService, ns, multiResolver, pssService, traversalService, logger, tracer, api.Options{
CORSAllowedOrigins: o.CORSAllowedOrigins,
GatewayMode: o.GatewayMode,
WsPingPeriod: 60 * time.Second,
})
apiListener, err := net.Listen("tcp", o.APIAddr)
if err != nil {
return nil, fmt.Errorf("api listener: %w", err)
}
apiServer := &http.Server{
IdleTimeout: 30 * time.Second,
ReadHeaderTimeout: 3 * time.Second,
Handler: apiService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("api address: %s", apiListener.Addr())
if err := apiServer.Serve(apiListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("api server: %v", err)
logger.Error("unable to serve api")
}
}()
b.apiServer = apiServer
b.apiCloser = apiService
}
if o.DebugAPIAddr != "" {
// Debug API server
debugAPIService := debugapi.New(swarmAddress, publicKey, pssPrivateKey.PublicKey, overlayEthAddress, p2ps, pingPong, kad, storer, logger, tracer, tagService, acc, settlement, o.SwapEnable, swapService, chequebookService)
// register metrics from components
debugAPIService.MustRegisterMetrics(p2ps.Metrics()...)
debugAPIService.MustRegisterMetrics(pingPong.Metrics()...)
debugAPIService.MustRegisterMetrics(acc.Metrics()...)
debugAPIService.MustRegisterMetrics(storer.Metrics()...)
debugAPIService.MustRegisterMetrics(puller.Metrics()...)
debugAPIService.MustRegisterMetrics(pushSyncProtocol.Metrics()...)
debugAPIService.MustRegisterMetrics(pushSyncPusher.Metrics()...)
debugAPIService.MustRegisterMetrics(pullSync.Metrics()...)
if pssServiceMetrics, ok := pssService.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(pssServiceMetrics.Metrics()...)
}
if apiService != nil {
debugAPIService.MustRegisterMetrics(apiService.Metrics()...)
}
if l, ok := logger.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
if l, ok := settlement.(metrics.Collector); ok {
debugAPIService.MustRegisterMetrics(l.Metrics()...)
}
debugAPIListener, err := net.Listen("tcp", o.DebugAPIAddr)
if err != nil {
return nil, fmt.Errorf("debug api listener: %w", err)
}
debugAPIServer := &http.Server{
IdleTimeout: 30 * time.Second,
ReadHeaderTimeout: 3 * time.Second,
Handler: debugAPIService,
ErrorLog: log.New(b.errorLogWriter, "", 0),
}
go func() {
logger.Infof("debug api address: %s", debugAPIListener.Addr())
if err := debugAPIServer.Serve(debugAPIListener); err != nil && err != http.ErrServerClosed {
logger.Debugf("debug api server: %v", err)
logger.Error("unable to serve debug api")
}
}()
b.debugAPIServer = debugAPIServer
}
if err := kad.Start(p2pCtx); err != nil {
return nil, err
}
return b, nil
}
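// Shutdown closes the node's components in roughly reverse order of
// construction: the HTTP servers first, then the sync, push and pss
// services, the p2p service, and finally the stores and remaining closers.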
func (b *Bee) Shutdown(ctx context.Context) error {
errs := new(multiError)
if b.apiCloser != nil {
if err := b.apiCloser.Close(); err != nil {
errs.add(fmt.Errorf("api: %w", err))
}
}
var eg errgroup.Group
if b.apiServer != nil {
eg.Go(func() error {
if err := b.apiServer.Shutdown(ctx); err != nil {
return fmt.Errorf("api server: %w", err)
}
return nil
})
}
if b.debugAPIServer != nil {
eg.Go(func() error {
if err := b.debugAPIServer.Shutdown(ctx); err != nil {
return fmt.Errorf("debug api server: %w", err)
}
return nil
})
}
if err := eg.Wait(); err != nil {
errs.add(err)
}
if b.recoveryHandleCleanup != nil {
b.recoveryHandleCleanup()
}
if err := b.pusherCloser.Close(); err != nil {
errs.add(fmt.Errorf("pusher: %w", err))
}
if err := b.pullerCloser.Close(); err != nil {
errs.add(fmt.Errorf("puller: %w", err))
}
if err := b.pullSyncCloser.Close(); err != nil {
errs.add(fmt.Errorf("pull sync: %w", err))
}
if err := b.pssCloser.Close(); err != nil {
errs.add(fmt.Errorf("pss: %w", err))
}
b.p2pCancel()
if err := b.p2pService.Close(); err != nil {
errs.add(fmt.Errorf("p2p server: %w", err))
}
if err := b.tracerCloser.Close(); err != nil {
errs.add(fmt.Errorf("tracer: %w", err))
}
if err := b.tagsCloser.Close(); err != nil {
errs.add(fmt.Errorf("tag persistence: %w", err))
}
if err := b.stateStoreCloser.Close(); err != nil {
errs.add(fmt.Errorf("statestore: %w", err))
}
if err := b.localstoreCloser.Close(); err != nil {
errs.add(fmt.Errorf("localstore: %w", err))
}
if err := b.topologyCloser.Close(); err != nil {
errs.add(fmt.Errorf("topology driver: %w", err))
}
if err := b.errorLogWriter.Close(); err != nil {
errs.add(fmt.Errorf("error log writer: %w", err))
}
// Shutdown the resolver service only if it has been initialized.
if b.resolverCloser != nil {
if err := b.resolverCloser.Close(); err != nil {
errs.add(fmt.Errorf("resolver service: %w", err))
}
}
if errs.hasErrors() {
return errs
}
return nil
}
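// multiError collects the errors from the individual shutdown steps so that
// one failing closer does not abort or mask the remaining ones.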
type multiError struct {
errors []error
}
func (e *multiError) Error() string {
if len(e.errors) == 0 {
return ""
}
s := e.errors[0].Error()
for _, err := range e.errors[1:] {
s += "; " + err.Error()
}
return s
}
func (e *multiError) add(err error) {
e.errors = append(e.errors, err)
}
func (e *multiError) hasErrors() bool {
return len(e.errors) > 0
}
| 1 | 13,684 | It provides a type called Node which is a fully functional bee client. This package is where the dependencies are injected. It is not just a glue-code, it is concept of node. | ethersphere-bee | go |
@@ -330,6 +330,8 @@ class ConfigManager(QObject):
CHANGED_OPTIONS = {
('content', 'cookies-accept'):
_get_value_transformer('default', 'no-3rdparty'),
+ ('storage', 'download-directory'):
+ _get_value_transformer('', '%'),
}
changed = pyqtSignal(str, str) | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Configuration storage and config-related utilities.
This borrows a lot of ideas from configparser, but also has some things that
are fundamentally different. This is why nothing inherits from configparser,
but we borrow some methods and classes from there where it makes sense.
"""
import os
import sys
import os.path
import functools
import configparser
import collections
import collections.abc
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QUrl, QSettings
from qutebrowser.config import configdata, configexc, textwrapper
from qutebrowser.config.parsers import ini, keyconf
from qutebrowser.commands import cmdexc, cmdutils
from qutebrowser.utils import (message, objreg, utils, standarddir, log,
qtutils, error, usertypes)
from qutebrowser.utils.usertypes import Completion
class change_filter: # pylint: disable=invalid-name
"""Decorator to filter calls based on a config section/option matching.
This could also be a function, but as a class (with a "wrong" name) it's
much cleaner to implement.
Attributes:
_sectname: The section to be filtered.
_optname: The option to be filtered.
_function: Whether a function rather than a method is decorated.
"""
def __init__(self, sectname, optname=None, function=False):
"""Save decorator arguments.
        Gets called at parse time with the decorator arguments.
Args:
sectname: The section to be filtered.
optname: The option to be filtered.
function: Whether a function rather than a method is decorated.
"""
if sectname not in configdata.DATA:
raise configexc.NoSectionError(sectname)
if optname is not None and optname not in configdata.DATA[sectname]:
raise configexc.NoOptionError(optname, sectname)
self._sectname = sectname
self._optname = optname
self._function = function
def __call__(self, func):
"""Filter calls to the decorated function.
Gets called when a function should be decorated.
Adds a filter which returns if we're not interested in the change-event
and calls the wrapped function if we are.
We assume the function passed doesn't take any parameters.
Args:
func: The function to be decorated.
Return:
The decorated function.
"""
if self._function:
@pyqtSlot(str, str)
@functools.wraps(func)
def wrapper(sectname=None, optname=None):
# pylint: disable=missing-docstring
if sectname is None and optname is None:
# Called directly, not from a config change event.
return func()
elif sectname != self._sectname:
return
elif self._optname is not None and optname != self._optname:
return
else:
return func()
else:
@pyqtSlot(str, str)
@functools.wraps(func)
def wrapper(wrapper_self, sectname=None, optname=None):
# pylint: disable=missing-docstring
if sectname is None and optname is None:
# Called directly, not from a config change event.
return func(wrapper_self)
elif sectname != self._sectname:
return
elif self._optname is not None and optname != self._optname:
return
else:
return func(wrapper_self)
return wrapper
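
    # Illustrative usage (the decorated method's name here is an example,
    # not taken from this module):
    #
    #     @change_filter('ui', 'zoom-levels')
    #     def _on_config_changed(self):
    #         ...  # runs only for changes to ui -> zoom-levels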
def get(*args, **kwargs):
"""Convenience method to call get(...) of the config instance."""
return objreg.get('config').get(*args, **kwargs)
def section(sect):
"""Get a config section from the global config."""
return objreg.get('config')[sect]
def _init_main_config(parent=None):
"""Initialize the main config.
Args:
parent: The parent to pass to ConfigManager.
"""
args = objreg.get('args')
try:
config_obj = ConfigManager(standarddir.config(), 'qutebrowser.conf',
args.relaxed_config, parent=parent)
except (configexc.Error, configparser.Error, UnicodeDecodeError) as e:
log.init.exception(e)
errstr = "Error while reading config:"
try:
errstr += "\n\n{} -> {}:".format(
e.section, e.option) # pylint: disable=no-member
except AttributeError:
pass
errstr += "\n"
error.handle_fatal_exc(e, args, "Error while reading config!",
pre_text=errstr)
# We didn't really initialize much so far, so we just quit hard.
sys.exit(usertypes.Exit.err_config)
else:
objreg.register('config', config_obj)
if standarddir.config() is not None:
filename = os.path.join(standarddir.config(), 'qutebrowser.conf')
save_manager = objreg.get('save-manager')
save_manager.add_saveable(
'config', config_obj.save, config_obj.changed,
config_opt=('general', 'auto-save-config'), filename=filename)
for sect in config_obj.sections.values():
for opt in sect.values.values():
if opt.values['conf'] is None:
# Option added to built-in defaults but not in user's
# config yet
save_manager.save('config', explicit=True, force=True)
return
def _init_key_config(parent):
"""Initialize the key config.
Args:
parent: The parent to use for the KeyConfigParser.
"""
args = objreg.get('args')
try:
key_config = keyconf.KeyConfigParser(standarddir.config(), 'keys.conf',
args.relaxed_config,
parent=parent)
except (keyconf.KeyConfigError, UnicodeDecodeError) as e:
log.init.exception(e)
errstr = "Error while reading key config:\n"
if e.lineno is not None:
errstr += "In line {}: ".format(e.lineno)
error.handle_fatal_exc(e, args, "Error while reading key config!",
pre_text=errstr)
# We didn't really initialize much so far, so we just quit hard.
sys.exit(usertypes.Exit.err_key_config)
else:
objreg.register('key-config', key_config)
if standarddir.config() is not None:
save_manager = objreg.get('save-manager')
filename = os.path.join(standarddir.config(), 'keys.conf')
save_manager.add_saveable(
'key-config', key_config.save, key_config.config_dirty,
config_opt=('general', 'auto-save-config'), filename=filename,
dirty=key_config.is_dirty)
def _init_misc():
"""Initialize misc. config-related files."""
save_manager = objreg.get('save-manager')
state_config = ini.ReadWriteConfigParser(standarddir.data(), 'state')
for sect in ('general', 'geometry'):
try:
state_config.add_section(sect)
except configparser.DuplicateSectionError:
pass
# See commit a98060e020a4ba83b663813a4b9404edb47f28ad.
state_config['general'].pop('fooled', None)
objreg.register('state-config', state_config)
save_manager.add_saveable('state-config', state_config.save)
# We need to import this here because lineparser needs config.
from qutebrowser.misc import lineparser
command_history = lineparser.LimitLineParser(
standarddir.data(), 'cmd-history',
limit=('completion', 'cmd-history-max-items'),
parent=objreg.get('config'))
objreg.register('command-history', command_history)
save_manager.add_saveable('command-history', command_history.save,
command_history.changed)
# Set the QSettings path to something like
# ~/.config/qutebrowser/qsettings/qutebrowser/qutebrowser.conf so it
# doesn't overwrite our config.
#
# This fixes one of the corruption issues here:
# https://github.com/The-Compiler/qutebrowser/issues/515
if standarddir.config() is None:
path = os.devnull
else:
path = os.path.join(standarddir.config(), 'qsettings')
for fmt in (QSettings.NativeFormat, QSettings.IniFormat):
QSettings.setPath(fmt, QSettings.UserScope, path)
def init(parent=None):
"""Initialize the config.
Args:
parent: The parent to pass to QObjects which get initialized.
"""
_init_main_config(parent)
_init_key_config(parent)
_init_misc()
def _get_value_transformer(old, new):
"""Get a function which transforms a value for CHANGED_OPTIONS.
Args:
old: The old value - if the supplied value doesn't match this, it's
returned untransformed.
new: The new value.
Return:
A function which takes a value and transforms it.
"""
def transformer(val):
if val == old:
return new
else:
return val
return transformer
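# Example: the CHANGED_OPTIONS entry below maps
#     ('content', 'cookies-accept'): _get_value_transformer('default', 'no-3rdparty')
# so a stored value of 'default' is migrated to 'no-3rdparty' on load, while
# every other value passes through unchanged.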
class ConfigManager(QObject):
"""Configuration manager for qutebrowser.
Class attributes:
KEY_ESCAPE: Chars which need escaping when they occur as first char
in a line.
ESCAPE_CHAR: The char to be used for escaping
RENAMED_SECTIONS: A mapping of renamed sections, {'oldname': 'newname'}
RENAMED_OPTIONS: A mapping of renamed options,
{('section', 'oldname'): 'newname'}
CHANGED_OPTIONS: A mapping of arbitrarily changed options,
{('section', 'option'): callable}.
The callable takes the old value and returns the new
one.
DELETED_OPTIONS: A (section, option) list of deleted options.
Attributes:
sections: The configuration data as an OrderedDict.
_fname: The filename to be opened.
        _configdir: The directory to read the config from and save it in.
        _interpolation: A configparser.Interpolation object.
_proxies: configparser.SectionProxy objects for sections.
_initialized: Whether the ConfigManager is fully initialized yet.
Signals:
changed: Emitted when a config option changed.
style_changed: When style caches need to be invalidated.
Args: the changed section and option.
"""
KEY_ESCAPE = r'\#['
ESCAPE_CHAR = '\\'
RENAMED_SECTIONS = {
'permissions': 'content'
}
RENAMED_OPTIONS = {
('colors', 'tab.fg.odd'): 'tabs.fg.odd',
('colors', 'tab.fg.even'): 'tabs.fg.even',
('colors', 'tab.fg.selected'): 'tabs.fg.selected',
('colors', 'tab.bg.odd'): 'tabs.bg.odd',
('colors', 'tab.bg.even'): 'tabs.bg.even',
('colors', 'tab.bg.selected'): 'tabs.bg.selected',
('colors', 'tab.bg.bar'): 'tabs.bg.bar',
('colors', 'tab.indicator.start'): 'tabs.indicator.start',
('colors', 'tab.indicator.stop'): 'tabs.indicator.stop',
('colors', 'tab.indicator.error'): 'tabs.indicator.error',
('colors', 'tab.indicator.system'): 'tabs.indicator.system',
('tabs', 'auto-hide'): 'hide-auto',
('completion', 'history-length'): 'cmd-history-max-items',
}
DELETED_OPTIONS = [
('colors', 'tab.separator'),
('colors', 'tabs.separator'),
('colors', 'completion.item.bg'),
]
CHANGED_OPTIONS = {
('content', 'cookies-accept'):
_get_value_transformer('default', 'no-3rdparty'),
}
changed = pyqtSignal(str, str)
style_changed = pyqtSignal(str, str)
def __init__(self, configdir, fname, relaxed=False, parent=None):
super().__init__(parent)
self._initialized = False
self.sections = configdata.data()
self._interpolation = configparser.ExtendedInterpolation()
self._proxies = {}
for sectname in self.sections.keys():
self._proxies[sectname] = SectionProxy(self, sectname)
self._fname = fname
if configdir is None:
self._configdir = None
self._initialized = True
else:
self._configdir = configdir
parser = ini.ReadConfigParser(configdir, fname)
self._from_cp(parser, relaxed)
self._initialized = True
self._validate_all()
def __getitem__(self, key):
"""Get a section from the config."""
return self._proxies[key]
def __repr__(self):
return utils.get_repr(self, fname=self._fname)
def __str__(self):
"""Get the whole config as a string."""
lines = configdata.FIRST_COMMENT.strip('\n').splitlines()
for sectname, sect in self.sections.items():
lines.append('\n[{}]'.format(sectname))
lines += self._str_section_desc(sectname)
lines += self._str_option_desc(sectname, sect)
lines += self._str_items(sect)
return '\n'.join(lines) + '\n'
def _str_section_desc(self, sectname):
"""Get the section description string for sectname."""
wrapper = textwrapper.TextWrapper()
lines = []
seclines = configdata.SECTION_DESC[sectname].splitlines()
for secline in seclines:
if 'http://' in secline or 'https://' in secline:
lines.append('# ' + secline)
else:
lines += wrapper.wrap(secline)
return lines
def _str_option_desc(self, sectname, sect):
"""Get the option description strings for sect/sectname."""
wrapper = textwrapper.TextWrapper(initial_indent='#' + ' ' * 5,
subsequent_indent='#' + ' ' * 5)
lines = []
if not getattr(sect, 'descriptions', None):
return lines
for optname, option in sect.items():
lines.append('#')
if option.typ.typestr is None:
typestr = ''
else:
typestr = ' ({})'.format(option.typ.typestr)
lines.append("# {}{}:".format(optname, typestr))
try:
desc = self.sections[sectname].descriptions[optname]
except KeyError:
log.config.exception("No description for {}.{}!".format(
sectname, optname))
continue
for descline in desc.splitlines():
lines += wrapper.wrap(descline)
valid_values = option.typ.valid_values
if valid_values is not None:
if valid_values.descriptions:
for val in valid_values:
desc = valid_values.descriptions[val]
lines += wrapper.wrap(" {}: {}".format(val, desc))
else:
lines += wrapper.wrap("Valid values: {}".format(', '.join(
valid_values)))
lines += wrapper.wrap("Default: {}".format(
option.values['default']))
return lines
def _str_items(self, sect):
"""Get the option items as string for sect."""
lines = []
for optname, option in sect.items():
value = option.value(startlayer='conf')
for c in self.KEY_ESCAPE:
if optname.startswith(c):
optname = optname.replace(c, self.ESCAPE_CHAR + c, 1)
# configparser can't handle = in keys :(
optname = optname.replace('=', '<eq>')
keyval = '{} = {}'.format(optname, value)
lines.append(keyval)
return lines
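    # Example: an option named '#fancy' is written out as '\#fancy = <value>'
    # so configparser does not treat the leading '#' as a comment, and an
    # option name containing '=' is written with '<eq>' instead.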
def _get_real_sectname(self, cp, sectname):
"""Get an old or new section name based on a configparser.
This checks if sectname is in cp, and if not, migrates it if needed and
tries again.
Args:
cp: The configparser to check.
sectname: The new section name.
Returns:
The section name in the configparser as a string, or None if the
configparser doesn't contain the section.
"""
reverse_renamed_sections = {v: k for k, v in
self.RENAMED_SECTIONS.items()}
if sectname in reverse_renamed_sections:
old_sectname = reverse_renamed_sections[sectname]
else:
old_sectname = sectname
if old_sectname in cp:
return old_sectname
elif sectname in cp:
return sectname
else:
return None
def _from_cp(self, cp, relaxed=False):
"""Read the config from a configparser instance.
Args:
cp: The configparser instance to read the values from.
relaxed: Whether to ignore inexistent sections/options.
"""
for sectname in cp:
if sectname in self.RENAMED_SECTIONS:
sectname = self.RENAMED_SECTIONS[sectname]
            if sectname != 'DEFAULT' and sectname not in self.sections:
if not relaxed:
raise configexc.NoSectionError(sectname)
for sectname in self.sections:
self._from_cp_section(sectname, cp, relaxed)
def _from_cp_section(self, sectname, cp, relaxed):
"""Read a single section from a configparser instance.
Args:
sectname: The name of the section to read.
cp: The configparser instance to read the values from.
relaxed: Whether to ignore inexistent options.
"""
real_sectname = self._get_real_sectname(cp, sectname)
if real_sectname is None:
return
for k, v in cp[real_sectname].items():
if k.startswith(self.ESCAPE_CHAR):
k = k[1:]
            if (sectname, k) in self.DELETED_OPTIONS:
                # Skip only this option; returning here would drop the rest
                # of the section.
                continue
if (sectname, k) in self.RENAMED_OPTIONS:
k = self.RENAMED_OPTIONS[sectname, k]
if (sectname, k) in self.CHANGED_OPTIONS:
func = self.CHANGED_OPTIONS[(sectname, k)]
v = func(v)
try:
self.set('conf', sectname, k, v, validate=False)
except configexc.NoOptionError:
if relaxed:
pass
else:
raise
def _validate_all(self):
"""Validate all values set in self._from_cp."""
for sectname, sect in self.sections.items():
mapping = {key: val.value() for key, val in sect.values.items()}
for optname, opt in sect.items():
interpolated = self._interpolation.before_get(
self, sectname, optname, opt.value(), mapping)
try:
opt.typ.validate(interpolated)
except configexc.ValidationError as e:
e.section = sectname
e.option = optname
raise
def _changed(self, sectname, optname):
"""Notify other objects the config has changed."""
log.config.debug("Config option changed: {} -> {}".format(
sectname, optname))
if sectname in ('colors', 'fonts'):
self.style_changed.emit(sectname, optname)
self.changed.emit(sectname, optname)
def _after_set(self, changed_sect, changed_opt):
"""Clean up caches and emit signals after an option has been set."""
self.get.cache_clear()
self._changed(changed_sect, changed_opt)
# Options in the same section and ${optname} interpolation.
for optname, option in self.sections[changed_sect].items():
if '${' + changed_opt + '}' in option.value():
self._changed(changed_sect, optname)
# Options in any section and ${sectname:optname} interpolation.
for sectname, sect in self.sections.items():
for optname, option in sect.items():
if ('${' + changed_sect + ':' + changed_opt + '}' in
option.value()):
self._changed(sectname, optname)
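    # Example: when ('fonts', 'monospace') changes, any option in the same
    # section whose value contains '${monospace}', and any option in any
    # section whose value contains '${fonts:monospace}', is re-announced via
    # self._changed() as well.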
def items(self, sectname, raw=True):
"""Get a list of (optname, value) tuples for a section.
        Implemented for configparser interpolation compatibility.
Args:
sectname: The name of the section to get.
raw: Whether to get raw values. Note this parameter only exists
for ConfigParser compatibility and raw=False is not supported.
"""
items = []
if not raw:
            raise ValueError("items() with raw=False is not implemented!")
for optname, option in self.sections[sectname].items():
items.append((optname, option.value()))
return items
def has_option(self, sectname, optname):
"""Check if option exists in section.
Args:
sectname: The section name.
optname: The option name
Return:
True if the option and section exist, False otherwise.
"""
if sectname not in self.sections:
return False
return optname in self.sections[sectname]
def remove_option(self, sectname, optname):
"""Remove an option.
Args:
sectname: The section where to remove an option.
optname: The option name to remove.
Return:
True if the option existed, False otherwise.
"""
try:
sectdict = self.sections[sectname]
except KeyError:
raise configexc.NoSectionError(sectname)
optname = self.optionxform(optname)
existed = optname in sectdict
if existed:
del sectdict[optname]
self.get.cache_clear()
return existed
@functools.lru_cache()
def get(self, sectname, optname, raw=False, transformed=True):
"""Get the value from a section/option.
Args:
sectname: The section to get the option from.
optname: The option name
raw: Whether to get the uninterpolated, untransformed value.
transformed: Whether the value should be transformed.
Return:
The value of the option.
"""
if not self._initialized:
raise Exception("get got called before initialization was "
"complete!")
try:
sect = self.sections[sectname]
except KeyError:
raise configexc.NoSectionError(sectname)
try:
val = sect[optname]
except KeyError:
raise configexc.NoOptionError(optname, sectname)
if raw:
return val.value()
mapping = {key: val.value() for key, val in sect.values.items()}
newval = self._interpolation.before_get(self, sectname, optname,
val.value(), mapping)
if transformed:
newval = val.typ.transform(newval)
return newval
@cmdutils.register(name='set', instance='config', win_id='win_id',
completion=[Completion.section, Completion.option,
Completion.value])
def set_command(self, win_id, section_=None, option=None, value=None,
temp=False, print_=False):
"""Set an option.
If the option name ends with '?', the value of the option is shown
instead.
If the option name ends with '!' and it is a boolean value, toggle it.
//
Wrapper for self.set() to output exceptions in the status bar.
Args:
section_: The section where the option is in.
option: The name of the option.
value: The value to set.
temp: Set value temporarily.
print_: Print the value after setting.
"""
if section_ is not None and option is None:
raise cmdexc.CommandError(
"set: Either both section and option have to be given, or "
"neither!")
if section_ is None and option is None:
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
tabbed_browser.openurl(QUrl('qute:settings'), newtab=False)
return
if option.endswith('?'):
option = option[:-1]
print_ = True
else:
try:
if option.endswith('!') and value is None:
val = self.get(section_, option[:-1])
layer = 'temp' if temp else 'conf'
if isinstance(val, bool):
self.set(layer, section_, option[:-1], str(not val))
else:
raise cmdexc.CommandError(
"set: Attempted inversion of non-boolean value.")
elif value is not None:
layer = 'temp' if temp else 'conf'
self.set(layer, section_, option, value)
else:
raise cmdexc.CommandError("set: The following arguments "
"are required: value")
except (configexc.Error, configparser.Error) as e:
raise cmdexc.CommandError("set: {} - {}".format(
e.__class__.__name__, e))
if print_:
val = self.get(section_, option, transformed=False)
message.info(win_id, "{} {} = {}".format(
section_, option, val), immediately=True)
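    # Command-line usage sketch (option names are illustrative):
    #   :set ui zoom-levels?        -> print the current value
    #   :set tabs show-favicons!    -> toggle the boolean value
    #   :set ui zoom-levels 100%    -> set the value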
def set(self, layer, sectname, optname, value, validate=True):
"""Set an option.
Args:
layer: A layer name as string (conf/temp/default).
sectname: The name of the section to change.
optname: The name of the option to change.
value: The new value.
validate: Whether to validate the value immediately.
"""
try:
value = self._interpolation.before_set(self, sectname, optname,
value)
except ValueError as e:
raise configexc.InterpolationSyntaxError(optname, sectname, str(e))
try:
sect = self.sections[sectname]
except KeyError:
raise configexc.NoSectionError(sectname)
mapping = {key: val.value() for key, val in sect.values.items()}
if validate:
interpolated = self._interpolation.before_get(
self, sectname, optname, value, mapping)
else:
interpolated = None
try:
sect.setv(layer, optname, value, interpolated)
except KeyError:
raise configexc.NoOptionError(optname, sectname)
else:
if self._initialized:
self._after_set(sectname, optname)
def save(self):
"""Save the config file."""
if self._configdir is None:
return
configfile = os.path.join(self._configdir, self._fname)
log.destroy.debug("Saving config to {}".format(configfile))
with qtutils.savefile_open(configfile) as f:
f.write(str(self))
def dump_userconfig(self):
"""Get the part of the config which was changed by the user.
Return:
The changed config part as string.
"""
lines = []
for sectname, sect in self.sections.items():
changed = sect.dump_userconfig()
if changed:
lines.append('[{}]'.format(sectname))
lines += ['{} = {}'.format(k, v) for k, v in changed]
if not lines:
lines = ['<Default configuration>']
return '\n'.join(lines)
def optionxform(self, val):
"""Implemented to be compatible with ConfigParser interpolation."""
return val
class SectionProxy(collections.abc.MutableMapping):
"""A proxy for a single section from a config.
Attributes:
        conf: The Config object.
        name: The section name.
"""
def __init__(self, conf, name):
"""Create a view on a section.
Args:
conf: The Config object.
name: The section name.
"""
self.conf = conf
self.name = name
def __repr__(self):
return utils.get_repr(self, name=self.name)
def __getitem__(self, key):
if not self.conf.has_option(self.name, key):
raise KeyError(key)
return self.conf.get(self.name, key)
def __setitem__(self, key, value):
return self.conf.set('conf', self.name, key, value)
def __delitem__(self, key):
if not (self.conf.has_option(self.name, key) and
self.conf.remove_option(self.name, key)):
raise KeyError(key)
def __contains__(self, key):
return self.conf.has_option(self.name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
"""Get the option keys from this section."""
return self.conf.sections[self.name].keys()
def get(self, optname, *, raw=False): # pylint: disable=arguments-differ
"""Get a value from this section.
We deliberately don't support the default argument here, but have a raw
argument instead.
Args:
optname: The option name to get.
raw: Whether to get a raw value or not.
"""
return self.conf.get(self.name, optname, raw=raw)
| 1 | 13,175 | I think this will change `''` to `'%'` unconditionally, i.e. there'll be no way to set `''` anymore. This should really only be used for values which make no sense anymore. | qutebrowser-qutebrowser | py |
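A minimal sketch of the concern in the review above, assuming the patch under discussion registered a CHANGED_OPTIONS transformer mapping '' to '%' (both values are hypothetical here): because the transformer only compares against the old value, every stored '' is rewritten on load, so '' can never survive a load/save cycle.

# Reduced copy of _get_value_transformer from the file above, for illustration.
def _get_value_transformer(old, new):
    def transformer(val):
        return new if val == old else val
    return transformer

transform = _get_value_transformer('', '%')   # hypothetical old/new values
assert transform('') == '%'    # every empty value is migrated on load...
assert transform('x') == 'x'   # ...other values pass through unchanged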
@@ -88,6 +88,13 @@ public interface Table {
*/
Map<Integer, SortOrder> sortOrders();
+ /**
+ * Return the {@link RowKey row key} for this table.
+ *
+ * @return this table's row key.
+ */
+ RowKey rowKey();
+
/**
* Return a map of string properties for this table.
* | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg;
import java.util.List;
import java.util.Map;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
/**
* Represents a table.
*/
public interface Table {
/**
* Return the full name for this table.
*
* @return this table's name
*/
default String name() {
return toString();
}
/**
* Refresh the current table metadata.
*/
void refresh();
/**
* Create a new {@link TableScan scan} for this table.
* <p>
* Once a table scan is created, it can be refined to project columns and filter data.
*
* @return a table scan for this table
*/
TableScan newScan();
/**
* Return the {@link Schema schema} for this table.
*
* @return this table's schema
*/
Schema schema();
/**
* Return the {@link PartitionSpec partition spec} for this table.
*
* @return this table's partition spec
*/
PartitionSpec spec();
/**
* Return a map of {@link PartitionSpec partition specs} for this table.
*
* @return this table's partition specs map
*/
Map<Integer, PartitionSpec> specs();
/**
* Return the {@link SortOrder sort order} for this table.
*
* @return this table's sort order
*/
SortOrder sortOrder();
/**
* Return a map of sort order IDs to {@link SortOrder sort orders} for this table.
*
* @return this table's sort orders map
*/
Map<Integer, SortOrder> sortOrders();
/**
* Return a map of string properties for this table.
*
* @return this table's properties map
*/
Map<String, String> properties();
/**
* Return the table's base location.
*
* @return this table's location
*/
String location();
/**
* Get the current {@link Snapshot snapshot} for this table, or null if there are no snapshots.
*
* @return the current table Snapshot.
*/
Snapshot currentSnapshot();
/**
* Get the {@link Snapshot snapshot} of this table with the given id, or null if there is no
* matching snapshot.
*
* @return the {@link Snapshot} with the given id.
*/
Snapshot snapshot(long snapshotId);
/**
* Get the {@link Snapshot snapshots} of this table.
*
* @return an Iterable of snapshots of this table.
*/
Iterable<Snapshot> snapshots();
/**
* Get the snapshot history of this table.
*
* @return a list of {@link HistoryEntry history entries}
*/
List<HistoryEntry> history();
/**
* Create a new {@link UpdateSchema} to alter the columns of this table and commit the change.
*
* @return a new {@link UpdateSchema}
*/
UpdateSchema updateSchema();
/**
* Create a new {@link UpdatePartitionSpec} to alter the partition spec of this table and commit the change.
*
* @return a new {@link UpdatePartitionSpec}
*/
UpdatePartitionSpec updateSpec();
/**
* Create a new {@link UpdateProperties} to update table properties and commit the changes.
*
* @return a new {@link UpdateProperties}
*/
UpdateProperties updateProperties();
/**
* Create a new {@link ReplaceSortOrder} to set the table sort order and commit the change.
*
* @return a new {@link ReplaceSortOrder}
*/
ReplaceSortOrder replaceSortOrder();
/**
* Create a new {@link UpdateLocation} to update table location and commit the changes.
*
* @return a new {@link UpdateLocation}
*/
UpdateLocation updateLocation();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
*
* @return a new {@link AppendFiles}
*/
AppendFiles newAppend();
/**
* Create a new {@link AppendFiles append API} to add files to this table and commit.
* <p>
* Using this method signals to the underlying implementation that the append should not perform
* extra work in order to commit quickly. Fast appends are not recommended for normal writes
* because the fast commit may cause split planning to slow down over time.
* <p>
* Implementations may not support fast appends, in which case this will return the same appender
* as {@link #newAppend()}.
*
* @return a new {@link AppendFiles}
*/
default AppendFiles newFastAppend() {
return newAppend();
}
/**
* Create a new {@link RewriteFiles rewrite API} to replace files in this table and commit.
*
* @return a new {@link RewriteFiles}
*/
RewriteFiles newRewrite();
/**
* Create a new {@link RewriteManifests rewrite manifests API} to replace manifests for this
* table and commit.
*
* @return a new {@link RewriteManifests}
*/
RewriteManifests rewriteManifests();
/**
* Create a new {@link OverwriteFiles overwrite API} to overwrite files by a filter expression.
*
* @return a new {@link OverwriteFiles}
*/
OverwriteFiles newOverwrite();
/**
* Create a new {@link RowDelta row-level delta API} to remove or replace rows in existing data files.
*
* @return a new {@link RowDelta}
*/
RowDelta newRowDelta();
/**
* Not recommended: Create a new {@link ReplacePartitions replace partitions API} to dynamically
* overwrite partitions in the table with new data.
* <p>
* This is provided to implement SQL compatible with Hive table operations but is not recommended.
* Instead, use the {@link OverwriteFiles overwrite API} to explicitly overwrite data.
*
* @return a new {@link ReplacePartitions}
*/
ReplacePartitions newReplacePartitions();
/**
* Create a new {@link DeleteFiles delete API} to replace files in this table and commit.
*
* @return a new {@link DeleteFiles}
*/
DeleteFiles newDelete();
/**
* Create a new {@link ExpireSnapshots expire API} to manage snapshots in this table and commit.
*
* @return a new {@link ExpireSnapshots}
*/
ExpireSnapshots expireSnapshots();
/**
* Create a new {@link Rollback rollback API} to roll back to a previous snapshot and commit.
*
* @return a new {@link Rollback}
* @deprecated Replaced by {@link #manageSnapshots()}
*/
@Deprecated
Rollback rollback();
/**
* Create a new {@link ManageSnapshots manage snapshots API} to manage snapshots in this table and commit.
* @return a new {@link ManageSnapshots}
*/
ManageSnapshots manageSnapshots();
/**
* Create a new {@link Transaction transaction API} to commit multiple table operations at once.
*
* @return a new {@link Transaction}
*/
Transaction newTransaction();
/**
* Returns a {@link FileIO} to read and write table data and metadata files.
*/
FileIO io();
/**
* Returns an {@link org.apache.iceberg.encryption.EncryptionManager} to encrypt and decrypt data files.
*/
EncryptionManager encryption();
/**
* Returns a {@link LocationProvider} to provide locations for new data files.
*/
LocationProvider locationProvider();
}
| 1 | 35,160 | Nit: this table's row key map -> this table's row keys map | apache-iceberg | java |
@@ -87,7 +87,7 @@ void proj_assign_context( PJ* pj, PJ_CONTEXT *ctx )
pj_ctx pj_ctx::createDefault()
{
pj_ctx ctx;
- ctx.debug_level = PJ_LOG_ERROR;
+ ctx.debug_level = PJ_LOG_NONE;
ctx.logger = pj_stderr_logger;
NS_PROJ::FileManager::fillDefaultNetworkInterface(&ctx);
| 1 | /******************************************************************************
* Project: PROJ.4
* Purpose: Implementation of the PJ_CONTEXT thread context object.
* Author: Frank Warmerdam, [email protected]
*
******************************************************************************
* Copyright (c) 2010, Frank Warmerdam
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*****************************************************************************/
#ifndef FROM_PROJ_CPP
#define FROM_PROJ_CPP
#endif
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <new>
#include "proj_experimental.h"
#include "proj_internal.h"
#include "filemanager.hpp"
#include "proj/internal/io_internal.hpp"
/************************************************************************/
/* pj_get_ctx() */
/************************************************************************/
PJ_CONTEXT* pj_get_ctx( PJ *pj )
{
if (nullptr==pj)
return pj_get_default_ctx ();
if (nullptr==pj->ctx)
return pj_get_default_ctx ();
return pj->ctx;
}
/************************************************************************/
/* proj_assign_context() */
/************************************************************************/
/** \brief Re-assign a context to a PJ* object.
*
* This may be useful if the PJ* has been created with a context that is
* thread-specific, and is later used in another thread. In that case,
* the user may want to assign another thread-specific context to the
* object.
*/
void proj_assign_context( PJ* pj, PJ_CONTEXT *ctx )
{
if (pj==nullptr)
return;
pj->ctx = ctx;
if( pj->reassign_context )
{
pj->reassign_context(pj, ctx);
}
for( const auto &alt: pj->alternativeCoordinateOperations )
{
proj_assign_context(alt.pj, ctx);
}
}
/************************************************************************/
/* createDefault() */
/************************************************************************/
pj_ctx pj_ctx::createDefault()
{
pj_ctx ctx;
ctx.debug_level = PJ_LOG_ERROR;
ctx.logger = pj_stderr_logger;
NS_PROJ::FileManager::fillDefaultNetworkInterface(&ctx);
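    // Honor the PROJ_DEBUG environment variable: its integer value is used
    // directly when it is >= -PJ_LOG_TRACE; anything lower is clamped to
    // PJ_LOG_TRACE.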
if( getenv("PROJ_DEBUG") != nullptr )
{
if( atoi(getenv("PROJ_DEBUG")) >= -PJ_LOG_TRACE )
ctx.debug_level = atoi(getenv("PROJ_DEBUG"));
else
ctx.debug_level = PJ_LOG_TRACE;
}
return ctx;
}
/**************************************************************************/
/* get_cpp_context() */
/**************************************************************************/
projCppContext* pj_ctx::get_cpp_context()
{
if (cpp_context == nullptr) {
cpp_context = new projCppContext(this);
}
return cpp_context;
}
/**************************************************************************/
/* safeAutoCloseDbIfNeeded() */
/**************************************************************************/
void pj_ctx::safeAutoCloseDbIfNeeded()
{
if (cpp_context) {
cpp_context->autoCloseDbIfNeeded();
}
}
/************************************************************************/
/* set_search_paths() */
/************************************************************************/
void pj_ctx::set_search_paths(const std::vector<std::string>& search_paths_in )
{
search_paths = search_paths_in;
delete[] c_compat_paths;
c_compat_paths = nullptr;
if( !search_paths.empty() ) {
c_compat_paths = new const char*[search_paths.size()];
for( size_t i = 0; i < search_paths.size(); ++i ) {
c_compat_paths[i] = search_paths[i].c_str();
}
}
}
/**************************************************************************/
/* set_ca_bundle_path() */
/**************************************************************************/
void pj_ctx::set_ca_bundle_path(const std::string& ca_bundle_path_in)
{
ca_bundle_path = ca_bundle_path_in;
}
/************************************************************************/
/* pj_ctx(const pj_ctx& other) */
/************************************************************************/
pj_ctx::pj_ctx(const pj_ctx& other) :
debug_level(other.debug_level),
logger(other.logger),
logger_app_data(other.logger_app_data),
cpp_context(other.cpp_context ? other.cpp_context->clone(this) : nullptr),
use_proj4_init_rules(other.use_proj4_init_rules),
epsg_file_exists(other.epsg_file_exists),
ca_bundle_path(other.ca_bundle_path),
env_var_proj_lib(other.env_var_proj_lib),
file_finder(other.file_finder),
file_finder_user_data(other.file_finder_user_data),
custom_sqlite3_vfs_name(other.custom_sqlite3_vfs_name),
user_writable_directory(other.user_writable_directory),
// BEGIN ini file settings
iniFileLoaded(other.iniFileLoaded),
endpoint(other.endpoint),
networking(other.networking),
gridChunkCache(other.gridChunkCache),
defaultTmercAlgo(other.defaultTmercAlgo)
// END ini file settings
{
set_search_paths(other.search_paths);
}
/************************************************************************/
/* pj_get_default_ctx() */
/************************************************************************/
PJ_CONTEXT* pj_get_default_ctx()
{
// C++11 rules guarantee a thread-safe instantiation.
static pj_ctx default_context(pj_ctx::createDefault());
return &default_context;
}
/************************************************************************/
/* ~pj_ctx() */
/************************************************************************/
pj_ctx::~pj_ctx()
{
delete[] c_compat_paths;
proj_context_delete_cpp_context(cpp_context);
}
/************************************************************************/
/* proj_context_clone() */
/* Create a new context based on a custom context */
/************************************************************************/
PJ_CONTEXT* proj_context_clone (PJ_CONTEXT *ctx)
{
if (nullptr==ctx)
return proj_context_create();
return new (std::nothrow) pj_ctx(*ctx);
}
| 1 | 12,404 | this change should be reverted | OSGeo-PROJ | cpp |
@@ -26,8 +26,10 @@ __copyright__ = "Copyright 2014-2018 Florian Bruhin (The Compiler)"
__license__ = "GPL"
__maintainer__ = __author__
__email__ = "[email protected]"
-__version_info__ = (1, 5, 0)
-__version__ = '.'.join(str(e) for e in __version_info__)
+__version__ = "1.5.0"
+__version_info__ = [int(part) for part in __version__.split('.')]
__description__ = "A keyboard-driven, vim-like browser based on PyQt5."
basedir = os.path.dirname(os.path.realpath(__file__))
+appdata_path = os.path.join(os.path.dirname(basedir), "misc",
+ "qutebrowser.appdata.xml") | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A keyboard-driven, vim-like browser based on PyQt5."""
import os.path
__author__ = "Florian Bruhin"
__copyright__ = "Copyright 2014-2018 Florian Bruhin (The Compiler)"
__license__ = "GPL"
__maintainer__ = __author__
__email__ = "[email protected]"
__version_info__ = (1, 5, 0)
__version__ = '.'.join(str(e) for e in __version_info__)
__description__ = "A keyboard-driven, vim-like browser based on PyQt5."
basedir = os.path.dirname(os.path.realpath(__file__))
| 1 | 22,459 | I'm a bit confused about the `os.path.dirname(basedir)` here - is this just to get to the parent directory? Either way, I think I'd prefer just having this in `update_version.py` as it's not needed in qutebrowser itself. | qutebrowser-qutebrowser | py |
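A quick check of the question in the review above (the repository path below is a hypothetical POSIX path): os.path.dirname(basedir) does resolve to the parent directory, so appdata_path points at misc/qutebrowser.appdata.xml one level above the package.

import os.path

basedir = '/repo/qutebrowser'          # hypothetical install location
appdata_path = os.path.join(os.path.dirname(basedir), 'misc',
                            'qutebrowser.appdata.xml')
assert appdata_path == '/repo/misc/qutebrowser.appdata.xml'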
@@ -750,8 +750,8 @@ func TestConfigCheck(t *testing.T) {
http_port = 8222
`,
err: errors.New(`Duplicate user "foo" detected`),
- errorLine: 6,
- errorPos: 21,
+ errorLine: 5,
+ errorPos: 19,
},
{
name: "when accounts block imports are not a list", | 1 | // Copyright 2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"errors"
"fmt"
"os"
"strings"
"testing"
)
func TestConfigCheck(t *testing.T) {
tests := []struct {
// name is the name of the test.
name string
// config is content of the configuration file.
config string
// warningErr is an error that does not prevent server from starting.
warningErr error
// errorLine is the location of the error.
errorLine int
// errorPos is the position of the error.
errorPos int
// warning errors also include a reason optionally.
reason string
		// err is the expected configuration error, which includes the source of the error.
err error
}{
{
name: "when unknown field is used at top level",
config: `
monitor = "127.0.0.1:4442"
`,
err: errors.New(`unknown field "monitor"`),
errorLine: 2,
errorPos: 17,
},
{
name: "when default permissions are used at top level",
config: `
"default_permissions" {
publish = ["_SANDBOX.>"]
subscribe = ["_SANDBOX.>"]
}
`,
err: errors.New(`unknown field "default_permissions"`),
errorLine: 2,
errorPos: 18,
},
{
name: "when authorization config is empty",
config: `
authorization = {
}
`,
err: nil,
},
{
name: "when authorization config has unknown fields",
config: `
authorization = {
foo = "bar"
}
`,
err: errors.New(`unknown field "foo"`),
errorLine: 3,
errorPos: 5,
},
{
name: "when authorization config has unknown fields",
config: `
port = 4222
authorization = {
user = "hello"
foo = "bar"
password = "world"
}
`,
err: errors.New(`unknown field "foo"`),
errorLine: 6,
errorPos: 5,
},
{
name: "when user authorization config has unknown fields",
config: `
authorization = {
users = [
{
user = "foo"
pass = "bar"
token = "quux"
}
]
}
`,
err: errors.New(`unknown field "token"`),
errorLine: 7,
errorPos: 9,
},
{
name: "when user authorization permissions config has unknown fields",
config: `
authorization {
permissions {
subscribe = {}
inboxes = {}
publish = {}
}
}
`,
err: errors.New(`Unknown field "inboxes" parsing permissions`),
errorLine: 5,
errorPos: 7,
},
{
name: "when user authorization permissions config has unknown fields within allow or deny",
config: `
authorization {
permissions {
subscribe = {
allow = ["hello", "world"]
deny = ["foo", "bar"]
denied = "_INBOX.>"
}
publish = {}
}
}
`,
err: errors.New(`Unknown field name "denied" parsing subject permissions, only 'allow' or 'deny' are permitted`),
errorLine: 7,
errorPos: 9,
},
{
name: "when user authorization permissions config has unknown fields within allow or deny",
config: `
authorization {
permissions {
publish = {
allow = ["hello", "world"]
deny = ["foo", "bar"]
allowed = "_INBOX.>"
}
subscribe = {}
}
}
`,
err: errors.New(`Unknown field name "allowed" parsing subject permissions, only 'allow' or 'deny' are permitted`),
errorLine: 7,
errorPos: 9,
},
{
name: "when user authorization permissions config has unknown fields using arrays",
config: `
authorization {
default_permissions {
subscribe = ["a"]
publish = ["b"]
inboxes = ["c"]
}
users = [
{
user = "foo"
pass = "bar"
}
]
}
`,
err: errors.New(`Unknown field "inboxes" parsing permissions`),
errorLine: 7,
errorPos: 6,
},
{
name: "when user authorization permissions config has unknown fields using strings",
config: `
authorization {
default_permissions {
subscribe = "a"
requests = "b"
publish = "c"
}
users = [
{
user = "foo"
pass = "bar"
}
]
}
`,
err: errors.New(`Unknown field "requests" parsing permissions`),
errorLine: 6,
errorPos: 6,
},
{
name: "when user authorization permissions config is empty",
config: `
authorization = {
users = [
{
user = "foo", pass = "bar", permissions = {
}
}
]
}
`,
err: nil,
},
{
name: "when unknown permissions are included in user config",
config: `
authorization = {
users = [
{
user = "foo", pass = "bar", permissions {
inboxes = true
}
}
]
}
`,
err: errors.New(`Unknown field "inboxes" parsing permissions`),
errorLine: 6,
errorPos: 11,
},
{
name: "when clustering config is empty",
config: `
cluster = {
}
`,
err: nil,
},
{
name: "when unknown option is in clustering config",
config: `
# NATS Server Configuration
port = 4222
cluster = {
port = 6222
foo = "bar"
authorization {
user = "hello"
pass = "world"
}
}
`,
err: errors.New(`unknown field "foo"`),
errorLine: 9,
errorPos: 5,
},
{
name: "when unknown option is in clustering authorization config",
config: `
cluster = {
authorization {
foo = "bar"
}
}
`,
err: errors.New(`unknown field "foo"`),
errorLine: 4,
errorPos: 7,
},
{
name: "when unknown option is in tls config",
config: `
tls = {
hello = "world"
}
`,
err: errors.New(`error parsing tls config, unknown field ["hello"]`),
errorLine: 3,
errorPos: 5,
},
{
name: "when unknown option is in cluster tls config",
config: `
cluster {
tls = {
foo = "bar"
}
}
`,
err: errors.New(`error parsing tls config, unknown field ["foo"]`),
errorLine: 4,
errorPos: 7,
},
{
name: "when using cipher suites in the TLS config",
config: `
tls = {
cipher_suites: [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
]
preferences = []
}
`,
err: errors.New(`error parsing tls config, unknown field ["preferences"]`),
errorLine: 7,
errorPos: 7,
},
{
name: "when using curve preferences in the TLS config",
config: `
tls = {
curve_preferences: [
"CurveP256",
"CurveP384",
"CurveP521"
]
suites = []
}
`,
err: errors.New(`error parsing tls config, unknown field ["suites"]`),
errorLine: 8,
errorPos: 7,
},
{
name: "when using curve preferences in the TLS config",
config: `
tls = {
curve_preferences: [
"CurveP5210000"
]
}
`,
err: errors.New(`unrecognized curve preference CurveP5210000`),
errorLine: 4,
errorPos: 5,
},
{
name: "when unknown option is in cluster config with defined routes",
config: `
cluster {
port = 6222
routes = [
nats://127.0.0.1:6222
]
peers = []
}
`,
err: errors.New(`unknown field "peers"`),
errorLine: 7,
errorPos: 5,
},
{
name: "when used as variable in authorization block it should not be considered as unknown field",
config: `
# listen: 127.0.0.1:-1
listen: 127.0.0.1:4222
authorization {
# Superuser can do anything.
super_user = {
publish = ">"
subscribe = ">"
}
# Can do requests on foo or bar, and subscribe to anything
# that is a response to an _INBOX.
#
# Notice that authorization filters can be singletons or arrays.
req_pub_user = {
publish = ["req.foo", "req.bar"]
subscribe = "_INBOX.>"
}
# Setup a default user that can subscribe to anything, but has
# no publish capabilities.
default_user = {
subscribe = "PUBLIC.>"
}
unused = "hello"
# Default permissions if none presented. e.g. susan below.
default_permissions: $default_user
		  # Users listed with permissions.
users = [
{user: alice, password: foo, permissions: $super_user}
{user: bob, password: bar, permissions: $req_pub_user}
{user: susan, password: baz}
]
}
`,
err: errors.New(`unknown field "unused"`),
errorLine: 27,
errorPos: 5,
},
{
name: "when used as variable in top level config it should not be considered as unknown field",
config: `
monitoring_port = 8222
http_port = $monitoring_port
port = 4222
`,
err: nil,
},
{
name: "when used as variable in cluster config it should not be considered as unknown field",
config: `
cluster {
clustering_port = 6222
port = $clustering_port
}
`,
err: nil,
},
{
name: "when setting permissions within cluster authorization block",
config: `
cluster {
authorization {
permissions = {
publish = { allow = ["foo", "bar"] }
}
}
permissions = {
publish = { deny = ["foo", "bar"] }
}
}
`,
warningErr: errors.New(`invalid use of field "authorization"`),
errorLine: 3,
errorPos: 5,
reason: `setting "permissions" within cluster authorization block is deprecated`,
},
{
name: "when write deadline is used with deprecated usage",
config: `
write_deadline = 100
`,
warningErr: errors.New(`invalid use of field "write_deadline"`),
errorLine: 2,
errorPos: 17,
reason: `write_deadline should be converted to a duration`,
},
/////////////////////
// ACCOUNTS //
/////////////////////
{
name: "when accounts block is correctly configured",
config: `
http_port = 8222
accounts {
#
# synadia > nats.io, cncf
#
synadia {
# SAADJL5XAEM6BDYSWDTGVILJVY54CQXZM5ZLG4FRUAKB62HWRTPNSGXOHA
nkey = "AC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"
users [
{
# SUAEL6RU3BSDAFKOHNTEOK5Q6FTM5FTAMWVIKBET6FHPO4JRII3CYELVNM
nkey = "UCARKS2E3KVB7YORL2DG34XLT7PUCOL2SVM7YXV6ETHLW6Z46UUJ2VZ3"
}
]
exports = [
{ service: "synadia.requests", accounts: [nats, cncf] }
]
}
#
# nats < synadia
#
nats {
# SUAJTM55JH4BNYDA22DMDZJSRBRKVDGSLYK2HDIOCM3LPWCDXIDV5Q4CIE
nkey = "ADRZ42QBM7SXQDXXTSVWT2WLLFYOQGAFC4TO6WOAXHEKQHIXR4HFYJDS"
users [
{
# SUADZTYQAKTY5NQM7XRB5XR3C24M6ROGZLBZ6P5HJJSSOFUGC5YXOOECOM
nkey = "UD6AYQSOIN2IN5OGC6VQZCR4H3UFMIOXSW6NNS6N53CLJA4PB56CEJJI"
}
]
imports = [
# This account has to send requests to 'nats.requests' subject
{ service: { account: "synadia", subject: "synadia.requests" }, to: "nats.requests" }
]
}
#
# cncf < synadia
#
cncf {
# SAAFHDZX7SGZ2SWHPS22JRPPK5WX44NPLNXQHR5C5RIF6QRI3U65VFY6C4
nkey = "AD4YRVUJF2KASKPGRMNXTYKIYSCB3IHHB4Y2ME6B2PDIV5QJ23C2ZRIT"
users [
{
# SUAKINP3Z2BPUXWOFSW2FZC7TFJCMMU7DHKP2C62IJQUDASOCDSTDTRMJQ
nkey = "UB57IEMPG4KOTPFV5A66QKE2HZ3XBXFHVRCCVMJEWKECMVN2HSH3VTSJ"
}
]
imports = [
# This account has to send requests to 'synadia.requests' subject
{ service: { account: "synadia", subject: "synadia.requests" } }
]
}
}
`,
err: nil,
},
{
name: "when nkey is invalid within accounts block",
config: `
accounts {
#
# synadia > nats.io, cncf
#
synadia {
# SAADJL5XAEM6BDYSWDTGVILJVY54CQXZM5ZLG4FRUAKB62HWRTPNSGXOHA
nkey = "AC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"
users [
{
# SUAEL6RU3BSDAFKOHNTEOK5Q6FTM5FTAMWVIKBET6FHPO4JRII3CYELVNM
nkey = "SCARKS2E3KVB7YORL2DG34XLT7PUCOL2SVM7YXV6ETHLW6Z46UUJ2VZ3"
}
]
exports = [
{ service: "synadia.requests", accounts: [nats, cncf] }
]
}
#
# nats < synadia
#
nats {
# SUAJTM55JH4BNYDA22DMDZJSRBRKVDGSLYK2HDIOCM3LPWCDXIDV5Q4CIE
nkey = "ADRZ42QBM7SXQDXXTSVWT2WLLFYOQGAFC4TO6WOAXHEKQHIXR4HFYJDS"
users [
{
# SUADZTYQAKTY5NQM7XRB5XR3C24M6ROGZLBZ6P5HJJSSOFUGC5YXOOECOM
nkey = "UD6AYQSOIN2IN5OGC6VQZCR4H3UFMIOXSW6NNS6N53CLJA4PB56CEJJI"
}
]
imports = [
# This account has to send requests to 'nats.requests' subject
{ service: { account: "synadia", subject: "synadia.requests" }, to: "nats.requests" }
]
}
#
# cncf < synadia
#
cncf {
# SAAFHDZX7SGZ2SWHPS22JRPPK5WX44NPLNXQHR5C5RIF6QRI3U65VFY6C4
nkey = "AD4YRVUJF2KASKPGRMNXTYKIYSCB3IHHB4Y2ME6B2PDIV5QJ23C2ZRIT"
users [
{
# SUAKINP3Z2BPUXWOFSW2FZC7TFJCMMU7DHKP2C62IJQUDASOCDSTDTRMJQ
nkey = "UB57IEMPG4KOTPFV5A66QKE2HZ3XBXFHVRCCVMJEWKECMVN2HSH3VTSJ"
}
]
imports = [
# This account has to send requests to 'synadia.requests' subject
{ service: { account: "synadia", subject: "synadia.requests" } }
]
}
}
`,
err: errors.New(`Not a valid public nkey for a user`),
errorLine: 14,
errorPos: 11,
},
{
name: "when accounts block has unknown fields",
config: `
http_port = 8222
accounts {
foo = "bar"
}`,
err: errors.New(`Expected map entries for accounts`),
errorLine: 5,
errorPos: 19,
},
{
name: "when accounts has a referenced config variable within same block",
config: `
accounts {
PERMISSIONS = {
publish = {
allow = ["foo","bar"]
deny = ["quux"]
}
}
synadia {
nkey = "AC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"
users [
{
nkey = "UCARKS2E3KVB7YORL2DG34XLT7PUCOL2SVM7YXV6ETHLW6Z46UUJ2VZ3"
permissions = $PERMISSIONS
}
]
exports = [
{ stream: "synadia.>" }
]
}
}`,
err: nil,
},
{
name: "when accounts has an unreferenced config variables within same block",
config: `
accounts {
PERMISSIONS = {
publish = {
allow = ["foo","bar"]
deny = ["quux"]
}
}
synadia {
nkey = "AC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"
users [
{
nkey = "UCARKS2E3KVB7YORL2DG34XLT7PUCOL2SVM7YXV6ETHLW6Z46UUJ2VZ3"
}
]
exports = [
{ stream: "synadia.>" }
]
}
}`,
err: errors.New(`unknown field "publish"`),
errorLine: 4,
errorPos: 5,
},
{
name: "when accounts block defines a global account",
config: `
http_port = 8222
accounts {
$G = {
}
}
`,
err: errors.New(`"$G" is a Reserved Account`),
errorLine: 5,
errorPos: 19,
},
{
name: "when accounts block uses an invalid public key",
config: `
accounts {
synadia = {
nkey = "invalid"
}
}
`,
err: errors.New(`Not a valid public nkey for an account: "invalid"`),
errorLine: 4,
errorPos: 21,
},
{
name: "when accounts list includes reserved account",
config: `
port = 4222
accounts = [foo, bar, "$G"]
http_port = 8222
`,
err: errors.New(`"$G" is a Reserved Account`),
errorLine: 4,
errorPos: 26,
},
{
name: "when accounts list includes a dupe entry",
config: `
port = 4222
accounts = [foo, bar, bar]
http_port = 8222
`,
err: errors.New(`Duplicate Account Entry: bar`),
errorLine: 4,
errorPos: 25,
},
{
name: "when accounts block includes a dupe user",
config: `
port = 4222
accounts = {
nats {
users = [
{ user: "foo", pass: "bar" },
{ user: "hello", pass: "world" },
{ user: "foo", pass: "bar" }
]
}
}
http_port = 8222
`,
err: errors.New(`Duplicate user "foo" detected`),
errorLine: 6,
errorPos: 21,
},
{
name: "when accounts block imports are not a list",
config: `
port = 4222
accounts = {
nats {
imports = true
}
}
http_port = 8222
`,
err: errors.New(`Imports should be an array, got bool`),
errorLine: 6,
errorPos: 21,
},
{
name: "when accounts block exports are not a list",
config: `
port = 4222
accounts = {
nats {
exports = true
}
}
http_port = 8222
`,
err: errors.New(`Exports should be an array, got bool`),
errorLine: 6,
errorPos: 21,
},
{
name: "when accounts block imports items are not a map",
config: `
port = 4222
accounts = {
nats {
imports = [
false
]
}
}
http_port = 8222
`,
err: errors.New(`Import Items should be a map with type entry, got bool`),
errorLine: 7,
errorPos: 23,
},
{
name: "when accounts block export items are not a map",
config: `
port = 4222
accounts = {
nats {
exports = [
false
]
}
}
http_port = 8222
`,
err: errors.New(`Export Items should be a map with type entry, got bool`),
errorLine: 7,
errorPos: 23,
},
{
name: "when accounts exports has a stream name that is not a string",
config: `
port = 4222
accounts = {
nats {
exports = [
{
stream: false
}
]
}
}
http_port = 8222
`,
err: errors.New(`Expected stream name to be string, got bool`),
errorLine: 8,
errorPos: 25,
},
{
name: "when accounts exports has a service name that is not a string",
config: `
accounts = {
nats {
exports = [
{
service: false
}
]
}
}
`,
err: errors.New(`Expected service name to be string, got bool`),
errorLine: 6,
errorPos: 25,
},
{
name: "when accounts imports stream without name",
config: `
port = 4222
accounts = {
nats {
imports = [
{ stream: { }}
]
}
}
http_port = 8222
`,
err: errors.New(`Expect an account name and a subject`),
errorLine: 7,
errorPos: 25,
},
{
name: "when accounts imports service without name",
config: `
port = 4222
accounts = {
nats {
imports = [
{ service: { }}
]
}
}
http_port = 8222
`,
err: errors.New(`Expect an account name and a subject`),
errorLine: 7,
errorPos: 25,
},
{
name: "when user authorization config has both token and users",
config: `
authorization = {
token = "s3cr3t"
users = [
{
user = "foo"
pass = "bar"
}
]
}
`,
err: errors.New(`Can not have a token and a users array`),
errorLine: 2,
errorPos: 3,
},
{
name: "when user authorization config has both token and user",
config: `
authorization = {
user = "foo"
pass = "bar"
users = [
{
user = "foo"
pass = "bar"
}
]
}
`,
err: errors.New(`Can not have a single user/pass and a users array`),
errorLine: 2,
errorPos: 3,
},
{
name: "when user authorization config has users not as a list",
config: `
authorization = {
users = false
}
`,
err: errors.New(`Expected users field to be an array, got false`),
errorLine: 3,
errorPos: 5,
},
{
name: "when user authorization config has users not as a map",
config: `
authorization = {
users = [false]
}
`,
err: errors.New(`Expected user entry to be a map/struct, got false`),
errorLine: 3,
errorPos: 14,
},
{
name: "when user authorization config has permissions not as a map",
config: `
authorization = {
users = [{user: hello, pass: world}]
permissions = false
}
`,
err: errors.New(`Expected permissions to be a map/struct, got false`),
errorLine: 4,
errorPos: 19,
},
{
name: "when user authorization permissions config has invalid fields within allow",
config: `
authorization {
permissions {
publish = {
allow = [false, "hello", "world"]
deny = ["foo", "bar"]
}
subscribe = {}
}
}
`,
err: errors.New(`Subject in permissions array cannot be cast to string`),
errorLine: 5,
errorPos: 18,
},
{
name: "when user authorization permissions config has invalid fields within deny",
config: `
authorization {
permissions {
publish = {
allow = ["hello", "world"]
deny = [true, "foo", "bar"]
}
subscribe = {}
}
}
`,
err: errors.New(`Subject in permissions array cannot be cast to string`),
errorLine: 6,
errorPos: 17,
},
{
name: "when user authorization permissions config has invalid type",
config: `
authorization {
permissions {
publish = {
allow = false
}
subscribe = {}
}
}
`,
err: errors.New(`Expected subject permissions to be a subject, or array of subjects, got bool`),
errorLine: 5,
errorPos: 9,
},
{
name: "when user authorization permissions subject is invalid",
config: `
authorization {
permissions {
publish = {
allow = ["foo..bar"]
}
subscribe = {}
}
}
`,
err: errors.New(`subject "foo..bar" is not a valid subject`),
errorLine: 5,
errorPos: 9,
},
{
name: "when cluster config listen is invalid",
config: `
cluster {
listen = "0.0.0.0:XXXX"
}
`,
err: errors.New(`could not parse port "XXXX"`),
errorLine: 3,
errorPos: 5,
},
{
name: "when cluster config includes multiple users",
config: `
cluster {
authorization {
users = []
}
}
`,
err: errors.New(`Cluster authorization does not allow multiple users`),
errorLine: 3,
errorPos: 5,
},
{
name: "when cluster routes are invalid",
config: `
cluster {
routes = [
"0.0.0.0:XXXX"
# "0.0.0.0:YYYY"
# "0.0.0.0:ZZZZ"
]
}
`,
err: errors.New(`error parsing route url ["0.0.0.0:XXXX"]`),
errorLine: 4,
errorPos: 22,
},
{
name: "when setting invalid TLS config within cluster block",
config: `
cluster {
tls {
}
}
`,
err: nil,
errorLine: 0,
errorPos: 0,
},
{
name: "invalid lame_duck_duration type",
config: `
lame_duck_duration: abc
`,
err: errors.New(`error parsing lame_duck_duration: time: invalid duration abc`),
errorLine: 2,
errorPos: 3,
},
{
name: "when only setting TLS timeout for a leafnode remote",
config: `
leafnodes {
remotes = [
{
url: "tls://connect.ngs.global:7422"
tls {
timeout: 0.01
}
}
]
}`,
err: nil,
errorLine: 0,
errorPos: 0,
},
{
name: "when setting latency tracking without a system account",
config: `
accounts {
sys { users = [ {user: sys, pass: "" } ] }
nats.io: {
users = [ { user : bar, pass: "" } ]
exports = [
{ service: "nats.add"
response: singleton
latency: {
sampling: 100%
subject: "latency.tracking.add"
}
}
]
}
}
`,
err: errors.New(`Error adding service latency sampling for "nats.add": system account not setup`),
errorLine: 2,
errorPos: 17,
},
{
name: "when setting latency tracking with a system account",
config: `
system_account: sys
accounts {
sys { users = [ {user: sys, pass: "" } ] }
nats.io: {
users = [ { user : bar, pass: "" } ]
exports = [
{ service: "nats.add"
response: singleton
latency: {
sampling: 100%
subject: "latency.tracking.add"
}
}
]
}
}
`,
err: nil,
errorLine: 0,
errorPos: 0,
},
{
name: "when setting latency tracking with an invalid publish subject",
config: `
system_account = sys
accounts {
sys { users = [ {user: sys, pass: "" } ] }
nats.io: {
users = [ { user : bar, pass: "" } ]
exports = [
{ service: "nats.add"
response: singleton
latency: "*"
}
]
}
}
`,
err: errors.New(`Error adding service latency sampling for "nats.add" on subject "*": invalid publish subject`),
errorLine: 3,
errorPos: 17,
},
{
name: "when setting latency tracking on a stream",
config: `
system_account = sys
accounts {
sys { users = [ {user: sys, pass: "" } ] }
nats.io: {
users = [ { user : bar, pass: "" } ]
exports = [
{ stream: "nats.add"
latency: "foo"
}
]
}
}
`,
err: errors.New(`Detected latency directive on non-service`),
errorLine: 11,
errorPos: 25,
},
{
name: "when using duplicate service import subject",
config: `
accounts {
A: {
users = [ {user: user1, pass: ""} ]
exports = [
{service: "remote1"}
{service: "remote2"}
]
}
B: {
users = [ {user: user2, pass: ""} ]
imports = [
{service: {account: "A", subject: "remote1"}, to: "local"}
{service: {account: "A", subject: "remote2"}, to: "local"}
]
}
}
`,
err: errors.New(`Duplicate service import subject "local", previously used in import for account "A", subject "remote1"`),
errorLine: 14,
errorPos: 71,
},
{
name: "mixing single and multi users in leafnode authorization",
config: `
leafnodes {
authorization {
user: user1
password: pwd
users = [{user: user2, password: pwd}]
}
}
`,
err: errors.New("can not have a single user/pass and a users array"),
errorLine: 3,
errorPos: 20,
},
{
name: "duplicate usernames in leafnode authorization",
config: `
leafnodes {
authorization {
users = [
{user: user, password: pwd}
{user: user, password: pwd}
]
}
}
`,
err: errors.New(`duplicate user "user" detected in leafnode authorization`),
errorLine: 3,
errorPos: 20,
},
}
checkConfig := func(config string) error {
opts := &Options{
CheckConfig: true,
}
return opts.ProcessConfigFile(config)
}
checkErr := func(t *testing.T, err, expectedErr error) {
t.Helper()
switch {
case err == nil && expectedErr == nil:
// OK
case err != nil && expectedErr == nil:
t.Errorf("Unexpected error after processing config: %s", err)
case err == nil && expectedErr != nil:
t.Errorf("Expected %q error after processing invalid config but got nothing", expectedErr)
}
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
conf := createConfFile(t, []byte(test.config))
defer os.Remove(conf)
err := checkConfig(conf)
var expectedErr error
// Check for either warnings or errors.
if test.err != nil {
expectedErr = test.err
} else if test.warningErr != nil {
expectedErr = test.warningErr
}
if err != nil && expectedErr != nil {
msg := fmt.Sprintf("%s:%d:%d: %s", conf, test.errorLine, test.errorPos, expectedErr.Error())
if test.reason != "" {
msg += ": " + test.reason
}
msg += "\n"
if err.Error() != msg {
t.Errorf("Expected:\n%q\ngot:\n%q", msg, err.Error())
}
}
checkErr(t, err, expectedErr)
})
}
}
func TestConfigCheckIncludes(t *testing.T) {
// Check happy path first.
opts := &Options{
CheckConfig: true,
}
err := opts.ProcessConfigFile("./configs/include_conf_check_a.conf")
if err != nil {
t.Errorf("Unexpected error processing include files with configuration check enabled: %v", err)
}
opts = &Options{
CheckConfig: true,
}
err = opts.ProcessConfigFile("./configs/include_bad_conf_check_a.conf")
if err == nil {
t.Errorf("Expected error processing include files with configuration check enabled: %v", err)
}
expectedErr := `include_bad_conf_check_b.conf:10:19: unknown field "monitoring_port"` + "\n"
if err != nil && !strings.HasSuffix(err.Error(), expectedErr) {
t.Errorf("Expected: \n%q, got\n: %q", expectedErr, err.Error())
}
}
func TestConfigCheckMultipleErrors(t *testing.T) {
opts := &Options{
CheckConfig: true,
}
err := opts.ProcessConfigFile("./configs/multiple_errors.conf")
if err == nil {
t.Errorf("Expected error processing config files with multiple errors check enabled: %v", err)
}
cerr, ok := err.(*processConfigErr)
if !ok {
t.Fatalf("Expected a configuration process error")
}
got := len(cerr.Warnings())
expected := 1
if got != expected {
t.Errorf("Expected a %d warning, got: %d", expected, got)
}
got = len(cerr.Errors())
expected = 7
if got != expected {
t.Errorf("Expected %d errors, got: %d", expected, got)
}
errMsg := err.Error()
errs := []string{
`./configs/multiple_errors.conf:12:1: invalid use of field "write_deadline": write_deadline should be converted to a duration`,
`./configs/multiple_errors.conf:2:1: Cannot have a user/pass and token`,
`./configs/multiple_errors.conf:10:1: unknown field "monitoring"`,
`./configs/multiple_errors.conf:67:3: Cluster authorization does not allow multiple users`,
`./configs/multiple_errors.conf:21:5: Not a valid public nkey for an account: "OC5GRL36RQV7MJ2GT6WQSCKDKJKYTK4T2LGLWJ2SEJKRDHFOQQWGGFQL"`,
`./configs/multiple_errors.conf:26:9: Not a valid public nkey for a user`,
`./configs/multiple_errors.conf:36:5: Not a valid public nkey for an account: "ODRZ42QBM7SXQDXXTSVWT2WLLFYOQGAFC4TO6WOAXHEKQHIXR4HFYJDS"`,
`./configs/multiple_errors.conf:41:9: Not a valid public nkey for a user`,
}
for _, msg := range errs {
found := strings.Contains(errMsg, msg)
if !found {
t.Errorf("Expected to find error %q", msg)
}
}
}
| 1 | 10,607 | Revert this change and see below why... | nats-io-nats-server | go |
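As a side note on the harness above: the config-check mode is driven entirely by Options.CheckConfig plus ProcessConfigFile, so a standalone checker is only a few lines. A minimal sketch, assuming the usual nats-server import path and a hypothetical nats.conf path:

package main

import (
	"fmt"
	"os"

	"github.com/nats-io/nats-server/v2/server" // import path assumed
)

func main() {
	// Mirror the tests above: enable check-only mode, then parse the file.
	opts := &server.Options{CheckConfig: true}
	if err := opts.ProcessConfigFile("nats.conf"); err != nil {
		fmt.Fprintf(os.Stderr, "config check failed: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("config OK")
}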
@@ -1,6 +1,6 @@
_base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
- '../_base_/datasets/coco_instance.py',
+ '../_base_/datasets/lvis_v1_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
| 1 | _base_ = [
'../_base_/models/cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/coco_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='torchvision://resnet101')),
roi_head=dict(
bbox_head=[
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='Shared2FCBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=1203,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
loss_cls=dict(
type='SeesawLoss',
p=0.8,
q=2.0,
num_classes=1203,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_head=dict(num_classes=1203)),
test_cfg=dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300)))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_train.json',
img_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root,
pipeline=test_pipeline))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
| 1 | 26,795 | Should not switch to `lvis_v1_instance` here because that base config uses ClassBalancedDataset to oversample the data. | open-mmlab-mmdetection | py |
@@ -70,7 +70,7 @@ public class NodeOptions {
Capabilities caps = info.getCanonicalCapabilities();
builders.stream()
.filter(builder -> builder.score(caps) > 0)
- .peek(builder -> LOG.info(String.format("Adding %s %d times", caps, info.getMaximumSimultaneousSessions())))
+ .peek(builder -> LOG.finest(String.format("Adding %s %d times", caps, info.getMaximumSimultaneousSessions())))
.forEach(builder -> {
DriverService.Builder freePortBuilder = builder.usingAnyFreePort();
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package org.openqa.selenium.grid.node.config;
import io.opentracing.Tracer;
import org.openqa.selenium.Capabilities;
import org.openqa.selenium.WebDriverInfo;
import org.openqa.selenium.grid.config.Config;
import org.openqa.selenium.grid.node.local.LocalNode;
import org.openqa.selenium.remote.http.HttpClient;
import org.openqa.selenium.remote.service.DriverService;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.ServiceLoader;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
public class NodeOptions {
public static final Logger LOG = Logger.getLogger(NodeOptions.class.getName());
private final Config config;
public NodeOptions(Config config) {
this.config = Objects.requireNonNull(config);
}
public void configure(Tracer tracer, HttpClient.Factory httpClientFactory, LocalNode.Builder node) {
if (!config.getBool("node", "detect-drivers").orElse(false)) {
return;
}
addSystemDrivers(tracer, httpClientFactory, node);
}
private void addSystemDrivers(
Tracer tracer,
HttpClient.Factory clientFactory,
LocalNode.Builder node) {
// We don't expect duplicates, but they're fine
List<WebDriverInfo> infos =
StreamSupport.stream(ServiceLoader.load(WebDriverInfo.class).spliterator(), false)
.filter(WebDriverInfo::isAvailable)
.collect(Collectors.toList());
// Same
List<DriverService.Builder> builders = new ArrayList<>();
ServiceLoader.load(DriverService.Builder.class).forEach(builders::add);
infos.forEach(info -> {
Capabilities caps = info.getCanonicalCapabilities();
builders.stream()
.filter(builder -> builder.score(caps) > 0)
.peek(builder -> LOG.info(String.format("Adding %s %d times", caps, info.getMaximumSimultaneousSessions())))
.forEach(builder -> {
DriverService.Builder freePortBuilder = builder.usingAnyFreePort();
for (int i = 0; i < info.getMaximumSimultaneousSessions(); i++) {
node.add(
caps,
new DriverServiceSessionFactory(
tracer,
clientFactory, c -> freePortBuilder.score(c) > 0,
freePortBuilder));
}
});
});
}
}
| 1 | 17,124 | This is an informational message that allows someone to read the console output and understand how the grid node is configured. Please leave. | SeleniumHQ-selenium | java |
@@ -52,6 +52,9 @@ type Alert interface {
// GetKvdbInstance.
GetKvdbInstance() kvdb.Kvdb
+ // RaiseSingleton raises a singleton alert.
+ RaiseSingleton(alert *api.Alert) error
+
// Raise raises an Alert.
Raise(alert *api.Alert) error
| 1 | package alert
import (
"errors"
"fmt"
"sync"
"time"
"github.com/libopenstorage/openstorage/api"
"github.com/portworx/kvdb"
)
var (
// ErrNotSupported implemenation of a specific function is not supported.
ErrNotSupported = errors.New("Implementation not supported")
// ErrNotFound raised if Key is not found.
ErrNotFound = errors.New("Key not found")
// ErrExist raised if key already exists.
ErrExist = errors.New("Key already exists")
// ErrUnmarshal raised if Get fails to unmarshal value.
ErrUnmarshal = errors.New("Failed to unmarshal value")
// ErrIllegal raised if object is not valid.
ErrIllegal = errors.New("Illegal operation")
// ErrNotInitialized raised if alert not initialized.
ErrNotInitialized = errors.New("Alert not initialized")
// ErrAlertClientNotFound raised if no client implementation found.
ErrAlertClientNotFound = errors.New("Alert client not found")
// ErrResourceNotFound raised if ResourceType is not found>
ErrResourceNotFound = errors.New("Resource not found in Alert")
// ErrSubscribedRaise raised if unable to raise a subscribed alert
ErrSubscribedRaise = errors.New("Could not raise alert and its subscribed alerts")
instances = make(map[string]Alert)
drivers = make(map[string]InitFunc)
lock sync.RWMutex
)
// InitFunc initialization function for alert.
type InitFunc func(kv kvdb.Kvdb, clusterID string) (Alert, error)
// AlertWatcherFunc is a function type used as a callback for KV WatchTree.
type AlertWatcherFunc func(*api.Alert, api.AlertActionType, string, string) error
// Alert interface for Alert API.
type Alert interface {
fmt.Stringer
// Shutdown.
Shutdown()
// GetKvdbInstance.
GetKvdbInstance() kvdb.Kvdb
// Raise raises an Alert.
Raise(alert *api.Alert) error
// RaiseIfNotExist raises an Alert only if another alert with the given
// resource type, resource id, and unique tag doesn't already exist.
RaiseIfNotExist(alert *api.Alert) error
// Subscribe allows a child (dependent) alert to subscribe to a parent alert
Subscribe(parentAlertType int64, childAlert *api.Alert) error
// Retrieve retrieves specific Alert.
Retrieve(resourceType api.ResourceType, id int64) (*api.Alert, error)
// Enumerate enumerates Alert.
Enumerate(filter *api.Alert) ([]*api.Alert, error)
// EnumerateWithinTimeRange enumerates Alert between timeStart and timeEnd.
EnumerateWithinTimeRange(
timeStart time.Time,
timeEnd time.Time,
resourceType api.ResourceType,
) ([]*api.Alert, error)
// Erase erases an Alert.
Erase(resourceType api.ResourceType, alertID int64) error
// Clear an Alert.
Clear(resourceType api.ResourceType, alertID int64, ttl uint64) error
// Clear an Alert for a resource with a unique tag.
ClearByUniqueTag(
resourceType api.ResourceType,
resourceId string,
uniqueTag string,
ttl uint64,
) error
// Watch on all Alerts for the given clusterID. It uses the global kvdb
// options provided while creating the alertClient object to access this
// cluster
Watch(clusterID string, alertWatcher AlertWatcherFunc) error
}
// Shutdown the alert instance.
func Shutdown() {
lock.Lock()
defer lock.Unlock()
for _, v := range instances {
v.Shutdown()
}
}
// New returns a new alert instance tied with a clusterID and kvdb.
func New(name string, clusterID string, kv kvdb.Kvdb) (Alert, error) {
lock.Lock()
defer lock.Unlock()
if initFunc, exists := drivers[name]; exists {
driver, err := initFunc(kv, clusterID)
if err != nil {
return nil, err
}
instances[name] = driver
return driver, err
}
return nil, ErrNotSupported
}
// Register an alert interface.
func Register(name string, initFunc InitFunc) error {
lock.Lock()
defer lock.Unlock()
if _, exists := drivers[name]; exists {
return ErrExist
}
drivers[name] = initFunc
return nil
}
| 1 | 6,897 | This doesn't make sense to me. What does RaiseSingleton mean? To me it sounds like a single object is being... raised? Not sure. | libopenstorage-openstorage | go |
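For what it's worth, one plausible reading of "singleton" — sketched here purely as an illustration, not as the actual openstorage semantics — is an alert of which at most one instance may exist, which the interface can already express via RaiseIfNotExist:

package alert

import "github.com/libopenstorage/openstorage/api"

// raiseSingleton is a hypothetical helper illustrating one possible meaning
// of RaiseSingleton: raise the alert only when an equivalent one does not
// already exist, so at most a single instance is ever outstanding.
func raiseSingleton(a Alert, alert *api.Alert) error {
	return a.RaiseIfNotExist(alert)
}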
@@ -45,6 +45,7 @@ public class JavaProcessJobTest {
private JavaProcessJob job = null;
private Props props = null;
private Logger log = Logger.getLogger(JavaProcessJob.class);
+ private AllJobExecutorTests jobExecutorTests = null;
private static String classPaths;
| 1 | /*
* Copyright 2014 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.jobExecutor;
import java.io.IOException;
import java.io.File;
import java.util.Date;
import java.util.Properties;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import azkaban.flow.CommonJobProperties;
import azkaban.utils.Props;
public class JavaProcessJobTest {
@ClassRule
public static TemporaryFolder classTemp = new TemporaryFolder();
@Rule
public TemporaryFolder temp = new TemporaryFolder();
private JavaProcessJob job = null;
private Props props = null;
private Logger log = Logger.getLogger(JavaProcessJob.class);
private static String classPaths;
private static final String inputContent =
"Quick Change in Strategy for a Bookseller \n"
+ " By JULIE BOSMAN \n"
+ "Published: August 11, 2010 \n"
+ " \n"
+ "Twelve years later, it may be Joe Fox's turn to worry. Readers have gone from skipping small \n"
+ "bookstores to wondering if they need bookstores at all. More people are ordering books online \n"
+ "or plucking them from the best-seller bin at Wal-Mart";
private static final String errorInputContent =
inputContent
+ "\n stop_here "
+ "But the threat that has the industry and some readers the most rattled is the growth of e-books. \n"
+ " In the first five months of 2009, e-books made up 2.9 percent of trade book sales. In the same period \n"
+ "in 2010, sales of e-books, which generally cost less than hardcover books, grew to 8.5 percent, according \n"
+ "to the Association of American Publishers, spurred by sales of the Amazon Kindle and the new Apple iPad. \n"
+ "For Barnes & Noble, long the largest and most powerful bookstore chain in the country, the new competition \n"
+ "has led to declining profits and store traffic.";
private static String inputFile;
private static String errorInputFile;
private static String outputFile;
@BeforeClass
public static void init() throws IOException {
// Get the classpath
Properties prop = System.getProperties();
classPaths =
String.format("'%s'", prop.getProperty("java.class.path", null));
long time = (new Date()).getTime();
inputFile = classTemp.newFile("azkaban_input_" + time).getCanonicalPath();
errorInputFile =
classTemp.newFile("azkaban_input_error_" + time).getCanonicalPath();
outputFile = classTemp.newFile("azkaban_output_" + time).getCanonicalPath();
// Dump input files
try {
Utils.dumpFile(inputFile, inputContent);
Utils.dumpFile(errorInputFile, errorInputContent);
} catch (IOException e) {
e.printStackTrace(System.err);
Assert.fail("error in creating input file:" + e.getLocalizedMessage());
}
}
@AfterClass
public static void cleanup() {
classTemp.delete();
}
@Before
public void setUp() throws IOException {
File workingDir = temp.newFolder("testJavaProcess");
// Initialize job
props = new Props();
props.put(AbstractProcessJob.WORKING_DIR, workingDir.getCanonicalPath());
props.put("type", "java");
props.put("fullPath", ".");
props.put(CommonJobProperties.PROJECT_NAME, "test_project");
props.put(CommonJobProperties.FLOW_ID, "test_flow");
props.put(CommonJobProperties.JOB_ID, "test_job");
props.put(CommonJobProperties.EXEC_ID, "123");
props.put(CommonJobProperties.SUBMIT_USER, "test_user");
props.put("execute.as.user", "false");
job = new JavaProcessJob("testJavaProcess", props, props, log);
}
@After
public void tearDown() {
temp.delete();
}
@Test
public void testJavaJob() throws Exception {
// initialize the Props
props.put(JavaProcessJob.JAVA_CLASS,
"azkaban.jobExecutor.WordCountLocal");
props.put("input", inputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
job.run();
}
@Test
public void testJavaJobHashmap() throws Exception {
// initialize the Props
props.put(JavaProcessJob.JAVA_CLASS,
"azkaban.executor.SleepJavaJob");
props.put("seconds", 1);
props.put("input", inputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
job.run();
}
@Test
public void testFailedJavaJob() throws Exception {
props.put(JavaProcessJob.JAVA_CLASS,
"azkaban.jobExecutor.WordCountLocal");
props.put("input", errorInputFile);
props.put("output", outputFile);
props.put("classpath", classPaths);
try {
job.run();
} catch (RuntimeException e) {
Assert.assertTrue(true);
}
}
}
| 1 | 11,834 | What's the benefit of having jobExecutorTests as a member variable? What do you think about making the method static? After all, it is a stateless method. | azkaban-azkaban | java |
@@ -12,6 +12,7 @@ const mdUsageStr = `Usage:
The possible subcommands are:
dump Dump metadata objects
+ check Check metadata objects and their associated blocks for errors
`
| 1 | package main
import (
"fmt"
"github.com/keybase/kbfs/libkbfs"
"golang.org/x/net/context"
)
const mdUsageStr = `Usage:
kbfstool md [<subcommand>] [<args>]
The possible subcommands are:
dump Dump metadata objects
`
func mdMain(ctx context.Context, config libkbfs.Config, args []string) (exitStatus int) {
if len(args) < 1 {
fmt.Print(mdUsageStr)
return 1
}
cmd := args[0]
args = args[1:]
switch cmd {
case "dump":
return mdDump(ctx, config, args)
default:
printError("md", fmt.Errorf("unknown command '%s'", cmd))
return 1
}
}
| 1 | 12,830 | This looks like it's only downloading things, not really checking their true validity. Especially for the MD object. Should we call `BareRootMetadata.IsValidAndSigned()` in `mdGet`? | keybase-kbfs | go |
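To make the suggestion concrete, a rough sketch of validating a fetched MD object follows. IsValidAndSigned is named in the review itself, but its exact signature and the surrounding plumbing here are assumptions, not the real kbfs code:

package main

import (
	"fmt"

	"github.com/keybase/kbfs/libkbfs"
)

// validateMD is a hypothetical helper: after mdGet fetches a metadata
// object, verify it is well-formed and properly signed before treating
// the check as passed.
func validateMD(config libkbfs.Config, brmd libkbfs.BareRootMetadata) error {
	// Signature assumed; the real method may take different arguments.
	if err := brmd.IsValidAndSigned(config.Codec(), config.Crypto()); err != nil {
		return fmt.Errorf("MD object failed validation: %v", err)
	}
	return nil
}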
@@ -136,6 +136,17 @@ void image_data_reader::load() {
select_subset_of_data();
}
+void image_data_reader::setup() {
+ generic_data_reader::setup();
+
+ using InputBuf_T = lbann::cv_image_type<uint8_t>;
+ auto cvMat = cv::Mat(1, get_linearized_data_size(), InputBuf_T::T(1));
+ m_thread_cv_buffer.resize(omp_get_max_threads());
+ for(int tid = 0; tid < omp_get_max_threads(); ++tid) {
+ m_thread_cv_buffer[tid] = cvMat.clone();
+ }
+}
+
std::vector<image_data_reader::sample_t> image_data_reader::get_image_list_of_current_mb() const {
std::vector<sample_t> ret;
ret.reserve(m_mini_batch_size); | 1 | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2016, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
//
// data_reader_image .hpp .cpp - generic data reader class for image dataset
////////////////////////////////////////////////////////////////////////////////
#include "lbann/data_readers/data_reader_image.hpp"
#include <fstream>
namespace lbann {
image_data_reader::image_data_reader(bool shuffle)
: generic_data_reader(shuffle) {
set_defaults();
}
image_data_reader::image_data_reader(const image_data_reader& rhs)
: generic_data_reader(rhs),
m_image_dir(rhs.m_image_dir),
m_image_list(rhs.m_image_list),
m_image_width(rhs.m_image_width),
m_image_height(rhs.m_image_height),
m_image_num_channels(rhs.m_image_num_channels),
m_image_linearized_size(rhs.m_image_linearized_size),
m_num_labels(rhs.m_num_labels)
{}
image_data_reader& image_data_reader::operator=(const image_data_reader& rhs) {
generic_data_reader::operator=(rhs);
m_image_dir = rhs.m_image_dir;
m_image_list = rhs.m_image_list;
m_image_width = rhs.m_image_width;
m_image_height = rhs.m_image_height;
m_image_num_channels = rhs.m_image_num_channels;
m_image_linearized_size = rhs.m_image_linearized_size;
m_num_labels = rhs.m_num_labels;
return (*this);
}
void image_data_reader::set_linearized_image_size() {
m_image_linearized_size = m_image_width * m_image_height * m_image_num_channels;
}
void image_data_reader::set_defaults() {
m_image_width = 256;
m_image_height = 256;
m_image_num_channels = 3;
set_linearized_image_size();
m_num_labels = 1000;
}
void image_data_reader::set_input_params(const int width, const int height, const int num_ch, const int num_labels) {
if ((width > 0) && (height > 0)) { // set and valid
m_image_width = width;
m_image_height = height;
} else if (!((width == 0) && (height == 0))) { // set but not valid
std::stringstream err;
err << __FILE__<<" "<<__LINE__<< " :: Imagenet data reader setup error: invalid input image sizes";
throw lbann_exception(err.str());
}
if (num_ch > 0) {
m_image_num_channels = num_ch;
} else if (num_ch < 0) {
std::stringstream err;
err << __FILE__<<" "<<__LINE__<< " :: Imagenet data reader setup error: invalid number of channels of input images";
throw lbann_exception(err.str());
}
set_linearized_image_size();
if (num_labels > 0) {
m_num_labels = num_labels;
} else if (num_labels < 0) {
std::stringstream err;
err << __FILE__<<" "<<__LINE__<< " :: Imagenet data reader setup error: invalid number of labels";
throw lbann_exception(err.str());
}
}
bool image_data_reader::fetch_label(CPUMat& Y, int data_id, int mb_idx, int tid) {
const label_t label = m_image_list[data_id].second;
Y.Set(label, mb_idx, 1);
return true;
}
void image_data_reader::load() {
//const std::string imageDir = get_file_dir();
const std::string imageListFile = get_data_filename();
m_image_list.clear();
// load image list
FILE *fplist = fopen(imageListFile.c_str(), "rt");
if (!fplist) {
throw lbann_exception(
std::string{} + __FILE__ + " " + std::to_string(__LINE__) +
" :: failed to open: " + imageListFile);
}
while (!feof(fplist)) {
char imagepath[512];
label_t imagelabel;
if (fscanf(fplist, "%s%d", imagepath, &imagelabel) <= 1) {
break;
}
m_image_list.emplace_back(imagepath, imagelabel);
}
fclose(fplist);
// reset indices
m_shuffled_indices.clear();
m_shuffled_indices.resize(m_image_list.size());
std::iota(m_shuffled_indices.begin(), m_shuffled_indices.end(), 0);
select_subset_of_data();
}
std::vector<image_data_reader::sample_t> image_data_reader::get_image_list_of_current_mb() const {
std::vector<sample_t> ret;
ret.reserve(m_mini_batch_size);
for (El::Int i = 0; i < m_indices_fetched_per_mb.Height(); ++i) {
El::Int index = m_indices_fetched_per_mb.Get(i, 0);
ret.push_back(m_image_list[index]);
}
return ret;
}
} // namespace lbann
| 1 | 13,089 | Nikoli, I believe that this addresses your concern. One question for you or Jae-Seung is whether any allocation from the clone is properly cleaned up when the vector is destroyed. I believe that it should be. | LLNL-lbann | cpp |
@@ -48,6 +48,8 @@ const (
KindLambdaApp Kind = "LambdaApp"
// KindCloudRunApp represents deployment configuration for a CloudRun application.
KindCloudRunApp Kind = "CloudRunApp"
+ // KindEcsApp represents deployment configuration for an AWS ECS.
+ KindEcsApp Kind = "EcsApp"
// KindSealedSecret represents a sealed secret.
KindSealedSecret Kind = "SealedSecret"
) | 1 | // Copyright 2020 The PipeCD Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"sigs.k8s.io/yaml"
"github.com/pipe-cd/pipe/pkg/model"
)
const (
SharedConfigurationDirName = ".pipe"
versionV1Beta1 = "pipecd.dev/v1beta1"
)
// Kind represents the kind of configuration the data contains.
type Kind string
const (
// KindKubernetesApp represents deployment configuration for a Kubernetes application.
// This application can be a group of plain-YAML Kubernetes manifests,
// or kustomization manifests or helm manifests.
KindKubernetesApp Kind = "KubernetesApp"
// KindTerraformApp represents deployment configuration for a Terraform application.
// This application contains a single workspace of a terraform root module.
KindTerraformApp Kind = "TerraformApp"
// KindCrossplaneApp represents deployment configuration for a Crossplane application.
KindCrossplaneApp Kind = "CrossplaneApp"
// KindLambdaApp represents deployment configuration for an AWS Lambda application.
KindLambdaApp Kind = "LambdaApp"
// KindCloudRunApp represents deployment configuration for a CloudRun application.
KindCloudRunApp Kind = "CloudRunApp"
// KindSealedSecret represents a sealed secret.
KindSealedSecret Kind = "SealedSecret"
)
const (
// KindPiped represents configuration for piped.
// This configuration will be loaded while the piped is starting up.
KindPiped Kind = "Piped"
// KindControlPlane represents configuration for control plane's services.
KindControlPlane Kind = "ControlPlane"
// KindAnalysisTemplate represents shared analysis template for a repository.
// This configuration file should be placed in .pipe directory
// at the root of the repository.
KindAnalysisTemplate Kind = "AnalysisTemplate"
// KindEventWatcher represents configuration for Event Watcher.
KindEventWatcher Kind = "EventWatcher"
)
var (
ErrNotFound = errors.New("not found")
)
// Config represents configuration data load from file.
// The spec depends on the kind of configuration.
type Config struct {
Kind Kind
APIVersion string
spec interface{}
// Deployment specs.
KubernetesDeploymentSpec *KubernetesDeploymentSpec
TerraformDeploymentSpec *TerraformDeploymentSpec
CloudRunDeploymentSpec *CloudRunDeploymentSpec
LambdaDeploymentSpec *LambdaDeploymentSpec
PipedSpec *PipedSpec
ControlPlaneSpec *ControlPlaneSpec
AnalysisTemplateSpec *AnalysisTemplateSpec
EventWatcherSpec *EventWatcherSpec
SealedSecretSpec *SealedSecretSpec
}
type genericConfig struct {
Kind Kind `json:"kind"`
APIVersion string `json:"apiVersion,omitempty"`
Spec json.RawMessage `json:"spec"`
}
func (c *Config) init(kind Kind, apiVersion string) error {
c.Kind = kind
c.APIVersion = apiVersion
switch kind {
case KindKubernetesApp:
c.KubernetesDeploymentSpec = &KubernetesDeploymentSpec{
Input: KubernetesDeploymentInput{
AutoRollback: true,
},
}
c.spec = c.KubernetesDeploymentSpec
case KindTerraformApp:
c.TerraformDeploymentSpec = &TerraformDeploymentSpec{}
c.spec = c.TerraformDeploymentSpec
case KindCloudRunApp:
c.CloudRunDeploymentSpec = &CloudRunDeploymentSpec{
Input: CloudRunDeploymentInput{
AutoRollback: true,
},
}
c.spec = c.CloudRunDeploymentSpec
case KindLambdaApp:
c.LambdaDeploymentSpec = &LambdaDeploymentSpec{
Input: LambdaDeploymentInput{
AutoRollback: true,
},
}
c.spec = c.LambdaDeploymentSpec
case KindPiped:
c.PipedSpec = &PipedSpec{}
c.spec = c.PipedSpec
case KindControlPlane:
c.ControlPlaneSpec = &ControlPlaneSpec{}
c.spec = c.ControlPlaneSpec
case KindAnalysisTemplate:
c.AnalysisTemplateSpec = &AnalysisTemplateSpec{}
c.spec = c.AnalysisTemplateSpec
case KindSealedSecret:
c.SealedSecretSpec = &SealedSecretSpec{}
c.spec = c.SealedSecretSpec
case KindEventWatcher:
c.EventWatcherSpec = &EventWatcherSpec{}
c.spec = c.EventWatcherSpec
default:
return fmt.Errorf("unsupported kind: %s", c.Kind)
}
return nil
}
// UnmarshalJSON customizes the way to unmarshal json data into Config struct.
// It first unmarshals to a generic config and then unmarshals the spec,
// which depends on the kind of configuration.
func (c *Config) UnmarshalJSON(data []byte) error {
var (
err error
gc = genericConfig{}
)
dec := json.NewDecoder(bytes.NewReader(data))
dec.DisallowUnknownFields()
if err := dec.Decode(&gc); err != nil {
return err
}
if err = c.init(gc.Kind, gc.APIVersion); err != nil {
return err
}
if len(gc.Spec) > 0 {
dec := json.NewDecoder(bytes.NewReader(gc.Spec))
dec.DisallowUnknownFields()
err = dec.Decode(c.spec)
}
return err
}
type validator interface {
Validate() error
}
// Validate validates the value of all fields.
func (c *Config) Validate() error {
if c.APIVersion != versionV1Beta1 {
return fmt.Errorf("unsupported version: %s", c.APIVersion)
}
if c.Kind == "" {
return fmt.Errorf("kind is required")
}
if c.spec == nil {
return fmt.Errorf("spec is required")
}
spec, ok := c.spec.(validator)
if !ok {
return fmt.Errorf("spec must have Validate function")
}
if err := spec.Validate(); err != nil {
return err
}
return nil
}
// LoadFromYAML reads and decodes a yaml file to construct the Config.
func LoadFromYAML(file string) (*Config, error) {
data, err := ioutil.ReadFile(file)
if err != nil {
return nil, err
}
return DecodeYAML(data)
}
// DecodeYAML unmarshals config YAML data to config struct.
// It also validates the configuration after decoding.
func DecodeYAML(data []byte) (*Config, error) {
js, err := yaml.YAMLToJSON(data)
if err != nil {
return nil, err
}
c := &Config{}
if err := json.Unmarshal(js, c); err != nil {
return nil, err
}
if err := c.Validate(); err != nil {
return nil, err
}
return c, nil
}
// ToApplicationKind converts configuration kind to application kind.
func ToApplicationKind(k Kind) (model.ApplicationKind, bool) {
switch k {
case KindKubernetesApp:
return model.ApplicationKind_KUBERNETES, true
case KindTerraformApp:
return model.ApplicationKind_TERRAFORM, true
case KindCrossplaneApp:
return model.ApplicationKind_CROSSPLANE, true
case KindLambdaApp:
return model.ApplicationKind_LAMBDA, true
case KindCloudRunApp:
return model.ApplicationKind_CLOUDRUN, true
}
return model.ApplicationKind_KUBERNETES, false
}
func (c *Config) GetGenericDeployment() (GenericDeploymentSpec, bool) {
switch c.Kind {
case KindKubernetesApp:
return c.KubernetesDeploymentSpec.GenericDeploymentSpec, true
case KindTerraformApp:
return c.TerraformDeploymentSpec.GenericDeploymentSpec, true
case KindCloudRunApp:
return c.CloudRunDeploymentSpec.GenericDeploymentSpec, true
case KindLambdaApp:
return c.LambdaDeploymentSpec.GenericDeploymentSpec, true
}
return GenericDeploymentSpec{}, false
}
| 1 | 15,066 | should be `ECSApp` | pipe-cd-pipe | go |
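The point is Go's initialism convention: acronyms like ECS stay fully upper-cased in identifiers (compare URL, ID). A sketch of the suggested spelling, with the Kind type restated so the fragment stands alone:

package config

// Kind mirrors the Kind type defined in the file above.
type Kind string

const (
	// KindECSApp represents deployment configuration for an AWS ECS
	// application; "ECS" is upper-cased per Go naming conventions, as the
	// reviewer suggests. Whether the string value changes too is left open.
	KindECSApp Kind = "ECSApp"
)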
@@ -539,7 +539,13 @@ class WebElement(object):
@property
def rect(self):
"""A dictionary with the size and location of the element."""
- return self._execute(Command.GET_ELEMENT_RECT)['value']
+ if self._w3c:
+ return self._execute(Command.GET_ELEMENT_RECT)['value']
+ else:
+ rect = self.size.copy()
+ rect.update(self.location)
+ return rect
+
@property
def screenshot_as_base64(self): | 1 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import hashlib
import os
import pkgutil
import warnings
import zipfile
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.utils import keys_to_typing
from .command import Command
# Python 3 imports
try:
str = basestring
except NameError:
pass
try:
from StringIO import StringIO as IOStream
except ImportError: # 3+
from io import BytesIO as IOStream
# not relying on __package__ here as it can be `None` in some situations (see #4558)
_pkg = '.'.join(__name__.split('.')[:-1])
getAttribute_js = pkgutil.get_data(_pkg, 'getAttribute.js').decode('utf8')
isDisplayed_js = pkgutil.get_data(_pkg, 'isDisplayed.js').decode('utf8')
class WebElement(object):
"""Represents a DOM element.
Generally, all interesting operations that interact with a document will be
performed through this interface.
All method calls will do a freshness check to ensure that the element
reference is still valid. This essentially determines whether or not the
element is still attached to the DOM. If this test fails, then an
``StaleElementReferenceException`` is thrown, and all future calls to this
instance will fail."""
def __init__(self, parent, id_, w3c=False):
self._parent = parent
self._id = id_
self._w3c = w3c
def __repr__(self):
return '<{0.__module__}.{0.__name__} (session="{1}", element="{2}")>'.format(
type(self), self._parent.session_id, self._id)
@property
def tag_name(self):
"""This element's ``tagName`` property."""
return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
@property
def text(self):
"""The text of the element."""
return self._execute(Command.GET_ELEMENT_TEXT)['value']
def click(self):
"""Clicks the element."""
self._execute(Command.CLICK_ELEMENT)
def submit(self):
"""Submits a form."""
if self._w3c:
form = self.find_element(By.XPATH, "./ancestor-or-self::form")
self._parent.execute_script(
"var e = arguments[0].ownerDocument.createEvent('Event');"
"e.initEvent('submit', true, true);"
"if (arguments[0].dispatchEvent(e)) { arguments[0].submit() }", form)
else:
self._execute(Command.SUBMIT_ELEMENT)
def clear(self):
"""Clears the text if it's a text entry element."""
self._execute(Command.CLEAR_ELEMENT)
def get_property(self, name):
"""
Gets the given property of the element.
:Args:
- name - Name of the property to retrieve.
Example::
text_length = target_element.get_property("text_length")
"""
try:
return self._execute(Command.GET_ELEMENT_PROPERTY, {"name": name})["value"]
except WebDriverException:
# if we hit an end point that doesn't understand getElementProperty, let's fake it
return self.parent.execute_script('return arguments[0][arguments[1]]', self, name)
def get_attribute(self, name):
"""Gets the given attribute or property of the element.
This method will first try to return the value of a property with the
given name. If a property with that name doesn't exist, it returns the
value of the attribute with the same name. If there's no attribute with
that name, ``None`` is returned.
Values which are considered truthy, that is, equal to "true" or "false",
are returned as booleans. All other non-``None`` values are returned
as strings. For attributes or properties which do not exist, ``None``
is returned.
:Args:
- name - Name of the attribute/property to retrieve.
Example::
# Check if the "active" CSS class is applied to an element.
is_active = "active" in target_element.get_attribute("class")
"""
attributeValue = ''
if self._w3c:
attributeValue = self.parent.execute_script(
"return (%s).apply(null, arguments);" % getAttribute_js,
self, name)
else:
resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
attributeValue = resp.get('value')
if attributeValue is not None:
if name != 'value' and attributeValue.lower() in ('true', 'false'):
attributeValue = attributeValue.lower()
return attributeValue
def is_selected(self):
"""Returns whether the element is selected.
Can be used to check if a checkbox or radio button is selected.
"""
return self._execute(Command.IS_ELEMENT_SELECTED)['value']
def is_enabled(self):
"""Returns whether the element is enabled."""
return self._execute(Command.IS_ELEMENT_ENABLED)['value']
def find_element_by_id(self, id_):
"""Finds element within this element's children by ID.
:Args:
- id\_ - ID of child element to locate.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
foo_element = element.find_element_by_id('foo')
"""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
"""Finds a list of elements within this element's children by ID.
Will return a list of webelements if found, or an empty list if not.
:Args:
- id\_ - Id of child element to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_id('foo')
"""
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
"""Finds element within this element's children by name.
:Args:
- name - name property of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_name('foo')
"""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
"""Finds a list of elements within this element's children by name.
:Args:
- name - name property to search for.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
elements = element.find_elements_by_name('foo')
"""
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
"""Finds element within this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_link_text('Sign In')
"""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
"""Finds a list of elements within this element's children by visible link text.
:Args:
- link_text - Link text string to search for.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
elements = element.find_elements_by_link_text('Sign In')
"""
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
"""Finds element within this element's children by partially visible link text.
:Args:
- link_text: The text of the element to partially match on.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_partial_link_text('Sign')
"""
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
"""Finds a list of elements within this element's children by link text.
:Args:
- link_text: The text of the element to partial match on.
:Returns:
- list of webelement - a list with elements if any was found. an
empty list if not
:Usage:
elements = element.find_elements_by_partial_link_text('Sign')
"""
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
"""Finds element within this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_tag_name('h1')
"""
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
"""Finds a list of elements within this element's children by tag name.
:Args:
- name - name of html tag (eg: h1, a, span)
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_tag_name('h1')
"""
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
"""Finds element by xpath.
:Args:
- xpath - xpath of element to locate. "//input[@class='myelement']"
Note: The base path will be relative to this element's location.
This will select the first link under this element.
::
myelement.find_element_by_xpath(".//a")
However, this will select the first link on the page.
::
myelement.find_element_by_xpath("//a")
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_xpath('//div/td[1]')
"""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""Finds elements within the element by xpath.
:Args:
- xpath - xpath locator string.
Note: The base path will be relative to this element's location.
This will select all links under this element.
::
myelement.find_elements_by_xpath(".//a")
However, this will select all links in the page itself.
::
myelement.find_elements_by_xpath("//a")
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_xpath("//div[contains(@class, 'foo')]")
"""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
"""Finds element within this element's children by class name.
:Args:
- name: The class name of the element to find.
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_class_name('foo')
"""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""Finds a list of elements within this element's children by class name.
:Args:
- name: The class name of the elements to find.
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_class_name('foo')
"""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""Finds element within this element's children by CSS selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- WebElement - the element if it was found
:Raises:
- NoSuchElementException - if the element wasn't found
:Usage:
element = element.find_element_by_css_selector('#foo')
"""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""Finds a list of elements within this element's children by CSS selector.
:Args:
- css_selector - CSS selector string, ex: 'a.nav#home'
:Returns:
- list of WebElement - a list with elements if any was found. An
empty list if not
:Usage:
elements = element.find_elements_by_css_selector('.foo')
"""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def send_keys(self, *value):
"""Simulates typing into the element.
:Args:
- value - A string for typing, or setting form fields. For setting
file inputs, this could be a local file path.
Use this to send simple key events or to fill out form fields::
form_textfield = driver.find_element_by_name('username')
form_textfield.send_keys("admin")
This can also be used to set file inputs.
::
file_input = driver.find_element_by_name('profilePic')
file_input.send_keys("path/to/profilepic.gif")
# Generally it's better to wrap the file path in one of the methods
# in os.path to return the actual path to support cross OS testing.
# file_input.send_keys(os.path.abspath("path/to/profilepic.gif"))
"""
# transfer file to another machine only if remote driver is used
# the same behaviour as for java binding
if self.parent._is_remote:
local_file = self.parent.file_detector.is_local_file(*value)
if local_file is not None:
value = self._upload(local_file)
self._execute(Command.SEND_KEYS_TO_ELEMENT,
{'text': "".join(keys_to_typing(value)),
'value': keys_to_typing(value)})
# RenderedWebElement Items
def is_displayed(self):
"""Whether the element is visible to a user."""
# Only go into this conditional for browsers that don't use the atom themselves
if self._w3c and self.parent.capabilities['browserName'] == 'safari':
return self.parent.execute_script(
"return (%s).apply(null, arguments);" % isDisplayed_js,
self)
else:
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
@property
def location_once_scrolled_into_view(self):
"""THIS PROPERTY MAY CHANGE WITHOUT WARNING. Use this to discover
where on the screen an element is so that we can click it. This method
should cause the element to be scrolled into view.
Returns the top lefthand corner location on the screen, or ``None`` if
the element is not visible.
"""
if self._w3c:
old_loc = self._execute(Command.W3C_EXECUTE_SCRIPT, {
'script': "arguments[0].scrollIntoView(true); return arguments[0].getBoundingClientRect()",
'args': [self]})['value']
return {"x": round(old_loc['x']),
"y": round(old_loc['y'])}
else:
return self._execute(Command.GET_ELEMENT_LOCATION_ONCE_SCROLLED_INTO_VIEW)['value']
@property
def size(self):
"""The size of the element."""
size = {}
if self._w3c:
size = self._execute(Command.GET_ELEMENT_RECT)['value']
else:
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {"height": size["height"],
"width": size["width"]}
return new_size
def value_of_css_property(self, property_name):
"""The value of a CSS property."""
return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY, {
'propertyName': property_name})['value']
@property
def location(self):
"""The location of the element in the renderable canvas."""
if self._w3c:
old_loc = self._execute(Command.GET_ELEMENT_RECT)['value']
else:
old_loc = self._execute(Command.GET_ELEMENT_LOCATION)['value']
new_loc = {"x": round(old_loc['x']),
"y": round(old_loc['y'])}
return new_loc
@property
def rect(self):
"""A dictionary with the size and location of the element."""
return self._execute(Command.GET_ELEMENT_RECT)['value']
@property
def screenshot_as_base64(self):
"""
Gets the screenshot of the current element as a base64 encoded string.
:Usage:
img_b64 = element.screenshot_as_base64
"""
return self._execute(Command.ELEMENT_SCREENSHOT)['value']
@property
def screenshot_as_png(self):
"""
Gets the screenshot of the current element as a binary data.
:Usage:
element_png = element.screenshot_as_png
"""
return base64.b64decode(self.screenshot_as_base64.encode('ascii'))
def screenshot(self, filename):
"""
Saves a screenshot of the current element to a PNG image file. Returns
False if there is any IOError, else returns True. Use full paths in
your filename.
:Args:
- filename: The full path you wish to save your screenshot to. This
should end with a `.png` extension.
:Usage:
element.screenshot('/Screenshots/foo.png')
"""
if not filename.lower().endswith('.png'):
warnings.warn("name used for saved screenshot does not match file "
"type. It should end with a `.png` extension", UserWarning)
png = self.screenshot_as_png
try:
with open(filename, 'wb') as f:
f.write(png)
except IOError:
return False
finally:
del png
return True
@property
def parent(self):
"""Internal reference to the WebDriver instance this element was found from."""
return self._parent
@property
def id(self):
"""Internal ID used by selenium.
This is mainly for internal use. Simple use cases such as checking if 2
webelements refer to the same element, can be done using ``==``::
if element1 == element2:
print("These 2 are equal")
"""
return self._id
def __eq__(self, element):
return hasattr(element, 'id') and self._id == element.id
def __ne__(self, element):
return not self.__eq__(element)
# Private Methods
def _execute(self, command, params=None):
"""Executes a command against the underlying HTML element.
Args:
command: The name of the command to _execute as a string.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
def find_element(self, by=By.ID, value=None):
"""
Find an element given a By strategy and locator. Prefer the find_element_by_* methods when
possible.
:Usage:
element = element.find_element(By.ID, 'foo')
:rtype: WebElement
"""
if self._w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self._execute(Command.FIND_CHILD_ELEMENT,
{"using": by, "value": value})['value']
def find_elements(self, by=By.ID, value=None):
"""
Find elements given a By strategy and locator. Prefer the find_elements_by_* methods when
possible.
:Usage:
element = element.find_elements(By.CLASS_NAME, 'foo')
:rtype: list of WebElement
"""
if self._w3c:
if by == By.ID:
by = By.CSS_SELECTOR
value = '[id="%s"]' % value
elif by == By.TAG_NAME:
by = By.CSS_SELECTOR
elif by == By.CLASS_NAME:
by = By.CSS_SELECTOR
value = ".%s" % value
elif by == By.NAME:
by = By.CSS_SELECTOR
value = '[name="%s"]' % value
return self._execute(Command.FIND_CHILD_ELEMENTS,
{"using": by, "value": value})['value']
def __hash__(self):
return int(hashlib.md5(self._id.encode('utf-8')).hexdigest(), 16)
def _upload(self, filename):
fp = IOStream()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
zipped.write(filename, os.path.split(filename)[1])
zipped.close()
content = base64.encodestring(fp.getvalue())
if not isinstance(content, str):
content = content.decode('utf-8')
try:
return self._execute(Command.UPLOAD_FILE, {'file': content})['value']
except WebDriverException as e:
if "Unrecognized command: POST" in e.__str__():
return filename
elif "Command not found: POST " in e.__str__():
return filename
elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
return filename
else:
raise e
| 1 | 15,820 | flake8 is going to fail on this having 2 blank lines | SeleniumHQ-selenium | py |
@@ -15,7 +15,6 @@ package podfailure
import (
"context"
- "errors"
"fmt"
"time"
| 1 | // Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package podfailure
import (
"context"
"errors"
"fmt"
"time"
"golang.org/x/sync/errgroup"
"github.com/go-logr/logr"
"github.com/pingcap/chaos-mesh/api/v1alpha1"
"github.com/pingcap/chaos-mesh/controllers/twophase"
"github.com/pingcap/chaos-mesh/pkg/utils"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
v1 "k8s.io/api/core/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
)
const (
// fakeImage is a nonexistent image.
fakeImage = "pingcap.com/fake-chaos-mesh:latest"
podFailureActionMsg = "pause pod duration %s"
)
func NewReconciler(c client.Client, log logr.Logger, req ctrl.Request) twophase.Reconciler {
return twophase.Reconciler{
InnerReconciler: &Reconciler{
Client: c,
Log: log,
},
Client: c,
Log: log,
}
}
type Reconciler struct {
client.Client
Log logr.Logger
}
func (r *Reconciler) Object() twophase.InnerObject {
return &v1alpha1.PodChaos{}
}
func (r *Reconciler) Apply(ctx context.Context, req ctrl.Request, chaos twophase.InnerObject) error {
podchaos, ok := chaos.(*v1alpha1.PodChaos)
if !ok {
err := errors.New("chaos is not PodChaos")
r.Log.Error(err, "chaos is not PodChaos", "chaos", chaos)
return err
}
pods, err := utils.SelectAndGeneratePods(ctx, r.Client, &podchaos.Spec)
if err != nil {
r.Log.Error(err, "failed to select and generate pods")
return err
}
err = r.failAllPods(ctx, pods, podchaos)
if err != nil {
return err
}
podchaos.Status.Experiment.StartTime = &metav1.Time{
Time: time.Now(),
}
podchaos.Status.Experiment.Pods = []v1alpha1.PodStatus{}
podchaos.Status.Experiment.Phase = v1alpha1.ExperimentPhaseRunning
for _, pod := range pods {
ps := v1alpha1.PodStatus{
Namespace: pod.Namespace,
Name: pod.Name,
HostIP: pod.Status.HostIP,
PodIP: pod.Status.PodIP,
Action: string(podchaos.Spec.Action),
Message: fmt.Sprintf(podFailureActionMsg, *podchaos.Spec.Duration),
}
podchaos.Status.Experiment.Pods = append(podchaos.Status.Experiment.Pods, ps)
}
return nil
}
func (r *Reconciler) Recover(ctx context.Context, req ctrl.Request, chaos twophase.InnerObject) error {
podchaos, ok := chaos.(*v1alpha1.PodChaos)
if !ok {
err := errors.New("chaos is not PodChaos")
r.Log.Error(err, "chaos is not PodChaos", "chaos", chaos)
return err
}
err := r.cleanFinalizersAndRecover(ctx, podchaos)
if err != nil {
return err
}
podchaos.Status.Experiment.EndTime = &metav1.Time{
Time: time.Now(),
}
podchaos.Status.Experiment.Phase = v1alpha1.ExperimentPhaseFinished
return nil
}
func (r *Reconciler) cleanFinalizersAndRecover(ctx context.Context, podchaos *v1alpha1.PodChaos) error {
if len(podchaos.Finalizers) == 0 {
return nil
}
for _, key := range podchaos.Finalizers {
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
var pod v1.Pod
err = r.Get(ctx, types.NamespacedName{
Namespace: ns,
Name: name,
}, &pod)
if err != nil {
if !k8serror.IsNotFound(err) {
return err
}
r.Log.Info("Pod not found", "namespace", ns, "name", name)
podchaos.Finalizers = utils.RemoveFromFinalizer(podchaos.Finalizers, key)
continue
}
err = r.recoverPod(ctx, &pod, podchaos)
if err != nil {
return err
}
podchaos.Finalizers = utils.RemoveFromFinalizer(podchaos.Finalizers, key)
}
return nil
}
func (r *Reconciler) failAllPods(ctx context.Context, pods []v1.Pod, podchaos *v1alpha1.PodChaos) error {
g := errgroup.Group{}
for index := range pods {
pod := &pods[index]
key, err := cache.MetaNamespaceKeyFunc(pod)
if err != nil {
return err
}
podchaos.Finalizers = utils.InsertFinalizer(podchaos.Finalizers, key)
g.Go(func() error {
return r.failPod(ctx, pod, podchaos)
})
}
return g.Wait()
}
func (r *Reconciler) failPod(ctx context.Context, pod *v1.Pod, podchaos *v1alpha1.PodChaos) error {
r.Log.Info("Try to inject pod-failure", "namespace", pod.Namespace, "name", pod.Name)
// TODO: check the annotations or others in case that this pod is used by other chaos
for index := range pod.Spec.InitContainers {
originImage := pod.Spec.InitContainers[index].Image
name := pod.Spec.InitContainers[index].Name
key := utils.GenAnnotationKeyForImage(podchaos, name)
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
if _, ok := pod.Annotations[key]; ok {
return fmt.Errorf("annotation %s exist", key)
}
pod.Annotations[key] = originImage
pod.Spec.InitContainers[index].Image = fakeImage
}
for index := range pod.Spec.Containers {
originImage := pod.Spec.Containers[index].Image
name := pod.Spec.Containers[index].Name
key := utils.GenAnnotationKeyForImage(podchaos, name)
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
if _, ok := pod.Annotations[key]; ok {
return fmt.Errorf("annotation %s exist", key)
}
pod.Annotations[key] = originImage
pod.Spec.Containers[index].Image = fakeImage
}
if err := r.Update(ctx, pod); err != nil {
r.Log.Error(err, "unable to use fake image on pod")
return err
}
ps := v1alpha1.PodStatus{
Namespace: pod.Namespace,
Name: pod.Name,
HostIP: pod.Status.HostIP,
PodIP: pod.Status.PodIP,
Action: string(podchaos.Spec.Action),
Message: fmt.Sprintf(podFailureActionMsg, *podchaos.Spec.Duration),
}
podchaos.Status.Experiment.Pods = append(podchaos.Status.Experiment.Pods, ps)
return nil
}
func (r *Reconciler) recoverPod(ctx context.Context, pod *v1.Pod, podchaos *v1alpha1.PodChaos) error {
r.Log.Info("Recovering", "namespace", pod.Namespace, "name", pod.Name)
for index := range pod.Spec.Containers {
name := pod.Spec.Containers[index].Name
_ = utils.GenAnnotationKeyForImage(podchaos, name)
if pod.Annotations == nil {
pod.Annotations = make(map[string]string)
}
// FIXME: Check annotations and return error.
}
// chaos-mesh doesn't support
return r.Delete(ctx, pod, &client.DeleteOptions{
GracePeriodSeconds: new(int64), // PeriodSeconds has to be set specifically
})
}
| 1 | 12,930 | Should we make a dir named controllers/scheduler/podchaos? | chaos-mesh-chaos-mesh | go
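If the suggested directory move were adopted, consumers would only need an import-path update; the package clause and constructor signature stay the same. A sketch (the new import path and the wrapper function are assumptions):

package podchaos // hypothetical parent package after the suggested move

import (
	"github.com/go-logr/logr"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/pingcap/chaos-mesh/controllers/twophase"
	// Assumed new import path if podfailure moves under controllers/scheduler.
	podfailure "github.com/pingcap/chaos-mesh/controllers/scheduler/podchaos/podfailure"
)

// newPodFailureReconciler shows that callers would only need an import-path
// update; the constructor itself is unchanged.
func newPodFailureReconciler(c client.Client, log logr.Logger, req ctrl.Request) twophase.Reconciler {
	return podfailure.NewReconciler(c, log, req)
}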
@@ -50,9 +50,9 @@ import (
)
const (
- //KeyNode represents the key values used for specifying the Node Affinity
+ //KeyNodeHostname represents the key values used for specifying the Node Affinity
// based on the hostname
- KeyNode = "kubernetes.io/hostname"
+ KeyNodeHostname = "kubernetes.io/hostname"
)
// NewProvisioner will create a new Provisioner object and initialize | 1 | /*
Copyright 2019 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
This file contains the volume creation and deletion handlers invoked by
the github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller.
The handlers that are mandatory to be implemented:
- Provision - is called by controller to perform custom validation on the PVC
request and return a valid PV spec. The controller will create the PV object
using the spec passed to it and bind it to the PVC.
- Delete - is called by controller to perform cleanup tasks on the PV before
deleting it.
*/
package app
import (
"fmt"
"strings"
"github.com/golang/glog"
"github.com/pkg/errors"
pvController "github.com/kubernetes-sigs/sig-storage-lib-external-provisioner/controller"
mconfig "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
menv "github.com/openebs/maya/pkg/env/v1alpha1"
analytics "github.com/openebs/maya/pkg/usage"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
//metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
)
const (
//KeyNode represents the key values used for specifying the Node Affinity
// based on the hostname
KeyNode = "kubernetes.io/hostname"
)
// NewProvisioner will create a new Provisioner object and initialize
// it with global information used across PV create and delete operations.
func NewProvisioner(stopCh chan struct{}, kubeClient *clientset.Clientset) (*Provisioner, error) {
namespace := getOpenEBSNamespace() //menv.Get(menv.OpenEBSNamespace)
if len(strings.TrimSpace(namespace)) == 0 {
return nil, fmt.Errorf("Cannot start Provisioner: failed to get namespace")
}
p := &Provisioner{
stopCh: stopCh,
kubeClient: kubeClient,
namespace: namespace,
helperImage: getDefaultHelperImage(),
defaultConfig: []mconfig.Config{
{
Name: KeyPVBasePath,
Value: getDefaultBasePath(),
},
},
}
p.getVolumeConfig = p.GetVolumeConfig
return p, nil
}
// SupportsBlock will be used by controller to determine if block mode is
// supported by the host path provisioner. Return false.
func (p *Provisioner) SupportsBlock() bool {
return false
}
// Provision is invoked by the PVC controller which expect the PV
// to be provisioned and a valid PV spec returned.
func (p *Provisioner) Provision(opts pvController.VolumeOptions) (*v1.PersistentVolume, error) {
pvc := opts.PVC
if pvc.Spec.Selector != nil {
return nil, fmt.Errorf("claim.Spec.Selector is not supported")
}
for _, accessMode := range pvc.Spec.AccessModes {
if accessMode != v1.ReadWriteOnce {
return nil, fmt.Errorf("Only support ReadWriteOnce access mode")
}
}
//node := opts.SelectedNode
if opts.SelectedNode == nil {
return nil, fmt.Errorf("configuration error, no node was specified")
}
name := opts.PVName
// Create a new Config instance for the PV by merging the
// default configuration with configuration provided
// via PVC and the associated StorageClass
pvCASConfig, err := p.getVolumeConfig(name, pvc)
if err != nil {
return nil, err
}
//TODO: Determine if hostpath or device based Local PV should be created
stgType := pvCASConfig.GetStorageType()
size := resource.Quantity{}
reqMap := pvc.Spec.Resources.Requests
if reqMap != nil {
size = pvc.Spec.Resources.Requests["storage"]
}
sendEventOrIgnore(name, size.String(), stgType, analytics.VolumeProvision)
if stgType == "hostpath" {
return p.ProvisionHostPath(opts, pvCASConfig)
}
if stgType == "device" {
return p.ProvisionBlockDevice(opts, pvCASConfig)
}
return nil, fmt.Errorf("PV with StorageType %v is not supported", stgType)
}
// Delete is invoked by the PVC controller to perform clean-up
// activities before deleting the PV object. If reclaim policy is
// set to not-retain, then this function will create a helper pod
// to delete the host path from the node.
func (p *Provisioner) Delete(pv *v1.PersistentVolume) (err error) {
defer func() {
err = errors.Wrapf(err, "failed to delete volume %v", pv.Name)
}()
//Initiate clean up only when reclaim policy is not retain.
if pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimRetain {
//TODO: Determine the type of PV
pvType := GetLocalPVType(pv)
size := resource.Quantity{}
reqMap := pv.Spec.Capacity
if reqMap != nil {
size = pv.Spec.Capacity["storage"]
}
sendEventOrIgnore(pv.Name, size.String(), pvType, analytics.VolumeDeprovision)
if pvType == "local-device" {
return p.DeleteBlockDevice(pv)
}
return p.DeleteHostPath(pv)
}
glog.Infof("Retained volume %v", pv.Name)
return nil
}
// sendEventOrIgnore sends anonymous local-pv provision/delete events
func sendEventOrIgnore(pvName, capacity, stgType, method string) {
if method == analytics.VolumeProvision {
stgType = "local-" + stgType
}
if menv.Truthy(menv.OpenEBSEnableAnalytics) {
analytics.New().Build().ApplicationBuilder().
SetVolumeType(stgType, method).
SetDocumentTitle(pvName).
SetLabel(analytics.EventLabelCapacity).
SetReplicaCount(analytics.LocalPVReplicaCount, method).
SetCategory(method).
SetVolumeCapacity(capacity).Send()
}
}
| 1 | 17,254 | There seems to be one KeyNode in kubernetes.go of PV... would it make sense to use it? | openebs-maya | go
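A sketch of the reviewer's suggestion (the import path and exported name are assumptions; the exact package in maya may differ): reference the constant already defined for PV objects instead of declaring a second copy in this package.

package app

import (
	// Assumed location of the constant the reviewer mentions.
	pv "github.com/openebs/maya/pkg/kubernetes/persistentvolume/v1alpha1"
)

// nodeAffinityKey reuses the shared constant instead of redefining
// "kubernetes.io/hostname" locally.
func nodeAffinityKey() string {
	return pv.KeyNode
}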
@@ -22,12 +22,12 @@ import (
"fmt"
"time"
+ "github.com/mysteriumnetwork/node/cmd/commands"
+
"github.com/mysteriumnetwork/node/cmd/commands/cli/clio"
"github.com/mysteriumnetwork/node/config"
- "github.com/mysteriumnetwork/node/config/urfavecli/clicontext"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/connection/connectionstate"
- "github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/datasize"
"github.com/mysteriumnetwork/node/identity/registry"
"github.com/mysteriumnetwork/node/metadata" | 1 | /*
* Copyright (C) 2020 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package connection
import (
"errors"
"fmt"
"time"
"github.com/mysteriumnetwork/node/cmd/commands/cli/clio"
"github.com/mysteriumnetwork/node/config"
"github.com/mysteriumnetwork/node/config/urfavecli/clicontext"
"github.com/mysteriumnetwork/node/core/connection"
"github.com/mysteriumnetwork/node/core/connection/connectionstate"
"github.com/mysteriumnetwork/node/core/node"
"github.com/mysteriumnetwork/node/datasize"
"github.com/mysteriumnetwork/node/identity/registry"
"github.com/mysteriumnetwork/node/metadata"
"github.com/mysteriumnetwork/node/money"
tequilapi_client "github.com/mysteriumnetwork/node/tequilapi/client"
"github.com/mysteriumnetwork/node/tequilapi/contract"
"github.com/urfave/cli/v2"
)
// CommandName is the name of this command
const CommandName = "connection"
var (
flagCountry = cli.StringFlag{
Name: "country",
Usage: "Two letter (ISO 3166-1 alpha-2) country code to filter proposals.",
}
flagLocationType = cli.StringFlag{
Name: "location-type",
Usage: "Node location types to filter by eg.'hosting', 'residential', 'mobile' etc.",
}
)
const serviceWireguard = "wireguard"
// NewCommand function creates license command.
func NewCommand() *cli.Command {
var cmd *command
return &cli.Command{
Name: CommandName,
Usage: "Manage your connection",
Description: "Using the connection subcommands you can manage your connection or get additional information about it",
Before: func(ctx *cli.Context) error {
if err := clicontext.LoadUserConfigQuietly(ctx); err != nil {
return err
}
config.ParseFlagsNode(ctx)
nodeOptions := node.GetOptions()
tc := tequilapi_client.NewClient(nodeOptions.TequilapiAddress, nodeOptions.TequilapiPort)
cmd = &command{tequilapi: tc}
return nil
},
Subcommands: []*cli.Command{
{
Name: "proposals",
Usage: "List all possible proposals to which you can connect",
Flags: []cli.Flag{&flagCountry, &flagLocationType},
Action: func(ctx *cli.Context) error {
cmd.proposals(ctx)
return nil
},
},
{
Name: "up",
ArgsUsage: "[ProviderIdentityAddress]",
Usage: "Create a new connection",
Flags: []cli.Flag{&config.FlagAgreedTermsConditions},
Action: func(ctx *cli.Context) error {
cmd.up(ctx)
return nil
},
},
{
Name: "down",
Usage: "Disconnect from your current connection",
Action: func(ctx *cli.Context) error {
cmd.down()
return nil
},
},
{
Name: "info",
Usage: "Show information about your connection",
Action: func(ctx *cli.Context) error {
cmd.info()
return nil
},
},
},
}
}
type command struct {
tequilapi *tequilapi_client.Client
}
func (c *command) proposals(ctx *cli.Context) {
locationType := ctx.String(flagLocationType.Name)
locationCountry := ctx.String(flagCountry.Name)
if locationCountry != "" && len(locationCountry) != 2 {
clio.Warn("Country code must be in ISO 3166-1 alpha-2 format. Example: 'UK', 'US'")
return
}
proposals, err := c.tequilapi.ProposalsByLocationAndService(serviceWireguard, locationType, locationCountry)
if err != nil {
clio.Warn("Failed to fetch proposal list")
return
}
if len(proposals) == 0 {
clio.Info("No proposals found")
return
}
clio.Info("Found proposals:")
for _, p := range proposals {
printProposal(&p)
}
}
func (c *command) down() {
status, err := c.tequilapi.ConnectionStatus()
if err != nil {
clio.Warn("Could not get connection status")
return
}
if status.Status != string(connectionstate.NotConnected) {
if err := c.tequilapi.ConnectionDestroy(); err != nil {
clio.Warn(err)
return
}
}
clio.Success("Disconnected")
}
func (c *command) handleTOS(ctx *cli.Context) error {
if ctx.Bool(config.FlagAgreedTermsConditions.Name) {
c.acceptTOS()
return nil
}
agreed := config.Current.GetBool(contract.TermsConsumerAgreed)
if !agreed {
return errors.New("You must agree with consumer terms of use in order to use this command")
}
version := config.Current.GetString(contract.TermsVersion)
if version != metadata.CurrentTermsVersion {
return fmt.Errorf("You've agreed to terms of use version %s, but version %s is required", version, metadata.CurrentTermsVersion)
}
return nil
}
func (c *command) acceptTOS() {
t := true
if err := c.tequilapi.UpdateTerms(contract.TermsRequest{
AgreedConsumer: &t,
AgreedVersion: metadata.CurrentTermsVersion,
}); err != nil {
clio.Info("Failed to save terms of use agreement, you will have to re-agree on next launch")
}
}
func (c *command) up(ctx *cli.Context) {
if err := c.handleTOS(ctx); err != nil {
clio.PrintTOSError(err)
return
}
status, err := c.tequilapi.ConnectionStatus()
if err != nil {
clio.Warn("Could not get connection status")
return
}
switch connectionstate.State(status.Status) {
case
connectionstate.Connected,
connectionstate.Connecting,
connectionstate.Disconnecting,
connectionstate.Reconnecting:
msg := fmt.Sprintf("You can't create a new connection, you're in state '%s'", status.Status)
clio.Warn(msg)
return
}
providerID := ctx.Args().First()
if providerID == "" {
clio.Warn("First argument must be provider identity address")
return
}
id, err := c.tequilapi.CurrentIdentity("", "")
if err != nil {
clio.Error("Failed to get your identity")
return
}
identityStatus, err := c.tequilapi.Identity(id.Address)
if err != nil {
clio.Warn("Failed to get identity status")
return
}
if identityStatus.RegistrationStatus != registry.Registered.String() {
clio.Warn("Your identity is not registered, please execute `myst account register` first")
return
}
clio.Status("CONNECTING", "Creating connection from:", id.Address, "to:", providerID)
connectOptions := contract.ConnectOptions{
DNS: connection.DNSOptionAuto,
DisableKillSwitch: false,
}
hermesID := config.GetString(config.FlagHermesID)
_, err = c.tequilapi.ConnectionCreate(id.Address, providerID, hermesID, serviceWireguard, connectOptions)
if err != nil {
clio.Error("Failed to create a new connection")
return
}
clio.Success("Connected")
}
func (c *command) info() {
inf := newConnInfo()
id, err := c.tequilapi.CurrentIdentity("", "")
if err == nil {
inf.set(infIdentity, id.Address)
}
status, err := c.tequilapi.ConnectionStatus()
if err == nil {
if status.Status == string(connectionstate.Connected) {
inf.isConnected = true
inf.set(infProposal, status.Proposal.String())
}
inf.set(infStatus, status.Status)
inf.set(infSessionID, status.SessionID)
}
ip, err := c.tequilapi.ConnectionIP()
if err == nil {
inf.set(infIP, ip.IP)
}
location, err := c.tequilapi.ConnectionLocation()
if err == nil {
inf.set(infLocation, fmt.Sprintf("%s, %s (%s - %s)", location.City, location.Country, location.UserType, location.ISP))
}
if status.Status != string(connectionstate.Connected) {
inf.printAll()
return
}
statistics, err := c.tequilapi.ConnectionStatistics()
if err == nil {
inf.set(infDuration, fmt.Sprint(time.Duration(statistics.Duration)*time.Second))
inf.set(infTransferred, fmt.Sprintf("%s/%s", datasize.FromBytes(statistics.BytesReceived), datasize.FromBytes(statistics.BytesSent)))
inf.set(infThroughput, fmt.Sprintf("%s/%s", datasize.BitSpeed(statistics.ThroughputReceived), datasize.BitSpeed(statistics.ThroughputSent)))
inf.set(infSpent, money.New(statistics.TokensSpent).String())
}
inf.printAll()
}
| 1 | 16,853 | I'm gonna be a little annoying here, but did you configure your linter correctly? This empty line should not be here. Maybe go to settings and check if `goimports` is enabled? | mysteriumnetwork-node | go
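For reference, this is roughly how goimports would group the import block once the stray blank line is removed (a formatting sketch, not the merged code): standard library first, then third-party, then project packages, matching the file's existing convention.

package connection

import (
	"errors"
	"fmt"
	"time"

	"github.com/urfave/cli/v2"

	"github.com/mysteriumnetwork/node/cmd/commands"
	"github.com/mysteriumnetwork/node/cmd/commands/cli/clio"
	"github.com/mysteriumnetwork/node/config"
	"github.com/mysteriumnetwork/node/core/connection"
	"github.com/mysteriumnetwork/node/core/connection/connectionstate"
	"github.com/mysteriumnetwork/node/datasize"
	"github.com/mysteriumnetwork/node/identity/registry"
	"github.com/mysteriumnetwork/node/metadata"
	"github.com/mysteriumnetwork/node/money"
	tequilapi_client "github.com/mysteriumnetwork/node/tequilapi/client"
	"github.com/mysteriumnetwork/node/tequilapi/contract"
)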
@@ -122,6 +122,7 @@ public class EmojiPlugin extends Plugin
case FRIENDSCHAT:
case PRIVATECHAT:
case PRIVATECHATOUT:
+ case MODPRIVATECHAT:
break;
default:
return; | 1 | /*
* Copyright (c) 2019, Lotto <https://github.com/devLotto>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.emojis;
import java.awt.image.BufferedImage;
import java.util.Arrays;
import javax.annotation.Nullable;
import javax.inject.Inject;
import joptsimple.internal.Strings;
import lombok.extern.slf4j.Slf4j;
import net.runelite.api.Client;
import net.runelite.api.GameState;
import net.runelite.api.IndexedSprite;
import net.runelite.api.MessageNode;
import net.runelite.api.Player;
import net.runelite.api.events.ChatMessage;
import net.runelite.api.events.GameStateChanged;
import net.runelite.api.events.OverheadTextChanged;
import net.runelite.client.chat.ChatMessageManager;
import net.runelite.client.eventbus.Subscribe;
import net.runelite.client.plugins.Plugin;
import net.runelite.client.plugins.PluginDescriptor;
import net.runelite.client.util.ImageUtil;
@PluginDescriptor(
name = "Emojis",
description = "Replaces common emoticons such as :) with their corresponding emoji in the chat",
enabledByDefault = false
)
@Slf4j
public class EmojiPlugin extends Plugin
{
@Inject
private Client client;
@Inject
private ChatMessageManager chatMessageManager;
private int modIconsStart = -1;
@Override
protected void startUp()
{
loadEmojiIcons();
}
@Subscribe
public void onGameStateChanged(GameStateChanged gameStateChanged)
{
if (gameStateChanged.getGameState() == GameState.LOGGED_IN)
{
loadEmojiIcons();
}
}
private void loadEmojiIcons()
{
final IndexedSprite[] modIcons = client.getModIcons();
if (modIconsStart != -1 || modIcons == null)
{
return;
}
final Emoji[] emojis = Emoji.values();
final IndexedSprite[] newModIcons = Arrays.copyOf(modIcons, modIcons.length + emojis.length);
modIconsStart = modIcons.length;
for (int i = 0; i < emojis.length; i++)
{
final Emoji emoji = emojis[i];
try
{
final BufferedImage image = emoji.loadImage();
final IndexedSprite sprite = ImageUtil.getImageIndexedSprite(image, client);
newModIcons[modIconsStart + i] = sprite;
}
catch (Exception ex)
{
log.warn("Failed to load the sprite for emoji " + emoji, ex);
}
}
log.debug("Adding emoji icons");
client.setModIcons(newModIcons);
}
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
if (client.getGameState() != GameState.LOGGED_IN || modIconsStart == -1)
{
return;
}
switch (chatMessage.getType())
{
case PUBLICCHAT:
case MODCHAT:
case FRIENDSCHAT:
case PRIVATECHAT:
case PRIVATECHATOUT:
break;
default:
return;
}
final String message = chatMessage.getMessage();
final String updatedMessage = updateMessage(message);
if (updatedMessage == null)
{
return;
}
final MessageNode messageNode = chatMessage.getMessageNode();
messageNode.setRuneLiteFormatMessage(updatedMessage);
chatMessageManager.update(messageNode);
client.refreshChat();
}
@Subscribe
public void onOverheadTextChanged(final OverheadTextChanged event)
{
if (!(event.getActor() instanceof Player))
{
return;
}
final String message = event.getOverheadText();
final String updatedMessage = updateMessage(message);
if (updatedMessage == null)
{
return;
}
event.getActor().setOverheadText(updatedMessage);
}
@Nullable
private String updateMessage(final String message)
{
final String[] messageWords = message.split(" ");
boolean editedMessage = false;
for (int i = 0; i < messageWords.length; i++)
{
final Emoji emoji = Emoji.getEmoji(messageWords[i]);
if (emoji == null)
{
continue;
}
final int emojiId = modIconsStart + emoji.ordinal();
messageWords[i] = "<img=" + emojiId + ">";
editedMessage = true;
}
// If we haven't edited the message any, don't update it.
if (!editedMessage)
{
return null;
}
return Strings.join(messageWords, " ");
}
}
| 1 | 14,938 | Don't think this belongs in this PR | open-osrs-runelite | java
@@ -206,6 +206,16 @@ func (d *Dispatcher) Inbounds() Inbounds {
return inbounds
}
+// Outbounds returns a copy of the map of outbounds for this RPC object.
+// The outbounds are already wrapped with middleware
+func (d *Dispatcher) Outbounds() Outbounds {
+ outbounds := make(Outbounds, len(d.outbounds))
+ for k, v := range d.outbounds {
+ outbounds[k] = v
+ }
+ return outbounds
+}
+
// ClientConfig provides the configuration needed to talk to the given
// service through an outboundKey. This configuration may be directly
// passed into encoding-specific RPC clients. | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package yarpc
import (
"fmt"
"sync"
"go.uber.org/yarpc/api/middleware"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal"
"go.uber.org/yarpc/internal/clientconfig"
"go.uber.org/yarpc/internal/errors"
"go.uber.org/yarpc/internal/request"
intsync "go.uber.org/yarpc/internal/sync"
"github.com/opentracing/opentracing-go"
)
// Config specifies the parameters of a new Dispatcher constructed via
// NewDispatcher.
type Config struct {
// Name of the service. This is the name used by other services when
// making requests to this service.
Name string
// Inbounds define how this service receives incoming requests from other
// services.
//
// This may be nil if this service does not receive any requests.
Inbounds Inbounds
// Outbounds defines how this service makes requests to other services.
//
// This may be nil if this service does not send any requests.
Outbounds Outbounds
// Inbound and Outbound Middleware that will be applied to all incoming
// and outgoing requests respectively.
//
// These may be nil if there is no middleware to apply.
InboundMiddleware InboundMiddleware
OutboundMiddleware OutboundMiddleware
// Tracer is deprecated. The dispatcher does nothing with this property.
Tracer opentracing.Tracer
// RouterMiddleware is middleware to control how requests are routed.
RouterMiddleware middleware.Router
}
// Inbounds contains a list of inbound transports. Each inbound transport
// specifies a source through which incoming requests are received.
type Inbounds []transport.Inbound
// Outbounds provides access to outbounds for a remote service. Outbounds
// define how requests are sent from this service to the remote service.
type Outbounds map[string]transport.Outbounds
// OutboundMiddleware contains the different types of outbound middlewares.
type OutboundMiddleware struct {
Unary middleware.UnaryOutbound
Oneway middleware.OnewayOutbound
}
// InboundMiddleware contains the different types of inbound middlewares.
type InboundMiddleware struct {
Unary middleware.UnaryInbound
Oneway middleware.OnewayInbound
}
// RouterMiddleware wraps the Router middleware
type RouterMiddleware middleware.Router
// NewDispatcher builds a new Dispatcher using the specified Config. At
// minimum, a service name must be specified.
//
// Invalid configurations or errors in constructing the Dispatcher will cause
// panics.
func NewDispatcher(cfg Config) *Dispatcher {
if cfg.Name == "" {
panic("yarpc.NewDispatcher expects a service name")
}
if err := internal.ValidateServiceName(cfg.Name); err != nil {
panic("yarpc.NewDispatcher expects a valid service name: %s" + err.Error())
}
return &Dispatcher{
name: cfg.Name,
table: middleware.ApplyRouteTable(NewMapRouter(cfg.Name), cfg.RouterMiddleware),
inbounds: cfg.Inbounds,
outbounds: convertOutbounds(cfg.Outbounds, cfg.OutboundMiddleware),
transports: collectTransports(cfg.Inbounds, cfg.Outbounds),
inboundMiddleware: cfg.InboundMiddleware,
}
}
// convertOutbounds applies outbound middleware and creates validator outbounds
func convertOutbounds(outbounds Outbounds, mw OutboundMiddleware) Outbounds {
outboundSpecs := make(Outbounds, len(outbounds))
for outboundKey, outs := range outbounds {
if outs.Unary == nil && outs.Oneway == nil {
panic(fmt.Sprintf("no outbound set for outbound key %q in dispatcher", outboundKey))
}
var (
unaryOutbound transport.UnaryOutbound
onewayOutbound transport.OnewayOutbound
)
serviceName := outboundKey
// apply outbound middleware and create ValidatorOutbounds
if outs.Unary != nil {
unaryOutbound = middleware.ApplyUnaryOutbound(outs.Unary, mw.Unary)
unaryOutbound = request.UnaryValidatorOutbound{UnaryOutbound: unaryOutbound}
}
if outs.Oneway != nil {
onewayOutbound = middleware.ApplyOnewayOutbound(outs.Oneway, mw.Oneway)
onewayOutbound = request.OnewayValidatorOutbound{OnewayOutbound: onewayOutbound}
}
if outs.ServiceName != "" {
serviceName = outs.ServiceName
}
outboundSpecs[outboundKey] = transport.Outbounds{
ServiceName: serviceName,
Unary: unaryOutbound,
Oneway: onewayOutbound,
}
}
return outboundSpecs
}
// collectTransports iterates over all inbounds and outbounds and collects all
// of their unique underlying transports. Multiple inbounds and outbounds may
// share a transport, and we only want the dispatcher to manage their lifecycle
// once.
func collectTransports(inbounds Inbounds, outbounds Outbounds) []transport.Transport {
// Collect all unique transports from inbounds and outbounds.
transports := make(map[transport.Transport]struct{})
for _, inbound := range inbounds {
for _, transport := range inbound.Transports() {
transports[transport] = struct{}{}
}
}
for _, outbound := range outbounds {
if unary := outbound.Unary; unary != nil {
for _, transport := range unary.Transports() {
transports[transport] = struct{}{}
}
}
if oneway := outbound.Oneway; oneway != nil {
for _, transport := range oneway.Transports() {
transports[transport] = struct{}{}
}
}
}
keys := make([]transport.Transport, 0, len(transports))
for key := range transports {
keys = append(keys, key)
}
return keys
}
// Dispatcher encapsulates a YARPC application. It acts as the entry point to
// send and receive YARPC requests in a transport and encoding agnostic way.
type Dispatcher struct {
table transport.RouteTable
name string
inbounds Inbounds
outbounds Outbounds
transports []transport.Transport
inboundMiddleware InboundMiddleware
}
// Inbounds returns a copy of the list of inbounds for this RPC object.
//
// The Inbounds will be returned in the same order that was used in the
// configuration.
func (d *Dispatcher) Inbounds() Inbounds {
inbounds := make(Inbounds, len(d.inbounds))
copy(inbounds, d.inbounds)
return inbounds
}
// ClientConfig provides the configuration needed to talk to the given
// service through an outboundKey. This configuration may be directly
// passed into encoding-specific RPC clients.
//
// keyvalueClient := json.New(dispatcher.ClientConfig("keyvalue"))
//
// This function panics if the outboundKey is not known.
func (d *Dispatcher) ClientConfig(outboundKey string) transport.ClientConfig {
if rs, ok := d.outbounds[outboundKey]; ok {
return clientconfig.MultiOutbound(d.name, rs.ServiceName, rs)
}
panic(noOutboundForOutboundKey{OutboundKey: outboundKey})
}
// Register registers zero or more procedures with this dispatcher. Incoming
// requests to these procedures will be routed to the handlers specified in
// the given Procedures.
func (d *Dispatcher) Register(rs []transport.Procedure) {
procedures := make([]transport.Procedure, 0, len(rs))
for _, r := range rs {
switch r.HandlerSpec.Type() {
case transport.Unary:
h := middleware.ApplyUnaryInbound(r.HandlerSpec.Unary(),
d.inboundMiddleware.Unary)
r.HandlerSpec = transport.NewUnaryHandlerSpec(h)
case transport.Oneway:
h := middleware.ApplyOnewayInbound(r.HandlerSpec.Oneway(),
d.inboundMiddleware.Oneway)
r.HandlerSpec = transport.NewOnewayHandlerSpec(h)
default:
panic(fmt.Sprintf("unknown handler type %q for service %q, procedure %q",
r.HandlerSpec.Type(), r.Service, r.Name))
}
procedures = append(procedures, r)
}
d.table.Register(procedures)
}
// Start starts the Dispatcher, allowing it to accept and processing new
// incoming requests.
//
// This starts all inbounds and outbounds configured on this Dispatcher.
//
// This function returns immediately after everything has been started.
// Servers should add a `select {}` to block to process all incoming requests.
//
// if err := dispatcher.Start(); err != nil {
// log.Fatal(err)
// }
// defer dispatcher.Stop()
//
// select {}
func (d *Dispatcher) Start() error {
// NOTE: These MUST be started in the order transports, outbounds, and
// then inbounds.
//
// If the outbounds are started before the transports, we might get a
// network request before the transports are ready.
//
// If the inbounds are started before the outbounds, an inbound request
// might result in an outbound call before the outbound is ready.
var (
mu sync.Mutex
allStarted []transport.Lifecycle
)
start := func(s transport.Lifecycle) func() error {
return func() error {
if s == nil {
return nil
}
if err := s.Start(); err != nil {
return err
}
mu.Lock()
allStarted = append(allStarted, s)
mu.Unlock()
return nil
}
}
abort := func(errs []error) error {
// Failed to start so stop everything that was started.
wait := intsync.ErrorWaiter{}
for _, s := range allStarted {
wait.Submit(s.Stop)
}
if newErrors := wait.Wait(); len(newErrors) > 0 {
errs = append(errs, newErrors...)
}
return errors.ErrorGroup(errs)
}
// Set router for all inbounds
for _, i := range d.inbounds {
i.SetRouter(d.table)
}
// Start Transports
wait := intsync.ErrorWaiter{}
for _, t := range d.transports {
wait.Submit(start(t))
}
if errs := wait.Wait(); len(errs) != 0 {
return abort(errs)
}
// Start Outbounds
wait = intsync.ErrorWaiter{}
for _, o := range d.outbounds {
wait.Submit(start(o.Unary))
wait.Submit(start(o.Oneway))
}
if errs := wait.Wait(); len(errs) != 0 {
return abort(errs)
}
// Start Inbounds
wait = intsync.ErrorWaiter{}
for _, i := range d.inbounds {
wait.Submit(start(i))
}
if errs := wait.Wait(); len(errs) != 0 {
return abort(errs)
}
addDispatcherToDebugPages(d)
return nil
}
// Stop stops the Dispatcher.
//
// This stops all outbounds and inbounds owned by this Dispatcher.
//
// This function returns after everything has been stopped.
func (d *Dispatcher) Stop() error {
// NOTE: These MUST be stopped in the order inbounds, outbounds, and then
// transports.
//
// If the outbounds are stopped before the inbounds, we might receive a
// request which needs to use a stopped outbound from a still-going
// inbound.
//
// If the transports are stopped before the outbounds, the peers contained
// in the outbound might be deleted from the transport's perspective and
// cause issues.
var allErrs []error
// Stop Inbounds
wait := intsync.ErrorWaiter{}
for _, i := range d.inbounds {
wait.Submit(i.Stop)
}
if errs := wait.Wait(); len(errs) > 0 {
allErrs = append(allErrs, errs...)
}
// Stop Outbounds
wait = intsync.ErrorWaiter{}
for _, o := range d.outbounds {
if o.Unary != nil {
wait.Submit(o.Unary.Stop)
}
if o.Oneway != nil {
wait.Submit(o.Oneway.Stop)
}
}
if errs := wait.Wait(); len(errs) > 0 {
allErrs = append(allErrs, errs...)
}
// Stop Transports
wait = intsync.ErrorWaiter{}
for _, t := range d.transports {
wait.Submit(t.Stop)
}
if errs := wait.Wait(); len(errs) > 0 {
allErrs = append(allErrs, errs...)
}
if len(allErrs) > 0 {
return errors.ErrorGroup(allErrs)
}
removeDispatcherFromDebugPages(d)
return nil
}
// Router returns the procedure router.
func (d *Dispatcher) Router() transport.Router {
return d.table
}
// Name returns the name of the dispatcher.
func (d *Dispatcher) Name() string {
return d.name
}
| 1 | 12,640 | We can assert on the existence of Outbounds through ClientConfig calls, right? Since this is only for tests, do we need this function? | yarpc-yarpc-go | go
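A sketch of the test-only alternative the reviewer describes (the test name, the assertion library, and the newTestOutbounds helper are assumptions): instead of exposing Outbounds(), a test can assert that an outbound was configured by asking for its ClientConfig, which panics for unknown keys.

package yarpc_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"go.uber.org/yarpc"
)

func TestKeyValueOutboundConfigured(t *testing.T) {
	d := yarpc.NewDispatcher(yarpc.Config{
		Name:      "caller",
		Outbounds: newTestOutbounds(), // hypothetical helper building Outbounds
	})

	// ClientConfig panics if "keyvalue" was never configured, so reaching
	// the assertion below already proves the outbound exists.
	cc := d.ClientConfig("keyvalue")
	assert.Equal(t, "keyvalue", cc.Service())
}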
@@ -37,14 +37,13 @@ namespace MvvmCross.Core.ViewModels
public interface IMvxViewModel<TParameter> : IMvxViewModel where TParameter : class
{
- Task Initialize(TParameter parameter);
+ void Declare(TParameter parameter);
}
//TODO: Can we keep the IMvxViewModel syntax here? Compiler complains
public interface IMvxViewModelResult<TResult> : IMvxViewModel where TResult : class
{
- void SetClose(TaskCompletionSource<TResult> tcs, CancellationToken cancellationToken);
- Task<bool> Close(TResult result);
+ TaskCompletionSource<object> CloseCompletionSource { get; set; }
}
public interface IMvxViewModel<TParameter, TResult> : IMvxViewModel<TParameter>, IMvxViewModelResult<TResult> where TParameter : class where TResult : class | 1 | // IMvxViewModel.cs
// MvvmCross is licensed using Microsoft Public License (Ms-PL)
// Contributions and inspirations noted in readme.md and license.txt
//
// Project Lead - Stuart Lodge, @slodge, [email protected]
using System.Threading;
using System.Threading.Tasks;
namespace MvvmCross.Core.ViewModels
{
public interface IMvxViewModel
{
void ViewCreated();
void ViewAppearing();
void ViewAppeared();
void ViewDisappearing();
void ViewDisappeared();
void ViewDestroy();
void Init(IMvxBundle parameters);
void ReloadState(IMvxBundle state);
void Start();
void SaveState(IMvxBundle state);
Task Initialize();
}
public interface IMvxViewModel<TParameter> : IMvxViewModel where TParameter : class
{
Task Initialize(TParameter parameter);
}
//TODO: Can we keep the IMvxViewModel syntax here? Compiler complains
public interface IMvxViewModelResult<TResult> : IMvxViewModel where TResult : class
{
void SetClose(TaskCompletionSource<TResult> tcs, CancellationToken cancellationToken);
Task<bool> Close(TResult result);
}
public interface IMvxViewModel<TParameter, TResult> : IMvxViewModel<TParameter>, IMvxViewModelResult<TResult> where TParameter : class where TResult : class
{
}
} | 1 | 13,022 | @martijn00 so this PR introduces a new ViewModel lifecycle method? It isn't in the PR description/any new docs | MvvmCross-MvvmCross | .cs
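A sketch of how a ViewModel would pick up the new hook (the parameter type and base-class details are assumptions; the real wiring depends on the navigation service in this PR):

public class DetailParameter
{
    public string ItemId { get; set; }
}

public class DetailViewModel : MvxViewModel, IMvxViewModel<DetailParameter>
{
    private DetailParameter _parameter;

    // New synchronous lifecycle method from this PR: receives the navigation
    // parameter before the async Initialize() runs.
    public void Declare(DetailParameter parameter)
    {
        _parameter = parameter;
    }
}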
@@ -89,6 +89,13 @@ class SingleStageDetector(BaseDetector):
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
+ # NOTE the batched image size information may be useful, e.g.
+ # in DETR, this is needed for the construction of masks, which is
+ # then used for the transformer_head.
+ input_img_shape = tuple(img.size()[-2:])
+ for img_meta in img_metas:
+ img_meta['input_img_shape'] = input_img_shape
+
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore) | 1 | import torch
import torch.nn as nn
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(SingleStageDetector, self).__init__()
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
def init_weights(self, pretrained=None):
"""Initialize the weights in detector.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
super(SingleStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.bbox_head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, img_metas, rescale=rescale)
# skip post-processing when exporting to ONNX
if torch.onnx.is_in_onnx_export():
return bbox_list
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
return bbox_results
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
assert hasattr(self.bbox_head, 'aug_test'), \
f'{self.bbox_head.__class__.__name__}' \
' does not support test-time augmentation'
feats = self.extract_feats(imgs)
return [self.bbox_head.aug_test(feats, img_metas, rescale=rescale)]
| 1 | 21,640 | Are these modifications duplicated? Or should we move it into the base detector? | open-mmlab-mmdetection | py
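A sketch of the reviewer's second option (the method name and placement are assumptions): hoist the shape bookkeeping into BaseDetector so each detector's forward_train can share it instead of repeating the loop.

import torch.nn as nn

class BaseDetector(nn.Module):
    # Hypothetical shared helper; detectors would call it at the top of
    # forward_train instead of duplicating the loop from the patch.
    @staticmethod
    def record_input_img_shape(img, img_metas):
        input_img_shape = tuple(img.size()[-2:])
        for img_meta in img_metas:
            img_meta['input_img_shape'] = input_img_shape
        return img_metas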
@@ -35,7 +35,7 @@ public class DirectAcyclicGraphSeed {
}
});
- public static byte[] dagSeed(final long block) {
+ private static byte[] dagSeed(final long block) {
final byte[] seed = new byte[32];
if (Long.compareUnsigned(block, EPOCH_LENGTH) >= 0) {
final MessageDigest keccak256 = KECCAK_256.get(); | 1 | /*
* Copyright ConsenSys AG.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.mainnet;
import static org.hyperledger.besu.ethereum.mainnet.EthHash.EPOCH_LENGTH;
import org.hyperledger.besu.crypto.Hash;
import org.hyperledger.besu.crypto.MessageDigestFactory;
import java.security.DigestException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
public class DirectAcyclicGraphSeed {
public static final ThreadLocal<MessageDigest> KECCAK_256 =
ThreadLocal.withInitial(
() -> {
try {
return MessageDigestFactory.create(Hash.KECCAK256_ALG);
} catch (final NoSuchAlgorithmException ex) {
throw new IllegalStateException(ex);
}
});
public static byte[] dagSeed(final long block) {
final byte[] seed = new byte[32];
if (Long.compareUnsigned(block, EPOCH_LENGTH) >= 0) {
final MessageDigest keccak256 = KECCAK_256.get();
for (int i = 0; i < Long.divideUnsigned(block, EPOCH_LENGTH); ++i) {
keccak256.update(seed);
try {
keccak256.digest(seed, 0, seed.length);
} catch (final DigestException ex) {
throw new IllegalStateException(ex);
}
}
}
return seed;
}
}
| 1 | 23,947 | Since this is private and single-use, it should be unwrapped inside the two-arg dagSeed method. | hyperledger-besu | java
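A sketch of the unwrapping the reviewer asks for (the two-arg signature is an assumption): the private helper's body moves directly into the public method that takes the epoch length.

// Hypothetical two-arg method with the former dagSeed(long) body inlined.
public static byte[] dagSeed(final long block, final long epochLength) {
  final byte[] seed = new byte[32];
  if (Long.compareUnsigned(block, epochLength) >= 0) {
    final MessageDigest keccak256 = KECCAK_256.get();
    for (int i = 0; i < Long.divideUnsigned(block, epochLength); ++i) {
      keccak256.update(seed);
      try {
        keccak256.digest(seed, 0, seed.length);
      } catch (final DigestException ex) {
        throw new IllegalStateException(ex);
      }
    }
  }
  return seed;
}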
@@ -160,11 +160,10 @@ abstract class Tx_Solr_PluginBase_PluginBase extends tslib_pibase {
* @param array $configuration configuration array as provided by the TYPO3 core
*/
protected function initialize($configuration) {
- $this->conf = $configuration;
-
- $this->conf = t3lib_div::array_merge_recursive_overrule(
- $GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.'],
- $this->conf
+ $this->conf = $GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.'];
+ \TYPO3\CMS\Core\Utility\ArrayUtility::mergeRecursiveWithOverrule(
+ $this->conf,
+ $configuration
);
$this->pi_setPiVarDefaults(); | 1 | <?php
/***************************************************************
* Copyright notice
*
* (c) 2010-2011 Timo Schmidt <[email protected]>
* (c) 2012-2014 Ingo Renner <[email protected]>
* All rights reserved
*
* This script is part of the TYPO3 project. The TYPO3 project is
* free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The GNU General Public License can be found at
* http://www.gnu.org/copyleft/gpl.html.
*
* This script is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This copyright notice MUST APPEAR in all copies of the script!
***************************************************************/
/**
* Abstract base class for all solr plugins.
*
* Implements a main method and several abstract methods which
* need to be implemented by an inheriting plugin.
*
* @author Ingo Renner <[email protected]>
* @author Timo Schmidt <[email protected]>
* @package TYPO3
* @subpackage solr
*/
abstract class Tx_Solr_PluginBase_PluginBase extends tslib_pibase {
public $prefixId = 'tx_solr';
public $extKey = 'solr';
/**
* an instance of Tx_Solr_Search
*
* @var Tx_Solr_Search
*/
protected $search;
/**
* The plugin's query
*
* @var Tx_Solr_Query
*/
protected $query = NULL;
/**
* Determines whether the solr server is available or not.
*/
protected $solrAvailable;
/**
* An instance of Tx_Solr_Template
*
* @var Tx_Solr_Template
*/
protected $template;
/**
* An instance of Tx_Solr_JavascriptManager
*
* @var Tx_Solr_JavascriptManager
*/
protected $javascriptManager;
/**
* The user's raw query.
*
* Private to enforce API usage.
*
* @var string
*/
private $rawUserQuery;
// Main
/**
* The main method of the plugin
*
* @param string $content The plugin content
* @param array $configuration The plugin configuration
* @return string The content that is displayed on the website
*/
public function main($content, $configuration) {
/** @noinspection PhpUnusedLocalVariableInspection */
$content = '';
try {
$this->initialize($configuration);
$this->preRender();
$actionResult = $this->performAction();
if ($this->solrAvailable) {
$content = $this->render($actionResult);
} else {
$content = $this->renderError();
}
$content = $this->postRender($content);
} catch(Exception $e) {
if ($GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.']['logging.']['exceptions']) {
t3lib_div::devLog(
$e->getCode() . ': ' . $e->__toString(),
'solr',
3,
(array) $e
);
}
$this->initializeTemplateEngine();
$content = $this->renderException();
}
return $this->baseWrap($content);
}
/**
* Adds the possibility to use stdWrap on the plugins content instead of wrapInBaseClass.
* Defaults to wrapInBaseClass to ensure downward compatibility.
*
* @param string $content The plugin content
* @return string
*/
protected function baseWrap($content) {
if (isset($this->conf['general.']['baseWrap.'])) {
return $this->cObj->stdWrap($content, $this->conf['general.']['baseWrap.']);
} else {
return $this->pi_wrapInBaseClass($content);
}
}
/**
* Implements the action logic. The result of this method is passed to the
* render method.
*
* @return string Action result
*/
protected abstract function performAction();
// Initialization
/**
* Initializes the plugin - configuration, language, caching, search...
*
* @param array $configuration configuration array as provided by the TYPO3 core
*/
protected function initialize($configuration) {
$this->conf = $configuration;
$this->conf = t3lib_div::array_merge_recursive_overrule(
$GLOBALS['TSFE']->tmpl->setup['plugin.']['tx_solr.'],
$this->conf
);
$this->pi_setPiVarDefaults();
$this->pi_loadLL();
$this->pi_initPIflexForm();
$this->overrideTyposcriptWithFlexformSettings();
$this->initializeQuery();
$this->initializeSearch();
$this->initializeTemplateEngine();
$this->initializeJavascriptManager();
$this->postInitialize();
}
/**
* Overwrites pi_setPiVarDefaults to add stdWrap-functionality to _DEFAULT_PI_VARS
*
* @author Grigori Prokhorov <[email protected]>
* @author Ivan Kartolo <[email protected]>
* @return void
*/
function pi_setPiVarDefaults() {
if (is_array($this->conf['_DEFAULT_PI_VARS.'])) {
foreach ($this->conf['_DEFAULT_PI_VARS.'] as $key => $defaultValue) {
$this->conf['_DEFAULT_PI_VARS.'][$key] = $this->cObj->cObjGetSingle($this->conf['_DEFAULT_PI_VARS.'][$key], $this->conf['_DEFAULT_PI_VARS.'][$key . '.']);
}
$this->piVars = t3lib_div::array_merge_recursive_overrule(
$this->conf['_DEFAULT_PI_VARS.'],
is_array($this->piVars) ? $this->piVars : array()
);
}
}
/**
* Overwrites pi_loadLL() to handle custom location of language files.
*
* Loads local-language values by looking for a "locallang" file in the
* plugin class directory ($this->scriptRelPath) and if found includes it.
* Also locallang values set in the TypoScript property "_LOCAL_LANG" are
* merged onto the values found in the "locallang" file.
* Supported file extensions xlf, xml, php
*
* @return void
*/
public function pi_loadLL() {
if (!$this->LOCAL_LANG_loaded && $this->scriptRelPath) {
list($languageFileName) = explode('/', $this->scriptRelPath);
$languageFileName = str_replace('Pi', 'Plugin', $languageFileName);
$basePath = 'EXT:' . $this->extKey . '/Resources/Private/Language/' . $languageFileName . '.xml';
// Read the strings in the required charset (since TYPO3 4.2)
$this->LOCAL_LANG = t3lib_div::readLLfile($basePath, $this->LLkey, $GLOBALS['TSFE']->renderCharset);
$alternativeLanguageKeys = t3lib_div::trimExplode(',', $this->altLLkey, TRUE);
foreach ($alternativeLanguageKeys as $languageKey) {
$tempLL = t3lib_div::readLLfile($basePath, $languageKey);
if ($this->LLkey !== 'default' && isset($tempLL[$languageKey])) {
$this->LOCAL_LANG[$languageKey] = $tempLL[$languageKey];
}
}
// Overlaying labels from TypoScript (including fictitious language keys for non-system languages!):
if (isset($this->conf['_LOCAL_LANG.'])) {
// Clear the "unset memory"
$this->LOCAL_LANG_UNSET = array();
foreach ($this->conf['_LOCAL_LANG.'] as $languageKey => $languageArray) {
// Remove the dot after the language key
$languageKey = substr($languageKey, 0, -1);
// Don't process label if the language is not loaded
if (is_array($languageArray) && isset($this->LOCAL_LANG[$languageKey])) {
foreach ($languageArray as $labelKey => $labelValue) {
if (!is_array($labelValue)) {
$this->LOCAL_LANG[$languageKey][$labelKey][0]['target'] = $labelValue;
if ($labelValue === '') {
$this->LOCAL_LANG_UNSET[$languageKey][$labelKey] = '';
}
$this->LOCAL_LANG_charset[$languageKey][$labelKey] = 'utf-8';
}
}
}
}
}
}
$this->LOCAL_LANG_loaded = 1;
}
/**
* Allows overriding TypoScript settings with Flexform values.
*
*/
protected function overrideTyposcriptWithFlexformSettings() {}
/**
* Initializes the query from the GET query parameter.
*
*/
protected function initializeQuery() {
$this->rawUserQuery = t3lib_div::_GET('q');
}
/**
* Initializes the Solr connection and tests the connection through a ping.
*
*/
protected function initializeSearch() {
$solrConnection = t3lib_div::makeInstance('Tx_Solr_ConnectionManager')->getConnectionByPageId(
$GLOBALS['TSFE']->id,
$GLOBALS['TSFE']->sys_language_uid,
$GLOBALS['TSFE']->MP
);
$this->search = t3lib_div::makeInstance('Tx_Solr_Search', $solrConnection);
$this->solrAvailable = $this->search->ping();
}
/**
* Initializes the template engine and returns the initialized instance.
*
* @return Tx_Solr_Template
* @throws UnexpectedValueException if a view helper provider fails to implement interface Tx_Solr_ViewHelperProvider
*/
protected function initializeTemplateEngine() {
$templateFile = $this->getTemplateFile();
$subPart = $this->getSubpart();
$flexformTemplateFile = $this->pi_getFFvalue(
$this->cObj->data['pi_flexform'],
'templateFile',
'sOptions'
);
if (!empty($flexformTemplateFile)) {
$templateFile = $flexformTemplateFile;
}
/** @var Tx_Solr_Template $template */
$template = t3lib_div::makeInstance(
'Tx_Solr_Template',
$this->cObj,
$templateFile,
$subPart
);
$template->addViewHelperIncludePath($this->extKey, 'Classes/ViewHelper/');
$template->addViewHelper('LLL', array(
'languageFile' => $GLOBALS['PATH_solr'] .'Resources/Private/Language/' . str_replace('Pi', 'Plugin', $this->getPluginKey()) . '.xml',
'llKey' => $this->LLkey
));
// can be used for view helpers that need configuration during initialization
if (is_array($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr'][$this->getPluginKey()]['addViewHelpers'])) {
foreach ($GLOBALS['TYPO3_CONF_VARS']['EXTCONF']['solr'][$this->getPluginKey()]['addViewHelpers'] as $classReference) {
$viewHelperProvider = &t3lib_div::getUserObj($classReference);
if ($viewHelperProvider instanceof Tx_Solr_ViewHelperProvider) {
$viewHelpers = $viewHelperProvider->getViewHelpers();
foreach ($viewHelpers as $helperName => $helperObject) {
// TODO check whether $helperAdded is TRUE, throw an exception if not
$helperAdded = $template->addViewHelperObject($helperName, $helperObject);
}
} else {
throw new UnexpectedValueException(
get_class($viewHelperProvider) . ' must implement interface Tx_Solr_ViewHelperProvider',
1310387296
);
}
}
}
$template = $this->postInitializeTemplateEngine($template);
$this->template = $template;
}
/**
* Initializes the javascript manager.
*
*/
protected function initializeJavascriptManager() {
$this->javascriptManager = t3lib_div::makeInstance('Tx_Solr_JavascriptManager');
}
/**
* This method is called after initializing in the initialize method.
* Overwrite this method to do your own initialization.
*
* @return void
*/
protected function postInitialize() {}
/**
* Overwrite this method to do own initialisations of the template.
*
* @param Tx_Solr_Template $template Template
* @return Tx_Solr_Template
*/
protected function postInitializeTemplateEngine(Tx_Solr_Template $template) {
return $template;
}
// Rendering
/**
* This method executes the requested commands and applies the changes to
* the template.
*
* @param $actionResult
* @return string Rendered plugin content
*/
protected abstract function render($actionResult);
/**
* Renders a solr error.
*
* @return string A representation of the error that should be understandable for the user.
*/
protected function renderError() {
$this->template->workOnSubpart('solr_search_unavailable');
return $this->template->render();
}
/**
* Renders a solr exception.
*
* @return string A representation of the exception that should be understandable for the user.
*/
protected function renderException() {
$this->template->workOnSubpart('solr_search_error');
return $this->template->render();
}
/**
* Should be overwritten to do things before rendering.
*
*/
protected function preRender() {}
/**
* Overwrite this method to perform changes to the content after rendering.
*
* @param string $content The content rendered by the plugin so far
* @return string The content that should be presented on the website, might be different from the output rendered before
*/
protected function postRender($content) {
if (isset($this->conf['stdWrap.'])) {
$content = $this->cObj->stdWrap($content, $this->conf['stdWrap.']);
}
return $content;
}
// Helper methods
/**
* Determines the template file from the configuration.
*
* Overwrite this method to use a different template.
*
* @return string The template file name to be used for the plugin
*/
protected function getTemplateFile() {
return $this->conf['templateFiles.'][$this->getTemplateFileKey()];
}
/**
* This method should be implemented to return the TSconfig key which
* contains the template name for this template.
*
* @see Tx_Solr_PluginBase_PluginBase#initializeTemplateEngine()
* @return string The TSconfig key containing the template name
*/
protected abstract function getTemplateFileKey();
/**
* Gets the plugin's template instance.
*
* @return Tx_Solr_Template The plugin's template.
*/
public function getTemplate() {
return $this->template;
}
/**
* Gets the plugin's javascript manager.
*
* @return Tx_Solr_JavascriptManager The plugin's javascript manager.
*/
public function getJavascriptManager() {
return $this->javascriptManager;
}
/**
* Should return the relevant subpart of the template.
*
* @see Tx_Solr_PluginBase_PluginBase#initializeTemplateEngine()
* @return string The subpart of the template to be used
*/
protected abstract function getSubpart();
/**
* This method should return the plugin key. The key is used to read some
* configuration options in initializeTemplateEngine()
*
* @see Tx_Solr_PluginBase_PluginBase#initializeTemplateEngine()
* @return string The plugin key
*/
protected abstract function getPluginKey();
/**
* Gets the target page Id for links. Might have been set through either
* flexform or TypoScript. If none is set, TSFE->id is used.
*
* @return integer The page Id to be used for links
*/
public function getLinkTargetPageId() {
return $this->conf['search.']['targetPage'];
}
/**
* Gets the Tx_Solr_Search instance used for the query. Mainly used as a
* helper function for result document modifiers.
*
* @return Tx_Solr_Search
*/
public function getSearch() {
return $this->search;
}
/**
* Sets the Tx_Solr_Search instance used for the query. Mainly used as a
* helper function for result document modifiers.
*
* @param Tx_Solr_Search $search Search instance
*/
public function setSearch(Tx_Solr_Search $search) {
$this->search = $search;
}
/**
* Gets the user's query term and cleans it so that it can be used in
* templates, for example.
*
* @return string The cleaned user query.
*/
public function getCleanUserQuery() {
$userQuery = $this->getRawUserQuery();
if (!is_null($userQuery)) {
$userQuery = Tx_Solr_Query::cleanKeywords($userQuery);
}
// escape triple hashes as they are used in the template engine
// TODO remove after switching to fluid templates
$userQuery = Tx_Solr_Template::escapeMarkers($userQuery);
return $userQuery;
}
/**
* Gets the raw user query
*
* @return string Raw user query.
*/
public function getRawUserQuery() {
return $this->rawUserQuery;
}
}
if (defined('TYPO3_MODE') && $GLOBALS['TYPO3_CONF_VARS'][TYPO3_MODE]['XCLASS']['ext/solr/Classes/pluginbase/PluginBase.php']) {
include_once($GLOBALS['TYPO3_CONF_VARS'][TYPO3_MODE]['XCLASS']['ext/solr/Classes/pluginbase/PluginBase.php']);
}
?> | 1 | 5,455 | why the order change? Also for the next chunk. (I understand the result will be the same though) | TYPO3-Solr-ext-solr | php |
@@ -42,7 +42,9 @@ type FuncDef struct {
// Not part of the grammar. Used to indicate internal targets that can only
// be called using keyword arguments.
KeywordsOnly bool
- IsPrivate bool
+ // allowed return types of the FuncDef
+ Returns []string
+ IsPrivate bool
}
// A ForStatement implements the 'for' statement. | 1 | package asp
// A FileInput is the top-level structure of a BUILD file.
type FileInput struct {
Statements []*Statement `{ @@ } EOF`
}
// A Statement is the type we work with externally the most; it's a single Python statement.
// Note that some mildly excessive fiddling is needed since the parser we're using doesn't
// support backoff (i.e. if an earlier entry matches to its completion but can't consume
// following tokens, it doesn't then make another choice :( )
type Statement struct {
Pos Position
EndPos Position
	Pass     bool             `( @"pass" EOL`
	Continue bool             `| @"continue" EOL`
	FuncDef  *FuncDef         `| @@`
	For      *ForStatement    `| @@`
	If       *IfStatement     `| @@`
	Return   *ReturnStatement `| "return" @@ EOL`
	Raise    *Expression      `| "raise" @@ EOL`
	Assert   *struct {
		Expr    *Expression `@@`
		Message string      `["," @String]`
	} `| "assert" @@ EOL`
	Ident   *IdentStatement `| @@ EOL`
	Literal *Expression     `| @@ EOL)`
}
// A ReturnStatement implements the Python 'return' statement.
type ReturnStatement struct {
Values []*Expression `[ @@ { "," @@ } ]`
}
// A FuncDef implements definition of a new function.
type FuncDef struct {
Name string `"def" @Ident`
Arguments []Argument `"(" [ @@ { "," @@ } ] ")" Colon EOL`
Docstring string `[ @String EOL ]`
Statements []*Statement `{ @@ } Unindent`
EoDef Position
// Not part of the grammar. Used to indicate internal targets that can only
// be called using keyword arguments.
KeywordsOnly bool
IsPrivate bool
}
// A ForStatement implements the 'for' statement.
// Note that it does not support Python's "for-else" construction.
type ForStatement struct {
Names []string `"for" @Ident [ { "," @Ident } ] "in"`
Expr Expression `@@ Colon EOL`
Statements []*Statement `{ @@ } Unindent`
}
// An IfStatement implements the if-elif-else statement.
type IfStatement struct {
Condition Expression `"if" @@ Colon EOL`
Statements []*Statement `{ @@ } Unindent`
Elif []struct {
Condition Expression `"elif" @@ Colon EOL`
Statements []*Statement `{ @@ } Unindent`
} `{ @@ }`
ElseStatements []*Statement `[ "else" Colon EOL { @@ } Unindent ]`
}
// An Argument represents an argument to a function definition.
type Argument struct {
Name string `@Ident`
Type []string `[ ":" @( { ( "bool" | "str" | "int" | "list" | "dict" | "function" ) [ "|" ] } ) ]`
// Aliases are an experimental non-Python concept where function arguments can be aliased to different names.
// We use this to support compatibility with Bazel & Buck etc in some cases.
Aliases []string `[ "&" ( { @Ident [ "&" ] } ) ]`
Value *Expression `[ "=" @@ ]`
IsPrivate bool
}
// An Expression is a generalised Python expression, i.e. anything that can appear where an
// expression is allowed (including the extra parts like inline if-then-else, operators, etc).
type Expression struct {
Pos Position
EndPos Position
UnaryOp *UnaryOp `( @@`
Val *ValueExpression `| @@ )`
Op []OpExpression `{ @@ }`
If *InlineIf `[ @@ ]`
// For internal optimisation - do not use outside this package.
Optimised *OptimisedExpression
}
// An OptimisedExpression contains information to optimise certain aspects of execution of
// an expression. It must be public for serialisation but shouldn't be used outside this package.
type OptimisedExpression struct {
// Used to optimise constant expressions.
Constant pyObject
// Similarly applied to optimise simple lookups of local variables.
Local string
// And similarly applied to optimise lookups into configuration.
Config string
}
// An OpExpression is an operator combined with its following expression.
type OpExpression struct {
Op Operator `@("+" | "-" | "%" | "<" | ">" | "and" | "or" | "is" | "in" | "not" "in" | "==" | "!=" | ">=" | "<=")`
Expr *Expression `@@`
}
// A ValueExpression is the value part of an expression, i.e. without surrounding operators.
type ValueExpression struct {
String string `( @String`
FString *FString `| @FString`
Int *struct {
Int int `@Int`
} `| @@` // Should just be *int, but https://github.com/golang/go/issues/23498 :(
Bool string `| @( "True" | "False" | "None" )`
List *List `| "[" @@ "]"`
Dict *Dict `| "{" @@ "}"`
Tuple *List `| "(" @@ ")"`
Lambda *Lambda `| "lambda" @@`
Ident *IdentExpr `| @@ )`
Slice *Slice `[ @@ ]`
Property *IdentExpr `[ ( "." @@`
Call *Call `| "(" @@ ")" ) ]`
}
// A FString represents a minimal version of a Python literal format string.
// Note that we only support a very small subset of what Python allows there; essentially only
// variable substitution, which gives a much simpler AST structure here.
type FString struct {
Vars []struct {
Prefix string // Preceding string bit
Var string // Variable name to interpolate
Config string // Config variable to look up
}
Suffix string // Following string bit
}
// A UnaryOp represents a unary operation - in our case the only ones we support are negation and not.
type UnaryOp struct {
Op string `@( "-" | "not" )`
Expr ValueExpression `@@`
}
// An IdentStatement implements a statement that begins with an identifier (i.e. anything that
// starts off with a variable name). It is a little fiddly due to parser limitations.
type IdentStatement struct {
Name string `@Ident`
Unpack *struct {
Names []string `@Ident { "," @Ident }`
Expr *Expression `"=" @@`
} `( "," @@ `
Index *struct {
Expr *Expression `@@ "]"`
Assign *Expression `( "=" @@`
AugAssign *Expression `| "+=" @@ )`
} `| "[" @@`
Action *IdentStatementAction `| @@ )`
}
// An IdentStatementAction implements actions on an IdentStatement.
type IdentStatementAction struct {
Property *IdentExpr ` "." @@`
Call *Call `| "(" @@ ")"`
Assign *Expression `| "=" @@`
AugAssign *Expression `| "+=" @@`
}
// An IdentExpr implements parts of an expression that begin with an identifier (i.e. anything
// that might be a variable name).
type IdentExpr struct {
Pos Position
EndPos Position
Name string `@Ident`
Action []struct {
Property *IdentExpr ` "." @@`
Call *Call `| "(" @@ ")"`
} `{ @@ }`
}
// A Call represents a call site of a function.
type Call struct {
Arguments []CallArgument `[ @@ ] { "," [ @@ ] }`
}
// A CallArgument represents a single argument at a call site of a function.
type CallArgument struct {
Pos Position
Name string `[ @@ "=" ]`
Value Expression `@@`
}
// A List represents a list literal, either with or without a comprehension clause.
type List struct {
Values []*Expression `[ @@ ] { "," [ @@ ] }`
Comprehension *Comprehension `[ @@ ]`
}
// A Dict represents a dict literal, either with or without a comprehension clause.
type Dict struct {
Items []*DictItem `[ @@ ] { "," [ @@ ] }`
Comprehension *Comprehension `[ @@ ]`
}
// A DictItem represents a single key-value pair in a dict literal.
type DictItem struct {
Key Expression `@( Ident | String ) ":"`
Value Expression `@@`
}
// A Slice represents a slice or index expression (e.g. [1], [1:2], [2:], [:], etc).
type Slice struct {
Start *Expression `"[" [ @@ ]`
Colon string `[ @":" ]`
End *Expression `[ @@ ] "]"`
}
// An InlineIf implements the single-line if-then-else construction
type InlineIf struct {
Condition *Expression `"if" @@`
Else *Expression `[ "else" @@ ]`
}
// A Comprehension represents a list or dict comprehension clause.
type Comprehension struct {
Names []string `"for" @Ident [ { "," @Ident } ] "in"`
Expr *Expression `@@`
Second *struct {
Names []string `"for" @Ident [ { "," @Ident } ] "in"`
Expr *Expression `@@`
} `[ @@ ]`
If *Expression `[ "if" @@ ]`
}
// A Lambda is the inline lambda function.
type Lambda struct {
Arguments []Argument `[ @@ { "," @@ } ] Colon`
Expr Expression `@@`
}
// An Operator defines a unary or binary operator.
type Operator rune
const (
// Add etc are arithmetic operators - these are implemented on a per-type basis
Add Operator = '+'
// Subtract implements binary - (only works on integers)
Subtract = '-'
// Modulo implements % (including string interpolation)
Modulo = '%'
// LessThan implements <
LessThan = '<'
// GreaterThan implements >
GreaterThan = '>'
// LessThanOrEqual implements <=
LessThanOrEqual = '≤'
// GreaterThanOrEqual implements >=
GreaterThanOrEqual = '≥'
// Equal etc are comparison operators - also on a per-type basis but have slightly different rules.
Equal = '='
// NotEqual implements !=
NotEqual = '≠'
// In implements the in operator
In = '∈'
// NotIn implements "not in" as a single operator.
NotIn = '∉'
// And etc are logical operators - these are implemented type-independently
And Operator = '&'
// Or implements the or operator
Or = '|'
// Is implements type identity.
Is = '≡'
// Index is used in the parser, but not when parsing code.
Index = '['
)
// String implements the fmt.Stringer interface. It is not especially efficient and is
// normally only used for errors & debugging.
func (o Operator) String() string {
for k, v := range operators {
if o == v {
return k
}
}
return "unknown"
}
var operators = map[string]Operator{
"+": Add,
"-": Subtract,
"%": Modulo,
"<": LessThan,
">": GreaterThan,
"and": And,
"or": Or,
"is": Is,
"in": In,
"not in": NotIn,
"==": Equal,
"!=": NotEqual,
">=": GreaterThanOrEqual,
"<=": LessThanOrEqual,
}
| 1 | 8,584 | I wonder if this should just be a single `string`? We don't have any cases where we return different types and it would make the parser simpler to just read a single identifier. | thought-machine-please | go |
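The review for this chunk questions whether `Returns []string` should be a single string, since the rules never return differing types and a lone identifier is simpler to parse. A minimal Go sketch of that alternative; the field name and comment are assumptions, not the project's actual grammar:

package asp

// FuncDefSketch is a hypothetical variant of the FuncDef above that records
// one declared return type; the parser would then read a single identifier
// instead of a delimited list.
type FuncDefSketch struct {
	Name      string
	Return    string // single allowed return type of the FuncDef
	IsPrivate bool
}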
@@ -125,7 +125,7 @@ abstract class BaseDataReader<T> implements Closeable {
protected InputFile getInputFile(FileScanTask task) {
Preconditions.checkArgument(!task.isDataTask(), "Invalid task type");
- return inputFiles.get(task.file().path().toString());
+ return getInputFile(task.file().path().toString());
}
protected InputFile getInputFile(String location) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.spark.source;
import java.io.Closeable;
import java.io.IOException;
import java.math.BigDecimal;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.stream.Stream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.util.Utf8;
import org.apache.iceberg.CombinedScanTask;
import org.apache.iceberg.FileScanTask;
import org.apache.iceberg.encryption.EncryptedFiles;
import org.apache.iceberg.encryption.EncryptedInputFile;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.CloseableIterator;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.InputFile;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.Maps;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.util.ByteBuffers;
import org.apache.spark.rdd.InputFileBlockHolder;
import org.apache.spark.sql.types.Decimal;
import org.apache.spark.unsafe.types.UTF8String;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Base class of Spark readers.
*
* @param <T> is the Java class returned by this reader whose objects contain one or more rows.
*/
abstract class BaseDataReader<T> implements Closeable {
private static final Logger LOG = LoggerFactory.getLogger(BaseDataReader.class);
private final Iterator<FileScanTask> tasks;
private final Map<String, InputFile> inputFiles;
private CloseableIterator<T> currentIterator;
private T current = null;
private FileScanTask currentTask = null;
BaseDataReader(CombinedScanTask task, FileIO io, EncryptionManager encryptionManager) {
this.tasks = task.files().iterator();
Map<String, ByteBuffer> keyMetadata = Maps.newHashMap();
task.files().stream()
.flatMap(fileScanTask -> Stream.concat(Stream.of(fileScanTask.file()), fileScanTask.deletes().stream()))
.forEach(file -> keyMetadata.put(file.path().toString(), file.keyMetadata()));
Stream<EncryptedInputFile> encrypted = keyMetadata.entrySet().stream()
.map(entry -> EncryptedFiles.encryptedInput(io.newInputFile(entry.getKey()), entry.getValue()));
// decrypt with the batch call to avoid multiple RPCs to a key server, if possible
Iterable<InputFile> decryptedFiles = encryptionManager.decrypt(encrypted::iterator);
Map<String, InputFile> files = Maps.newHashMapWithExpectedSize(task.files().size());
decryptedFiles.forEach(decrypted -> files.putIfAbsent(decrypted.location(), decrypted));
this.inputFiles = Collections.unmodifiableMap(files);
this.currentIterator = CloseableIterator.empty();
}
public boolean next() throws IOException {
try {
while (true) {
if (currentIterator.hasNext()) {
this.current = currentIterator.next();
return true;
} else if (tasks.hasNext()) {
this.currentIterator.close();
this.currentTask = tasks.next();
this.currentIterator = open(currentTask);
} else {
this.currentIterator.close();
return false;
}
}
} catch (IOException | RuntimeException e) {
if (currentTask != null && !currentTask.isDataTask()) {
LOG.error("Error reading file: {}", getInputFile(currentTask).location(), e);
}
throw e;
}
}
public T get() {
return current;
}
abstract CloseableIterator<T> open(FileScanTask task);
@Override
public void close() throws IOException {
InputFileBlockHolder.unset();
// close the current iterator
this.currentIterator.close();
// exhaust the task iterator
while (tasks.hasNext()) {
tasks.next();
}
}
protected InputFile getInputFile(FileScanTask task) {
Preconditions.checkArgument(!task.isDataTask(), "Invalid task type");
return inputFiles.get(task.file().path().toString());
}
protected InputFile getInputFile(String location) {
return inputFiles.get(location);
}
protected static Object convertConstant(Type type, Object value) {
if (value == null) {
return null;
}
switch (type.typeId()) {
case DECIMAL:
return Decimal.apply((BigDecimal) value);
case STRING:
if (value instanceof Utf8) {
Utf8 utf8 = (Utf8) value;
return UTF8String.fromBytes(utf8.getBytes(), 0, utf8.getByteLength());
}
return UTF8String.fromString(value.toString());
case FIXED:
if (value instanceof byte[]) {
return value;
} else if (value instanceof GenericData.Fixed) {
return ((GenericData.Fixed) value).bytes();
}
return ByteBuffers.toByteArray((ByteBuffer) value);
case BINARY:
return ByteBuffers.toByteArray((ByteBuffer) value);
default:
}
return value;
}
}
| 1 | 32,844 | I don't think this needs to change. Can you go back to the previous implementation? | apache-iceberg | java |
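Reduced to a Go sketch for illustration (the file under review is Java, and the receiver and method names here are invented): the patch routes the task-based lookup through the location-based accessor, while the reviewer prefers keeping the original direct map read.

package sketch

type reader struct {
	inputFiles map[string]string
}

// Previous implementation: read the backing map directly.
func (r *reader) inputFileForTaskOld(path string) string {
	return r.inputFiles[path]
}

// Patched implementation: delegate to the location-based accessor.
func (r *reader) inputFileForTask(path string) string {
	return r.inputFileByLocation(path)
}

func (r *reader) inputFileByLocation(location string) string {
	return r.inputFiles[location]
}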
@@ -32,7 +32,7 @@ from jinja2 import PackageLoader
import molecule.utilities as utilities
import molecule.validators as validators
-from provisioners import Ansible
+from molecule.core import Molecule
class Commands(object): | 1 | # Copyright (c) 2015 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import pexpect
import signal
import sys
from subprocess import CalledProcessError
import prettytable
import sh
from colorama import Fore
from jinja2 import Environment
from jinja2 import PackageLoader
import molecule.utilities as utilities
import molecule.validators as validators
from provisioners import Ansible
class Commands(object):
def __init__(self, args):
self.args = args
def main(self):
self.molecule = Ansible(self.args)
self.molecule.main()
if self.molecule._provider in ['virtualbox', 'openstack', None]:
self.commands = BaseCommands(self.molecule)
        if self.molecule._provider == 'metal':
self.commands = MetalCommands(self.molecule)
def destroy(self):
self.commands.destroy()
def create(self):
self.commands.create()
def converge(self):
self.commands.converge()
def idempotence(self):
self.commands.idempotence()
def verify(self):
self.commands.verify()
def test(self):
self.commands.test()
def list(self):
self.commands.list()
def status(self):
self.commands.status()
def login(self):
self.commands.login()
def init(self):
self.commands.init()
class BaseCommands(object):
def __init__(self, molecule):
self.molecule = molecule
def create(self):
"""
Creates all instances defined in molecule.yml.
Creates all template files used by molecule, vagrant, ansible-playbook.
:return: None
"""
self.molecule._create_templates()
if not self.molecule._created:
try:
self.molecule._vagrant.up(no_provision=True)
self.molecule._created = True
except CalledProcessError as e:
print('ERROR: {}'.format(e))
sys.exit(e.returncode)
def destroy(self):
"""
Halts and destroys all instances created by molecule.
Removes template files.
Clears state file of all info (default platform).
:return: None
"""
self.molecule._create_templates()
try:
self.molecule._vagrant.halt()
self.molecule._vagrant.destroy()
self.molecule._set_default_platform(platform=False)
except CalledProcessError as e:
print('ERROR: {}'.format(e))
sys.exit(e.returncode)
self.molecule._remove_templates()
def converge(self, idempotent=False):
"""
Provisions all instances using ansible-playbook.
:param idempotent: Optionally provision servers quietly so output can be parsed for idempotence
:return: Provisioning output if idempotent=True, otherwise return code of underlying call to ansible-playbook
"""
if not idempotent:
self.create()
self.molecule._create_inventory_file()
playbook, args, kwargs = self.molecule._create_playbook_args()
if idempotent:
kwargs.pop('_out', None)
kwargs.pop('_err', None)
kwargs['_env']['ANSIBLE_NOCOLOR'] = 'true'
kwargs['_env']['ANSIBLE_FORCE_COLOR'] = 'false'
try:
output = sh.ansible_playbook(playbook, *args, **kwargs)
return output
except sh.ErrorReturnCode as e:
print('ERROR: {}'.format(e))
sys.exit(e.exit_code)
try:
output = sh.ansible_playbook(playbook, *args, **kwargs)
return output.exit_code
except sh.ErrorReturnCode as e:
print('ERROR: {}'.format(e))
sys.exit(e.exit_code)
def idempotence(self):
"""
Provisions instances and parses output to determine idempotence
:return: None
"""
print('{}Idempotence test in progress...{}'.format(Fore.CYAN, Fore.RESET)),
output = self.converge(idempotent=True)
idempotent = self.molecule._parse_provisioning_output(output.stdout)
if idempotent:
print('{}OKAY{}'.format(Fore.GREEN, Fore.RESET))
return
print('{}FAILED{}'.format(Fore.RED, Fore.RESET))
sys.exit(1)
def verify(self):
"""
Performs verification steps on running instances, including:
* Checks files for trailing whitespace and newlines
* Runs testinfra against instances
* Runs serverspec against instances (also calls rubocop on spec files)
:return: None if no tests are found, otherwise return code of underlying command
"""
validators.check_trailing_cruft(ignore_paths=self.molecule._config.config['molecule']['ignore_paths'])
# no tests found
if not os.path.isdir(self.molecule._config.config['molecule']['serverspec_dir']) and not os.path.isdir(
self.molecule._config.config['molecule'][
'testinfra_dir']):
msg = '{}Skipping tests, could not find {}/ or {}/.{}'
print(msg.format(Fore.YELLOW, self.molecule._config.config['molecule']['serverspec_dir'],
self.molecule._config.config[
'molecule']['testinfra_dir'], Fore.RESET))
return
self.molecule._write_ssh_config()
kwargs = {'_env': self.molecule._env, '_out': utilities.print_stdout, '_err': utilities.print_stderr}
args = []
# testinfra
if os.path.isdir(self.molecule._config.config['molecule']['testinfra_dir']):
try:
ti_args = [
'--sudo', '--connection=ansible',
'--ansible-inventory=' + self.molecule._config.config['ansible']['inventory_file']
]
output = sh.testinfra(*ti_args, **kwargs)
return output.exit_code
except sh.ErrorReturnCode as e:
print('ERROR: {}'.format(e))
sys.exit(e.exit_code)
# serverspec
if os.path.isdir(self.molecule._config.config['molecule']['serverspec_dir']):
self.molecule._rubocop()
if 'rakefile_file' in self.molecule._config.config['molecule']:
kwargs['rakefile'] = self.molecule._config.config['molecule']['rakefile_file']
if self.molecule._args['--debug']:
args.append('--trace')
try:
rakecmd = sh.Command("rake")
output = rakecmd(*args, **kwargs)
return output.exit_code
except sh.ErrorReturnCode as e:
print('ERROR: {}'.format(e))
sys.exit(e.exit_code)
def test(self):
"""
Runs a series of commands (defined in config) against instances for a full test/verify run
:return: None
"""
for task in self.molecule._config.config['molecule']['test']['sequence']:
m = getattr(self, task)
m()
def list(self):
"""
Prints a list of currently available platforms
:return: None
"""
print
self.molecule._print_valid_platforms()
def status(self):
"""
Prints status of currently converged instances, similar to `vagrant status`
:return: Return code of underlying command if there's an exception, otherwise None
"""
if not os.path.isfile(self.molecule._config.config['molecule']['vagrantfile_file']):
errmsg = '{}ERROR: No instances created. Try `{} create` first.{}'
print(errmsg.format(Fore.RED, os.path.basename(sys.argv[0]), Fore.RESET))
sys.exit(1)
try:
status = self.molecule._vagrant.status()
except CalledProcessError as e:
print('ERROR: {}'.format(e))
return e.returncode
x = prettytable.PrettyTable(['Name', 'State', 'Provider'])
x.align = 'l'
for item in status:
if item.state != 'not_created':
state = Fore.GREEN + item.state + Fore.RESET
else:
state = item.state
x.add_row([item.name, state, item.provider])
print(x)
print
self.molecule._print_valid_platforms()
def login(self):
"""
Initiates an interactive ssh session with a given instance name.
:return: None
"""
# make sure vagrant knows about this host
try:
conf = self.molecule._vagrant.conf(vm_name=self.molecule._args['<host>'])
ssh_args = [conf['HostName'], conf['User'], conf['Port'], conf['IdentityFile'],
' '.join(self.molecule._config.config['molecule']['raw_ssh_args'])]
ssh_cmd = 'ssh {} -l {} -p {} -i {} {}'
except CalledProcessError:
# gets appended to python-vagrant's error message
conf_format = [Fore.RED, self.molecule._args['<host>'], Fore.YELLOW, Fore.RESET]
conf_errmsg = '\n{0}Unknown host {1}. Try {2}molecule status{0} to see available hosts.{3}'
print(conf_errmsg.format(*conf_format))
sys.exit(1)
lines, columns = os.popen('stty size', 'r').read().split()
dimensions = (int(lines), int(columns))
self.molecule._pt = pexpect.spawn('/usr/bin/env ' + ssh_cmd.format(*ssh_args), dimensions=dimensions)
signal.signal(signal.SIGWINCH, self.molecule._sigwinch_passthrough)
self.molecule._pt.interact()
def init(self):
"""
Creates the scaffolding for a new role intended for use with molecule
:return: None
"""
role = self.molecule._args['<role>']
        if not role:
            msg = '{}The init command requires a role name. Try:\n\n{}{} init <role>{}'
            print(msg.format(Fore.RED, Fore.YELLOW, os.path.basename(sys.argv[0]), Fore.RESET))
            sys.exit(1)
        role_path = './' + role + '/'
if os.path.isdir(role):
msg = '{}The directory {} already exists. Cannot create new role.{}'
print(msg.format(Fore.RED, role_path, Fore.RESET))
sys.exit(1)
try:
sh.ansible_galaxy('init', role)
except (CalledProcessError, sh.ErrorReturnCode_1) as e:
print('ERROR: {}'.format(e))
sys.exit(e.returncode)
env = Environment(loader=PackageLoader('molecule', 'templates'), keep_trailing_newline=True)
t_molecule = env.get_template(self.molecule._config.config['molecule']['init']['templates']['molecule'])
t_playbook = env.get_template(self.molecule._config.config['molecule']['init']['templates']['playbook'])
t_default_spec = env.get_template(self.molecule._config.config['molecule']['init']['templates']['default_spec'])
t_spec_helper = env.get_template(self.molecule._config.config['molecule']['init']['templates']['spec_helper'])
with open(role_path + self.molecule._config.config['molecule']['molecule_file'], 'w') as f:
f.write(t_molecule.render(config=self.molecule._config.config))
with open(role_path + self.molecule._config.config['ansible']['playbook'], 'w') as f:
f.write(t_playbook.render(role=role))
serverspec_path = role_path + self.molecule._config.config['molecule']['serverspec_dir'] + '/'
os.makedirs(serverspec_path)
os.makedirs(serverspec_path + 'hosts')
os.makedirs(serverspec_path + 'groups')
with open(serverspec_path + 'default_spec.rb', 'w') as f:
f.write(t_default_spec.render())
with open(serverspec_path + 'spec_helper.rb', 'w') as f:
f.write(t_spec_helper.render())
msg = '{}Successfully initialized new role in {}{}'
print(msg.format(Fore.GREEN, role_path, Fore.RESET))
sys.exit(0)
class MetalCommands(BaseCommands):
def __init__(self, molecule):
super(self.__class__, self).__init__(molecule)
| 1 | 5,759 | Bump it 2 lines higher, 'c' > 'u'. | ansible-community-molecule | py |
@@ -0,0 +1,11 @@
+/**
+ * Copyright @ 2018-present. All rights reserved by Vesoft Inc.
+ * Author: Fenglin Hou <[email protected]>
+ */
+#include "parser/AstTypes.h"
+
+namespace vesoft {
+
+
+
+} | 1 | 1 | 13,920 | Please do not add author here | vesoft-inc-nebula | cpp |
|
@@ -4,6 +4,7 @@ import (
"database/sql"
"encoding/hex"
+ "github.com/iotexproject/iotex-core/action"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db/rds" | 1 | package indexservice
import (
"database/sql"
"encoding/hex"
"github.com/iotexproject/iotex-core/blockchain"
"github.com/iotexproject/iotex-core/config"
"github.com/iotexproject/iotex-core/db/rds"
"github.com/iotexproject/iotex-core/pkg/hash"
"github.com/pkg/errors"
)
type (
// TransferHistory defines the schema of "transfer history" table
TransferHistory struct {
NodeAddress string
UserAddress string
TransferHash string
}
// TransferToBlock defines the schema of "transfer hash to block hash" table
TransferToBlock struct {
NodeAddress string
TransferHash string
BlockHash string
}
// VoteHistory defines the schema of "vote history" table
VoteHistory struct {
NodeAddress string
UserAddress string
VoteHash string
}
// VoteToBlock defines the schema of "vote hash to block hash" table
VoteToBlock struct {
NodeAddress string
VoteHash string
BlockHash string
}
// ExecutionHistory defines the schema of "execution history" table
ExecutionHistory struct {
NodeAddress string
UserAddress string
ExecutionHash string
}
// ExecutionToBlock defines the schema of "execution hash to block hash" table
ExecutionToBlock struct {
NodeAddress string
ExecutionHash string
BlockHash string
}
)
// Indexer handles the index build for blocks
type Indexer struct {
cfg config.Indexer
rds rds.Store
hexEncodedNodeAddr string
}
var (
// ErrNotExist indicates certain item does not exist in Blockchain database
ErrNotExist = errors.New("not exist in DB")
// ErrAlreadyExist indicates certain item already exists in Blockchain database
ErrAlreadyExist = errors.New("already exist in DB")
)
// BuildIndex builds the index for a block
func (idx *Indexer) BuildIndex(blk *blockchain.Block) error {
	return idx.rds.Transact(func(tx *sql.Tx) error {
// log transfer to transfer history table
if err := idx.UpdateTransferHistory(blk, tx); err != nil {
return errors.Wrapf(err, "failed to update transfer to transfer history table")
}
// map transfer to block
if err := idx.UpdateTransferToBlock(blk, tx); err != nil {
return errors.Wrapf(err, "failed to update transfer to block")
}
// log vote to vote history table
if err := idx.UpdateVoteHistory(blk, tx); err != nil {
return errors.Wrapf(err, "failed to update vote to vote history table")
}
// map vote to block
if err := idx.UpdateVoteToBlock(blk, tx); err != nil {
return errors.Wrapf(err, "failed to update vote to block")
}
// log execution to execution history table
if err := idx.UpdateExecutionHistory(blk, tx); err != nil {
return errors.Wrapf(err, "failed to update execution to execution history table")
}
// map execution to block
if err := idx.UpdateExecutionToBlock(blk, tx); err != nil {
return errors.Wrapf(err, "failed to update execution to block")
}
return nil
	})
}
// UpdateTransferHistory stores transfer information into transfer history table
func (idx *Indexer) UpdateTransferHistory(blk *blockchain.Block, tx *sql.Tx) error {
insertQuery := "INSERT transfer_history SET node_address=?,user_address=?,transfer_hash=?"
for _, transfer := range blk.Transfers {
transferHash := transfer.Hash()
// put new transfer for sender
senderAddr := transfer.Sender()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, senderAddr, transferHash[:]); err != nil {
return err
}
// put new transfer for recipient
receiverAddr := transfer.Recipient()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, receiverAddr, transferHash[:]); err != nil {
return err
}
}
return nil
}
// GetTransferHistory gets the transfer history
func (idx *Indexer) GetTransferHistory(userAddr string) ([]hash.Hash32B, error) {
getQuery := "SELECT * FROM transfer_history WHERE node_address=? AND user_address=?"
db := idx.rds.GetDB()
stmt, err := db.Prepare(getQuery)
if err != nil {
return nil, errors.Wrapf(err, "failed to prepare get query")
}
rows, err := stmt.Query(idx.hexEncodedNodeAddr, userAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to execute get query")
}
var transferHistory TransferHistory
parsedRows, err := rds.ParseRows(rows, &transferHistory)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse results")
}
var transferHashes []hash.Hash32B
for _, parsedRow := range parsedRows {
var hash hash.Hash32B
copy(hash[:], parsedRow.(*TransferHistory).TransferHash)
transferHashes = append(transferHashes, hash)
}
return transferHashes, nil
}
// UpdateTransferToBlock maps transfer hash to block hash
func (idx *Indexer) UpdateTransferToBlock(blk *blockchain.Block, tx *sql.Tx) error {
blockHash := blk.HashBlock()
insertQuery := "INSERT transfer_to_block SET node_address=?,transfer_hash=?,block_hash=?"
for _, transfer := range blk.Transfers {
transferHash := transfer.Hash()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, hex.EncodeToString(transferHash[:]), blockHash[:]); err != nil {
return err
}
}
return nil
}
// GetBlockByTransfer returns the block hash by transfer hash
func (idx *Indexer) GetBlockByTransfer(transferHash hash.Hash32B) (hash.Hash32B, error) {
getQuery := "SELECT * FROM transfer_to_block WHERE node_address=? AND transfer_hash=?"
db := idx.rds.GetDB()
stmt, err := db.Prepare(getQuery)
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to prepare get query")
}
rows, err := stmt.Query(idx.hexEncodedNodeAddr, hex.EncodeToString(transferHash[:]))
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to execute get query")
}
var transferToBlock TransferToBlock
parsedRows, err := rds.ParseRows(rows, &transferToBlock)
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to parse results")
}
if len(parsedRows) == 0 {
return hash.ZeroHash32B, ErrNotExist
}
var hash hash.Hash32B
copy(hash[:], parsedRows[0].(*TransferToBlock).BlockHash)
return hash, nil
}
// UpdateVoteHistory stores vote information into vote history table
func (idx *Indexer) UpdateVoteHistory(blk *blockchain.Block, tx *sql.Tx) error {
insertQuery := "INSERT vote_history SET node_address=?,user_address=?,vote_hash=?"
for _, vote := range blk.Votes {
voteHash := vote.Hash()
// put new vote for sender
senderAddr := vote.Voter()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, senderAddr, voteHash[:]); err != nil {
return err
}
// put new vote for recipient
recipientAddr := vote.Votee()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, recipientAddr, voteHash[:]); err != nil {
return err
}
}
return nil
}
// GetVoteHistory gets the vote history
func (idx *Indexer) GetVoteHistory(userAddr string) ([]hash.Hash32B, error) {
getQuery := "SELECT * FROM vote_history WHERE node_address=? AND user_address=?"
db := idx.rds.GetDB()
stmt, err := db.Prepare(getQuery)
if err != nil {
return nil, errors.Wrapf(err, "failed to prepare get query")
}
rows, err := stmt.Query(idx.hexEncodedNodeAddr, userAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to execute get query")
}
var voteHistory VoteHistory
parsedRows, err := rds.ParseRows(rows, &voteHistory)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse results")
}
var voteHashes []hash.Hash32B
for _, parsedRow := range parsedRows {
var hash hash.Hash32B
copy(hash[:], parsedRow.(*VoteHistory).VoteHash)
voteHashes = append(voteHashes, hash)
}
return voteHashes, nil
}
// UpdateVoteToBlock maps vote hash to block hash
func (idx *Indexer) UpdateVoteToBlock(blk *blockchain.Block, tx *sql.Tx) error {
blockHash := blk.HashBlock()
insertQuery := "INSERT vote_to_block SET node_address=?,vote_hash=?,block_hash=?"
for _, vote := range blk.Votes {
voteHash := vote.Hash()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, hex.EncodeToString(voteHash[:]), blockHash[:]); err != nil {
return err
}
}
return nil
}
// GetBlockByVote returns the block hash by vote hash
func (idx *Indexer) GetBlockByVote(voteHash hash.Hash32B) (hash.Hash32B, error) {
getQuery := "SELECT * FROM vote_to_block WHERE node_address=? AND vote_hash=?"
db := idx.rds.GetDB()
stmt, err := db.Prepare(getQuery)
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to prepare get query")
}
rows, err := stmt.Query(idx.hexEncodedNodeAddr, hex.EncodeToString(voteHash[:]))
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to execute get query")
}
var voteToBlock VoteToBlock
parsedRows, err := rds.ParseRows(rows, &voteToBlock)
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to parse results")
}
if len(parsedRows) == 0 {
return hash.ZeroHash32B, ErrNotExist
}
var hash hash.Hash32B
copy(hash[:], parsedRows[0].(*VoteToBlock).BlockHash)
return hash, nil
}
// UpdateExecutionHistory stores execution information into execution history table
func (idx *Indexer) UpdateExecutionHistory(blk *blockchain.Block, tx *sql.Tx) error {
insertQuery := "INSERT execution_history SET node_address=?,user_address=?,execution_hash=?"
for _, execution := range blk.Executions {
executionHash := execution.Hash()
// put new execution for executor
executorAddr := execution.Executor()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, executorAddr, executionHash[:]); err != nil {
return err
}
// put new execution for contract
contractAddr := execution.Contract()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, contractAddr, executionHash[:]); err != nil {
return err
}
}
return nil
}
// GetExecutionHistory gets the execution history
func (idx *Indexer) GetExecutionHistory(userAddr string) ([]hash.Hash32B, error) {
getQuery := "SELECT * FROM execution_history WHERE node_address=? AND user_address=?"
db := idx.rds.GetDB()
stmt, err := db.Prepare(getQuery)
if err != nil {
return nil, errors.Wrapf(err, "failed to prepare get query")
}
rows, err := stmt.Query(idx.hexEncodedNodeAddr, userAddr)
if err != nil {
return nil, errors.Wrapf(err, "failed to execute get query")
}
var executionHistory ExecutionHistory
parsedRows, err := rds.ParseRows(rows, &executionHistory)
if err != nil {
return nil, errors.Wrapf(err, "failed to parse results")
}
var executionHashes []hash.Hash32B
for _, parsedRow := range parsedRows {
var hash hash.Hash32B
copy(hash[:], parsedRow.(*ExecutionHistory).ExecutionHash)
executionHashes = append(executionHashes, hash)
}
return executionHashes, nil
}
// UpdateExecutionToBlock maps execution hash to block hash
func (idx *Indexer) UpdateExecutionToBlock(blk *blockchain.Block, tx *sql.Tx) error {
blockHash := blk.HashBlock()
insertQuery := "INSERT execution_to_block SET node_address=?,execution_hash=?,block_hash=?"
for _, execution := range blk.Executions {
executionHash := execution.Hash()
if _, err := tx.Exec(insertQuery, idx.hexEncodedNodeAddr, hex.EncodeToString(executionHash[:]), blockHash[:]); err != nil {
return err
}
}
return nil
}
// GetBlockByExecution returns the block hash by execution hash
func (idx *Indexer) GetBlockByExecution(executionHash hash.Hash32B) (hash.Hash32B, error) {
getQuery := "SELECT * FROM execution_to_block WHERE node_address=? AND execution_hash=?"
db := idx.rds.GetDB()
stmt, err := db.Prepare(getQuery)
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to prepare get query")
}
rows, err := stmt.Query(idx.hexEncodedNodeAddr, hex.EncodeToString(executionHash[:]))
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to execute get query")
}
var executionToBlock ExecutionToBlock
parsedRows, err := rds.ParseRows(rows, &executionToBlock)
if err != nil {
return hash.ZeroHash32B, errors.Wrapf(err, "failed to parse results")
}
if len(parsedRows) == 0 {
return hash.ZeroHash32B, ErrNotExist
}
var hash hash.Hash32B
copy(hash[:], parsedRows[0].(*ExecutionToBlock).BlockHash)
return hash, nil
}
| 1 | 12,881 | Similarly, we should be able to persist action uniformly | iotexproject-iotex-core | go |
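The review suggests persisting actions uniformly rather than keeping a near-identical method pair per transfer, vote and execution. A hedged Go sketch of that direction; the `action` interface and helper below are hypothetical, not types from iotex-core:

package sketch

// action abstracts what the three Update*History methods above each need:
// a hash plus the two addresses under which it should be indexed.
type action interface {
	Hash() [32]byte
	SrcAddr() string
	DstAddr() string
}

// updateActionHistory indexes any action for both of its addresses through a
// caller-supplied insert, collapsing the per-type loops into one.
func updateActionHistory(insert func(addr string, h [32]byte) error, acts []action) error {
	for _, act := range acts {
		h := act.Hash()
		if err := insert(act.SrcAddr(), h); err != nil {
			return err
		}
		if err := insert(act.DstAddr(), h); err != nil {
			return err
		}
	}
	return nil
}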
@@ -1,3 +1,8 @@
+#This prevents caching via the browser
+#in testing mode
+module ActionController::ConditionalGet
+ def expires_in(*args) ; end
+end
Workshops::Application.configure do
# Settings specified here will take precedence over those in config/application.rb
| 1 | Workshops::Application.configure do
# Settings specified here will take precedence over those in config/application.rb
# The test environment is used exclusively to run your application's
# test suite. You never need to work with it otherwise. Remember that
# your test database is "scratch space" for the test suite and is wiped
# and recreated between test runs. Don't rely on the data there!
config.cache_classes = true
# Configure static asset server for tests with Cache-Control for performance
config.serve_static_assets = true
config.static_cache_control = "public, max-age=3600"
# Do not compress assets
config.assets.compress = false
# Log error messages when you accidentally call methods on nil
config.whiny_nils = true
# Show full error reports and disable caching
config.consider_all_requests_local = true
config.action_controller.perform_caching = false
# Raise exceptions instead of rendering exception templates
config.action_dispatch.show_exceptions = false
# Disable request forgery protection in test environment
config.action_controller.allow_forgery_protection = false
# Tell Action Mailer not to deliver emails to the real world.
# The :test delivery method accumulates sent emails in the
# ActionMailer::Base.deliveries array.
config.action_mailer.delivery_method = :test
# Use SQL instead of Active Record's schema dumper when creating the test database.
# This is necessary if your schema can't be completely dumped by the schema dumper,
# like if you have constraints or database-specific column types
# config.active_record.schema_format = :sql
# Print deprecation notices to the stderr
config.active_support.deprecation = :stderr
HOST = 'www.example.com'
config.action_mailer.default_url_options = { host: HOST }
config.after_initialize do
Timecop.travel(Time.now)
end
PAYPAL_USERNAME = "username"
PAYPAL_PASSWORD = "password"
PAYPAL_SIGNATURE = "signature"
PAPERCLIP_STORAGE_OPTIONS = {}
GITHUB_KEY = 'githubkey'
GITHUB_SECRET = 'githubsecret'
end
| 1 | 6,395 | I think i'd prefer this override to be in the test helper instead, if that's possible? | thoughtbot-upcase | rb |
@@ -134,6 +134,13 @@ func Untar(source string, dest string, extractionDir string) error {
case tar.TypeReg:
fallthrough
case tar.TypeRegA:
+ // Always ensure the directory is created before trying to move the file.
+ fullPathDir := filepath.Dir(fullPath)
+ err = os.MkdirAll(fullPathDir, 0755)
+ if err != nil {
+ return err
+ }
+
// For a regular file, create and copy the file.
exFile, err := os.Create(fullPath)
if err != nil { | 1 | package archive
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/drud/ddev/pkg/util"
)
// Ungzip accepts a gzipped file and uncompresses it to the provided destination path.
func Ungzip(source string, dest string) error {
f, err := os.Open(source)
if err != nil {
return err
}
defer func() {
if e := f.Close(); e != nil {
err = e
}
}()
gf, err := gzip.NewReader(f)
if err != nil {
return err
}
defer func() {
if e := gf.Close(); e != nil {
err = e
}
}()
fname := strings.TrimSuffix(filepath.Base(f.Name()), ".gz")
exFile, err := os.Create(filepath.Join(dest, fname))
if err != nil {
return err
}
defer func() {
if e := exFile.Close(); e != nil {
err = e
}
}()
_, err = io.Copy(exFile, gf)
if err != nil {
return err
}
err = exFile.Sync()
if err != nil {
return err
}
return nil
}
// Untar accepts a tar or tar.gz file and extracts the contents to the provided destination path.
// extractionDir is the path at which extraction should start; nothing will be extracted except the contents of
// extractionDir
func Untar(source string, dest string, extractionDir string) error {
var tf *tar.Reader
f, err := os.Open(source)
if err != nil {
return err
}
defer util.CheckClose(f)
if strings.HasSuffix(source, "gz") {
gf, err := gzip.NewReader(f)
if err != nil {
return err
}
defer util.CheckClose(gf)
tf = tar.NewReader(gf)
} else {
tf = tar.NewReader(f)
}
for {
file, err := tf.Next()
if err == io.EOF {
break
}
if err != nil {
return fmt.Errorf("Error during read of tar archive %v, err: %v", source, err)
}
// If we have an extractionDir and this doesn't match, skip it.
if !strings.HasPrefix(file.Name, extractionDir) {
continue
}
// If extractionDir matches file name and isn't a directory, we should be extracting a specific file.
if file.Name == extractionDir && file.Typeflag != tar.TypeDir {
file.Name = filepath.Base(file.Name)
} else {
// Transform the filename to skip the extractionDir
file.Name = strings.TrimPrefix(file.Name, extractionDir)
}
// If file.Name is now empty this is the root directory we want to extract, and need not do anything.
if file.Name == "" && file.Typeflag == tar.TypeDir {
continue
}
fullPath := filepath.Join(dest, file.Name)
// At this point only directories and block-files are handled. Symlinks and the like are ignored.
switch file.Typeflag {
case tar.TypeDir:
// For a directory, if it doesn't exist, we create it.
finfo, err := os.Stat(fullPath)
if err == nil && finfo.IsDir() {
continue
}
err = os.MkdirAll(fullPath, 0755)
if err != nil {
return err
}
case tar.TypeReg:
fallthrough
case tar.TypeRegA:
// For a regular file, create and copy the file.
exFile, err := os.Create(fullPath)
if err != nil {
return fmt.Errorf("Failed to create file %v, err: %v", fullPath, err)
}
_, err = io.Copy(exFile, tf)
_ = exFile.Close()
if err != nil {
return fmt.Errorf("Failed to copy to file %v, err: %v", fullPath, err)
}
}
}
return nil
}
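// Editor's illustration (not part of the original file): how the
// extractionDir argument behaves. Only entries whose names start with the
// prefix are extracted, with the prefix stripped from destination paths.
// The archive and directory names below are hypothetical.
func exampleUntarWebSubtree() error {
	// Pulls just the "web/" subtree of the tarball into /tmp/dest, so
	// web/index.html would land at /tmp/dest/index.html.
	return Untar("/tmp/site-backup.tar.gz", "/tmp/dest", "web/")
}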
// Unzip accepts a zip file and extracts the contents to the provided destination path.
// extractionDir is the path at which extraction should szipt; nothing will be extracted except the contents of
// extractionDir
func Unzip(source string, dest string, extractionDir string) error {
zf, err := zip.OpenReader(source)
if err != nil {
return fmt.Errorf("Failed to open zipfile %s, err:%v", source, err)
}
defer util.CheckClose(zf)
for _, file := range zf.File {
// If we have an extractionDir and this doesn't match, skip it.
if !strings.HasPrefix(file.Name, extractionDir) {
continue
}
// If extractionDir matches file name and isn't a directory, we should be extracting a specific file.
fileInfo := file.FileInfo()
if file.Name == extractionDir && !fileInfo.IsDir() {
file.Name = filepath.Base(file.Name)
} else {
// Transform the filename to skip the extractionDir
file.Name = strings.TrimPrefix(file.Name, extractionDir)
}
fullPath := filepath.Join(dest, file.Name)
if strings.HasSuffix(file.Name, "/") {
err = os.MkdirAll(fullPath, 0777)
if err != nil {
return fmt.Errorf("Failed to mkdir %s, err:%v", fullPath, err)
}
continue
}
// If file.Name is now empty this is the root directory we want to extract, and need not do anything.
if file.Name == "" {
continue
}
rc, err := file.Open()
if err != nil {
return err
}
// create and copy the file.
exFile, err := os.Create(fullPath)
if err != nil {
return fmt.Errorf("Failed to create file %v, err: %v", fullPath, err)
}
_, err = io.Copy(exFile, rc)
_ = exFile.Close()
if err != nil {
return fmt.Errorf("Failed to copy to file %v, err: %v", fullPath, err)
}
}
return nil
}
| 1 | 11,554 | This would be better with context added via wrapping or fmt.errorf() | drud-ddev | go |
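The review asks for the MkdirAll error to carry context via wrapping or fmt.Errorf rather than being returned bare. A small self-contained sketch of the requested style; openSource and the message are illustrative, not code from ddev:

package sketch

import (
	"fmt"
	"os"
)

// openSource wraps the underlying error with enough context to tell which
// file failed, instead of a bare `return err`.
func openSource(source string) (*os.File, error) {
	f, err := os.Open(source)
	if err != nil {
		return nil, fmt.Errorf("failed to open archive %s, err: %v", source, err)
	}
	return f, nil
}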
@@ -0,0 +1,19 @@
+// Copyright 2020 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package assert provides convenience functions that help a method or constructor check whether
+// it was invoked correctly (whether its preconditions have been met).
+//
+// When the assertion fails, a panic occurs, allowing bugs to be found in a fail-fast manner.
+package assert | 1 | 1 | 10,954 | What's the purpose of this file? | GoogleCloudPlatform-compute-image-tools | go |
|
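The question above concerns a doc-only file. For context, a fail-fast precondition helper of the kind such an assert package typically grows; this function is a guess at the shape, not code from the PR:

package assert

import "fmt"

// True panics with a formatted message when the stated precondition does not
// hold, surfacing caller bugs immediately rather than letting them propagate.
func True(condition bool, format string, args ...interface{}) {
	if !condition {
		panic(fmt.Sprintf(format, args...))
	}
}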
@@ -616,12 +616,14 @@ public class PasscodeManager {
*/
public void setPasscodeLength(Context ctx, int passcodeLength) {
if (passcodeLength > this.passcodeLength) {
- if (hasStoredPasscode(ctx)) {
+ if (hasStoredPasscode(ctx) && passcodeLengthKnown) {
this.passcodeChangeRequired = true;
}
+
+ this.passcodeLength = passcodeLength;
}
- this.passcodeLength = passcodeLength;
- this.passcodeLengthKnown = true;
+
+ this.passcodeLengthKnown = true;
storeMobilePolicy(ctx);
}
| 1 | /*
* Copyright (c) 2014-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.security;
import android.annotation.SuppressLint;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.content.SharedPreferences.Editor;
import android.os.Handler;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.analytics.EventBuilderHelper;
import com.salesforce.androidsdk.analytics.security.Encryptor;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
import java.io.File;
import java.io.FilenameFilter;
/**
* This class manages the inactivity timeout, and keeps track of if the UI should locked etc.
*
* @author wmathurin
* @author bhariharan
*/
public class PasscodeManager {
// UUID keys
private static final String VKEY = "vkey";
private static final String VSUFFIX = "vsuffix";
private static final String VPREFIX = "vprefix";
private static final String TAG = "PasscodeManager";
// Default min passcode length
public static final int MIN_PASSCODE_LENGTH = 4;
// Key in preference for the passcode
protected static final String KEY_PASSCODE ="passcode";
// Private preference where we stored the passcode (hashed)
protected static final String PASSCODE_PREF_NAME = "user";
// Private preference where we stored the org settings.
protected static final String MOBILE_POLICY_PREF = "mobile_policy";
// Key in preference for the access timeout.
protected static final String KEY_TIMEOUT = "access_timeout";
// Key in preference for the passcode length.
protected static final String KEY_PASSCODE_LENGTH = "passcode_length";
// Key in preferences for actual passcode length known
protected static final String KEY_PASSCODE_LENGTH_KNOWN = "passcode_length_known";
// Key in preference for connect app biometric flag.
protected static final String KEY_BIOMETRIC_ALLOWED = "biometric_allowed";
// Key in preferences to indicate if the user has been prompted to use biometric.
protected static final String KEY_BIOMETRIC_ENROLLMENT = "biometric_enrollment";
// Key in preferences to indicate if the user has enabled biometric.
protected static final String KEY_BIOMETRIC_ENABLED = "biometric_enabled";
// Key in preference to indicate passcode change is required.
protected static final String KEY_PASSCODE_CHANGE_REQUIRED = "passcode_change_required";
// Key in preference for failed attempts
protected static final String KEY_FAILED_ATTEMPTS = "failed_attempts";
// Request code used to start passcode activity
public static final int PASSCODE_REQUEST_CODE = 777;
// Misc
private HashConfig verificationHashConfig;
private Handler handler;
private long lastActivity;
boolean locked;
private int timeoutMs;
private int passcodeLength;
private boolean biometricAllowed;
private boolean biometricEnrollmentShown;
private boolean biometricEnabled;
private boolean passcodeChangeRequired;
private LockChecker lockChecker;
private boolean passcodeLengthKnown;
/**
* Parameterized constructor.
*
* @param ctx Context.
*/
public PasscodeManager(Context ctx) {
this(ctx, new HashConfig(SalesforceKeyGenerator.getUniqueId(VPREFIX),
SalesforceKeyGenerator.getUniqueId(VSUFFIX),
SalesforceKeyGenerator.getUniqueId(VKEY)));
}
public PasscodeManager(Context ctx, HashConfig verificationHashConfig) {
this.passcodeLength = MIN_PASSCODE_LENGTH;
this.lastActivity = now();
this.verificationHashConfig = verificationHashConfig;
readMobilePolicy(ctx);
// Locked at app startup if you're authenticated.
this.locked = true;
lockChecker = new LockChecker();
}
/**
* Returns true if a passcode change is required.
*
* @return true if passcode change required.
*/
public boolean isPasscodeChangeRequired() {
return passcodeChangeRequired;
}
/**
* Set passcode change required flag to the passed value
* @param ctx Context.
* @param passcodeChangeRequired value to set passcode change required flag to
*/
public void setPasscodeChangeRequired(Context ctx, boolean passcodeChangeRequired) {
this.passcodeChangeRequired = passcodeChangeRequired;
storeMobilePolicy(ctx);
}
/**
* Returns the timeout value for the specified account.
*
* @param account UserAccount instance.
* @return Timeout value.
*/
public int getTimeoutMsForOrg(UserAccount account) {
if (account == null) {
return 0;
}
final Context context = SalesforceSDKManager.getInstance().getAppContext();
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF
+ account.getOrgLevelFilenameSuffix(), Context.MODE_PRIVATE);
return sp.getInt(KEY_TIMEOUT, 0);
}
/**
* Returns the minimum passcode length for the specified account.
*
* @param account UserAccount instance.
* @return Minimum passcode length.
*/
public int getPasscodeLengthForOrg(UserAccount account) {
if (account == null) {
return MIN_PASSCODE_LENGTH;
}
final Context context = SalesforceSDKManager.getInstance().getAppContext();
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF
+ account.getOrgLevelFilenameSuffix(), Context.MODE_PRIVATE);
return sp.getInt(KEY_PASSCODE_LENGTH, MIN_PASSCODE_LENGTH);
}
/**
* Stores the mobile policy for the specified account.
*
* @param account UserAccount instance.
* @param timeout Timeout value, in ms.
* @param passLen Minimum passcode length.
*
* @deprecated Will be removed in Mobile SDK 8.0.
* Use {@link PasscodeManager#storeMobilePolicyForOrg(UserAccount, int, int, boolean)} instead.
*/
public void storeMobilePolicyForOrg(UserAccount account, int timeout, int passLen) {
storeMobilePolicyForOrg(account, timeout, passLen, true);
}
/**
* Stores the mobile policy for the specified account.
*
* @param account UserAccount instance.
* @param timeout Timeout value, in ms.
* @param passLen Minimum passcode length.
     * @param bioAllowed If biometric unlock is allowed by the connected app.
*/
@SuppressLint("ApplySharedPref")
public void storeMobilePolicyForOrg(UserAccount account, int timeout, int passLen, boolean bioAllowed) {
if (account == null) {
return;
}
final Context context = SalesforceSDKManager.getInstance().getAppContext();
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF
+ account.getOrgLevelFilenameSuffix(), Context.MODE_PRIVATE);
final Editor e = sp.edit();
e.putInt(KEY_TIMEOUT, timeout);
e.putInt(KEY_PASSCODE_LENGTH, passLen);
e.putBoolean(KEY_PASSCODE_LENGTH_KNOWN, passcodeLengthKnown);
e.putBoolean(KEY_BIOMETRIC_ALLOWED, bioAllowed);
e.commit();
}
/**
* Stores the mobile policy in a private file.
*
* @param context Context.
*/
@SuppressLint("ApplySharedPref")
private void storeMobilePolicy(Context context) {
// Context will be null only in test runs.
if (context != null) {
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF,
Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putInt(KEY_TIMEOUT, timeoutMs);
e.putInt(KEY_PASSCODE_LENGTH, passcodeLength);
e.putBoolean(KEY_PASSCODE_LENGTH_KNOWN, passcodeLengthKnown);
e.putBoolean(KEY_PASSCODE_CHANGE_REQUIRED, passcodeChangeRequired);
e.putBoolean(KEY_BIOMETRIC_ALLOWED, biometricAllowed);
e.putBoolean(KEY_BIOMETRIC_ENROLLMENT, biometricEnrollmentShown);
e.putBoolean(KEY_BIOMETRIC_ENABLED, biometricEnabled);
e.commit();
}
}
/**
* Reads the mobile policy from a private file.
*
* @param context Context.
*/
private void readMobilePolicy(Context context) {
// Context will be null only in test runs.
if (context != null) {
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF,
Context.MODE_PRIVATE);
if (!sp.contains(KEY_TIMEOUT) || !sp.contains(KEY_PASSCODE_LENGTH)) {
timeoutMs = 0;
passcodeLength = MIN_PASSCODE_LENGTH;
passcodeChangeRequired = false;
biometricAllowed = true;
biometricEnrollmentShown = false;
biometricEnabled = false;
storeMobilePolicy(context);
return;
}
timeoutMs = sp.getInt(KEY_TIMEOUT, 0);
passcodeLength = sp.getInt(KEY_PASSCODE_LENGTH, MIN_PASSCODE_LENGTH);
passcodeLengthKnown = sp.getBoolean(KEY_PASSCODE_LENGTH_KNOWN, false);
passcodeChangeRequired = sp.getBoolean(KEY_PASSCODE_CHANGE_REQUIRED, false);
biometricAllowed = sp.getBoolean(KEY_BIOMETRIC_ALLOWED, true);
biometricEnrollmentShown = sp.getBoolean(KEY_BIOMETRIC_ENROLLMENT, false);
biometricEnabled = sp.getBoolean(KEY_BIOMETRIC_ENABLED, false);
}
}
/**
* Reset this passcode manager: delete stored passcode and reset fields to their starting value
*/
@SuppressLint("ApplySharedPref")
public void reset(Context ctx) {
// Deletes the underlying org policy files for all orgs.
final String sharedPrefPath = ctx.getApplicationInfo().dataDir + "/shared_prefs";
final File dir = new File(sharedPrefPath);
final PasscodeFileFilter fileFilter = new PasscodeFileFilter();
for (final File file : dir.listFiles()) {
if (file != null && fileFilter.accept(dir, file.getName())) {
file.delete();
}
}
lastActivity = now();
locked = true;
SharedPreferences sp = ctx.getSharedPreferences(PASSCODE_PREF_NAME,
Context.MODE_PRIVATE);
Editor e = sp.edit();
e.remove(KEY_PASSCODE);
e.remove(KEY_FAILED_ATTEMPTS);
e.remove(KEY_PASSCODE_LENGTH_KNOWN);
e.remove(KEY_BIOMETRIC_ALLOWED);
e.remove(KEY_BIOMETRIC_ENROLLMENT);
e.remove(KEY_BIOMETRIC_ENABLED);
e.commit();
timeoutMs = 0;
passcodeLength = MIN_PASSCODE_LENGTH;
passcodeLengthKnown = false;
passcodeChangeRequired = false;
biometricAllowed = true;
biometricEnrollmentShown = false;
biometricEnabled = false;
storeMobilePolicy(ctx);
handler = null;
}
/**
* Resets the passcode policies for a particular org upon logout.
*
* @param context Context.
* @param account User account.
*/
@SuppressLint("ApplySharedPref")
public void reset(Context context, UserAccount account) {
if (account == null) {
return;
}
final SharedPreferences sp = context.getSharedPreferences(MOBILE_POLICY_PREF
+ account.getOrgLevelFilenameSuffix(), Context.MODE_PRIVATE);
final Editor e = sp.edit();
e.clear();
e.commit();
}
/**
* Enable/disable passcode screen.
*/
public void setEnabled(boolean enabled) {
if (enabled) {
handler = new Handler();
handler.postDelayed(lockChecker, 20 * 1000);
} else {
if (handler != null) {
handler.removeCallbacks(lockChecker);
}
handler = null;
}
}
/**
* @return true if passcode manager is enabled.
*/
public boolean isEnabled() {
return (handler != null);
}
/**
* @return the new failure count
*/
public int addFailedPasscodeAttempt() {
int failedAttempts = getFailedPasscodeAttempts() + 1;
setFailedPasscodeAttempts(failedAttempts);
return failedAttempts;
}
/**
* @param ctx Context.
* @param passcode Passcode.
* @return true if passcode matches the one stored (hashed) in private preference
*/
public boolean check(Context ctx, String passcode) {
final SharedPreferences sp = ctx.getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
String hashedPasscode = sp.getString(KEY_PASSCODE, null);
hashedPasscode = removeNewLine(hashedPasscode);
if (hashedPasscode != null) {
return hashedPasscode.equals(hashForVerification(passcode));
}
/*
* If the stored passcode hash is null, there is no passcode.
*/
return true;
}
/**
* Removes a trailing newline character from the hash.
*
* @param hash Hash.
* @return Hash with trailing newline character removed.
*/
private String removeNewLine(String hash) {
int length = hash == null ? 0 : hash.length();
if (length > 0 && hash.endsWith("\n")) {
return hash.substring(0, length - 1);
}
return hash;
}
/**
* Store the given passcode (hashed) in private preference
* @param ctx Context.
* @param passcode Passcode.
*/
@SuppressLint("ApplySharedPref")
public void store(Context ctx, String passcode) {
SharedPreferences sp = ctx.getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putString(KEY_PASSCODE, hashForVerification(passcode));
e.putInt(KEY_PASSCODE_LENGTH, passcode.length());
e.putBoolean(KEY_PASSCODE_LENGTH_KNOWN, true);
e.putBoolean(KEY_BIOMETRIC_ENROLLMENT, biometricEnrollmentShown);
e.putBoolean(KEY_BIOMETRIC_ENABLED, biometricEnabled);
e.commit();
        setPasscodeChangeRequired(ctx, false);
}
/**
* @param ctx Context.
* @return true if passcode was already created
*/
public boolean hasStoredPasscode(Context ctx) {
SharedPreferences sp = ctx.getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
return sp.contains(KEY_PASSCODE);
}
/**
* @return number of failed passcode attempts
*/
public int getFailedPasscodeAttempts() {
SharedPreferences sp = SalesforceSDKManager.getInstance().getAppContext().getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
return sp.getInt(KEY_FAILED_ATTEMPTS, 0);
}
@SuppressLint("ApplySharedPref")
private void setFailedPasscodeAttempts(int failedPasscodeAttempts) {
SharedPreferences sp = SalesforceSDKManager.getInstance().getAppContext().getSharedPreferences(PASSCODE_PREF_NAME, Context.MODE_PRIVATE);
Editor e = sp.edit();
e.putInt(KEY_FAILED_ATTEMPTS, failedPasscodeAttempts);
e.commit();
}
/**
* @return true if locked
*/
public boolean isLocked() {
return timeoutMs > 0 && locked;
}
/**
* @param ctx Context.
*/
public void lock(Context ctx) {
showLockActivity(ctx);
}
/**
     * @param frontActivity Activity currently in the foreground.
     * @param registerActivity Whether to record user activity if no lock is needed.
     * @return true if the lock screen was brought up.
*/
public boolean lockIfNeeded(Activity frontActivity, boolean registerActivity) {
if (isEnabled() && (isLocked() || shouldLock() || passcodeChangeRequired)) {
lock(frontActivity);
return true;
} else {
if (registerActivity) updateLast();
return false;
}
}
/**
* To be called by passcode protected activity when being paused
*/
public void onPause(Activity ctx) {
// Disables passcode manager.
setEnabled(false);
}
/**
     * To be called by a passcode-protected activity when being resumed.
     * When the passcode screen is about to be shown, false is returned; the activity will be resumed once
     * the user has successfully entered the passcode.
*
* @return true if the resume should be allowed to continue and false otherwise
*/
public boolean onResume(Activity ctx) {
// Enables passcode manager.
setEnabled(true);
// Brings up passcode screen if needed.
lockIfNeeded(ctx, true);
// If locked, do nothing - when the app gets unlocked we will be back here.
return !isLocked();
}
/**
* To be called by passcode protected activity whenever there is a user interaction
*/
public void recordUserInteraction() {
updateLast();
}
/**
* Called when the access timeout for the org changes.
*
* @param newTimeout New access timeout value.
*/
public void setTimeoutMs(int newTimeout) {
// Access timeout hasn't changed.
if (timeoutMs == newTimeout) {
return;
}
/*
* Either access timeout has changed from one non-zero value to another,
* which doesn't alter the passcode situation, or the app goes from
* no passcode to passcode, which will trigger the passcode creation flow.
*/
if (timeoutMs == 0 || (timeoutMs > 0 && newTimeout > 0)) {
// Updates timeout only if the new timeout is smaller than the old one.
if (timeoutMs == 0 || timeoutMs > newTimeout) {
timeoutMs = newTimeout;
}
storeMobilePolicy(SalesforceSDKManager.getInstance().getAppContext());
return;
}
// Passcode to no passcode.
timeoutMs = newTimeout;
reset(SalesforceSDKManager.getInstance().getAppContext());
}
/**
* The current inactivity timeout before the app locks, in milliseconds.
*
* @return the inactivity timeout
*/
public int getTimeoutMs() {
return timeoutMs;
}
/**
*
* @deprecated Will be removed in Mobile SDK 8.0. Use {@link PasscodeManager#getPasscodeLength()} instead.
*/
public int getMinPasscodeLength() {
return passcodeLength;
}
/**
* The exact length of the passcode if it is known. It may be unknown on upgrade before first unlock.
* Use {@link PasscodeManager#getPasscodeLengthKnown()} to check if return is exact length or org minimum.
*
* @return passcode length
*/
public int getPasscodeLength() {
return passcodeLength;
}
/**
* Whether or not the exact passcode length is known. It may be unknown on upgrade before first unlock.
* Use {@link PasscodeManager#getPasscodeLength()} to get the length.
*
* @return true if the length is known
*/
public boolean getPasscodeLengthKnown() {
return passcodeLengthKnown;
}
/**
* Whether or not the connected app allows biometric as an alternative to passcode.
*
* @return true if biometric is allowed
*/
public boolean biometricAllowed() {
return biometricAllowed;
}
/**
* Whether or not the user has been shown the screen prompting them to enroll in biometric unlock.
* @return true if the user has been prompted to enable biometric
*/
public boolean biometricEnrollmentShown() {
return biometricEnrollmentShown;
}
/**
* Whether or not the user has enabled the ability to use biometric to bypass passcode.
*
* @return true if the user has enabled biometric
*/
public boolean biometricEnabled() {
return biometricEnabled;
}
/**
* @param ctx Context.
* @param minPasscodeLength The new minimum passcode length to set.
*
* @deprecated Will be removed in Mobile SDK 8.0. Use {@link PasscodeManager#setPasscodeLength(Context, int)} instead.
*/
public void setMinPasscodeLength(Context ctx, int minPasscodeLength) {
setPasscodeLength(ctx, minPasscodeLength);
}
/**
* @param ctx Context.
* @param passcodeLength The new passcode length to set.
*/
public void setPasscodeLength(Context ctx, int passcodeLength) {
if (passcodeLength > this.passcodeLength) {
if (hasStoredPasscode(ctx)) {
this.passcodeChangeRequired = true;
}
}
this.passcodeLength = passcodeLength;
this.passcodeLengthKnown = true;
storeMobilePolicy(ctx);
}
/**
* This method can be used to force the stored or default passcode length to be trusted
* upon upgrade if set to 'true'.
*
* @param ctx Context
* @param lengthKnown Whether or not the passcode length is known.
*/
public void setPasscodeLengthKnown(Context ctx, boolean lengthKnown) {
this.passcodeLengthKnown = lengthKnown;
storeMobilePolicy(ctx);
}
/**
* Called when biometric unlock requirement for the org changes.
*/
public void setBiometricAllowed(Context ctx, boolean allowed) {
if (this.biometricAllowed) {
this.biometricAllowed = allowed;
}
storeMobilePolicy(ctx);
}
/**
* By default biometric enrollment is only shown to the user once.
*
* @param shown set to true to show biometric prompt on next passcode unlock.
*/
public void setBiometricEnrollmentShown(Context ctx, boolean shown) {
biometricEnrollmentShown = shown;
storeMobilePolicy(ctx);
}
/**
* Sets biometric enabled.
*/
public void setBiometricEnabled(Context ctx, boolean enabled) {
biometricEnabled = enabled && biometricAllowed();
storeMobilePolicy(ctx);
}
/**
* @return true if time elapsed since the last user activity in the app exceeds the timeoutMs
*/
public boolean shouldLock() {
return timeoutMs > 0 && now() >= (lastActivity + timeoutMs);
}
public void showLockActivity(Context ctx) {
locked = true;
if (ctx == null) {
ctx = SalesforceSDKManager.getInstance().getAppContext();
}
final Intent i = new Intent(ctx, SalesforceSDKManager.getInstance().getPasscodeActivity());
i.addFlags(Intent.FLAG_ACTIVITY_SINGLE_TOP);
i.addFlags(Intent.FLAG_ACTIVITY_NO_HISTORY);
i.addFlags(Intent.FLAG_ACTIVITY_REORDER_TO_FRONT);
if (ctx == SalesforceSDKManager.getInstance().getAppContext()) {
i.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK);
}
if (ctx instanceof Activity) {
((Activity) ctx).startActivityForResult(i, PASSCODE_REQUEST_CODE);
} else {
ctx.startActivity(i);
}
EventsObservable.get().notifyEvent(EventType.AppLocked);
}
/**
     * This is used when unlocking via fingerprint authentication.
* The passcode hash isn't updated as the authentication is verified by the OS.
*/
public void unlock() {
EventBuilderHelper.createAndStoreEvent("passcodeUnlock", null, TAG, null);
locked = false;
setFailedPasscodeAttempts(0);
updateLast();
EventsObservable.get().notifyEvent(EventType.AppUnlocked);
}
protected long now() {
return System.currentTimeMillis();
}
private void updateLast() {
lastActivity = now();
}
public String hashForVerification(String passcode) {
return hash(passcode, verificationHashConfig);
}
private String hash(String passcode, HashConfig hashConfig) {
return Encryptor.hash(hashConfig.prefix + passcode + hashConfig.suffix, hashConfig.key);
}
/**
     * Runnable that periodically checks how much time has elapsed since the last recorded activity.
     * When that elapsed time exceeds timeoutMs, it locks the app.
*/
private class LockChecker implements Runnable {
public void run() {
try {
if (!locked) {
lockIfNeeded(null, false);
}
} finally {
if (handler != null) {
handler.postDelayed(this, 20 * 1000);
}
}
}
}
/**
     * Key for hashing and salts to be prepended and appended to data to increase entropy.
*/
public static class HashConfig {
public final String prefix;
public final String suffix;
public final String key;
public HashConfig(String prefix, String suffix, String key) {
this.prefix = prefix;
this.suffix = suffix;
this.key = key;
}
}
/**
* This class acts as a filter to identify only the relevant passcode files.
*
* @author bhariharan
*/
private static class PasscodeFileFilter implements FilenameFilter {
private static final String PASSCODE_FILE_PREFIX = MOBILE_POLICY_PREF + "_";
@Override
public boolean accept(File dir, String filename) {
return (filename != null && filename.startsWith(PASSCODE_FILE_PREFIX));
}
}
}
| 1 | 17,446 | @bhariharan Why was passcode length requirement allowed to be lowered? | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -6,14 +6,6 @@ import (
"flag"
"io/ioutil"
"log"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/google/go-cloud/blob"
- "github.com/google/go-cloud/blob/gcsblob"
- "github.com/google/go-cloud/blob/s3blob"
- "github.com/google/go-cloud/gcp"
)
const bucketName = "my-cool-bucket" | 1 | // Command upload saves files to blob storage on GCP and AWS.
package main
import (
"context"
"flag"
"io/ioutil"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/google/go-cloud/blob"
"github.com/google/go-cloud/blob/gcsblob"
"github.com/google/go-cloud/blob/s3blob"
"github.com/google/go-cloud/gcp"
)
const bucketName = "my-cool-bucket"
func main() {
// Define our input.
cloud := flag.String("cloud", "", "Cloud storage to use")
flag.Parse()
if flag.NArg() != 1 {
log.Fatal("Failed to provide file to upload")
}
file := flag.Arg(0)
ctx := context.Background()
// Open a connection to the bucket.
var (
b *blob.Bucket
err error
)
switch *cloud {
case "gcp":
b, err = setupGCP(ctx)
case "aws":
b, err = setupAWS(ctx)
default:
log.Fatalf("Failed to recognize cloud. Want gcp or aws, got: %s", *cloud)
}
if err != nil {
log.Fatalf("Failed to setup bucket: %s", err)
}
// Prepare the file for upload.
data, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("Failed to read file: %s", err)
}
w, err := b.NewWriter(ctx, file, nil)
if err != nil {
log.Fatalf("Failed to obtain writer: %s", err)
}
_, err = w.Write(data)
if err != nil {
log.Fatalf("Failed to write to bucket: %s", err)
}
if err := w.Close(); err != nil {
log.Fatalf("Failed to close: %s", err)
}
}
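// Example invocations (sketch; the file name and credentials are placeholders
// for your own environment):
//
//	go run upload.go -cloud gcp ./photo.jpg
//	AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... go run upload.go -cloud aws ./photo.jpg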
func setupGCP(ctx context.Context) (*blob.Bucket, error) {
// DefaultCredentials assumes a user has logged in with gcloud.
// See here for more information:
// https://cloud.google.com/docs/authentication/getting-started
creds, err := gcp.DefaultCredentials(ctx)
if err != nil {
return nil, err
}
c, err := gcp.NewHTTPClient(gcp.DefaultTransport(), gcp.CredentialsTokenSource(creds))
if err != nil {
return nil, err
}
// The bucket name must be globally unique.
return gcsblob.OpenBucket(ctx, bucketName, c)
}
func setupAWS(ctx context.Context) (*blob.Bucket, error) {
c := &aws.Config{
// Either hard-code the region or use AWS_REGION.
Region: aws.String("us-east-2"),
// credentials.NewEnvCredentials assumes two environment variables are
// present:
// 1. AWS_ACCESS_KEY_ID, and
// 2. AWS_SECRET_ACCESS_KEY.
Credentials: credentials.NewEnvCredentials(),
}
s := session.Must(session.NewSession(c))
return s3blob.OpenBucket(ctx, s, bucketName)
}
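// The review note on this row asks for a license header. A sketch of the
// standard Apache 2.0 header that would sit at the top of this file (the
// exact copyright line should follow the project's own convention):
//
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.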
| 1 | 10,467 | This file needs a license header, too. Sorry I didn't catch that earlier. | google-go-cloud | go |
@@ -289,7 +289,7 @@ func main() {
}
}
- err = s.Initialize(cfg, phonebookAddresses)
+ err = s.Initialize(cfg, phonebookAddresses, string(genesisText[:]))
if err != nil {
fmt.Fprintln(os.Stderr, err)
log.Error(err) | 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package main
import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/algorand/go-deadlock"
"github.com/gofrs/flock"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/daemon/algod"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/telemetryspec"
"github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
toolsnet "github.com/algorand/go-algorand/tools/network"
"github.com/algorand/go-algorand/util/metrics"
"github.com/algorand/go-algorand/util/tokens"
)
var dataDirectory = flag.String("d", "", "Root Algorand daemon data path")
var genesisFile = flag.String("g", "", "Genesis configuration file")
var genesisPrint = flag.Bool("G", false, "Print genesis ID")
var versionCheck = flag.Bool("v", false, "Display and write current build version and exit")
var branchCheck = flag.Bool("b", false, "Display the git branch behind the build")
var channelCheck = flag.Bool("c", false, "Display and release channel behind the build")
var initAndExit = flag.Bool("x", false, "Initialize the ledger and exit")
var peerOverride = flag.String("p", "", "Override phonebook with peer ip:port (or semicolon separated list: ip:port;ip:port;ip:port...)")
var listenIP = flag.String("l", "", "Override config.EndpointAddress (REST listening address) with ip:port")
var sessionGUID = flag.String("s", "", "Telemetry Session GUID to use")
var telemetryOverride = flag.String("t", "", `Override telemetry setting if supported (Use "true", "false", "0" or "1"`)
var seed = flag.String("seed", "", "input to math/rand.Seed()")
func main() {
flag.Parse()
dataDir := resolveDataDir()
absolutePath, absPathErr := filepath.Abs(dataDir)
config.UpdateVersionDataDir(absolutePath)
if *seed != "" {
seedVal, err := strconv.ParseInt(*seed, 10, 64)
if err != nil {
fmt.Fprintf(os.Stderr, "bad seed %#v: %s\n", *seed, err)
os.Exit(1)
return
}
rand.Seed(seedVal)
} else {
rand.Seed(time.Now().UnixNano())
}
if *versionCheck {
fmt.Println(config.FormatVersionAndLicense())
return
}
version := config.GetCurrentVersion()
heartbeatGauge := metrics.MakeStringGauge()
heartbeatGauge.Set("version", version.String())
heartbeatGauge.Set("version-num", strconv.FormatUint(version.AsUInt64(), 10))
heartbeatGauge.Set("channel", version.Channel)
heartbeatGauge.Set("branch", version.Branch)
heartbeatGauge.Set("commit-hash", version.GetCommitHash())
if *branchCheck {
fmt.Println(config.Branch)
return
}
if *channelCheck {
fmt.Println(config.Channel)
return
}
	// Don't fall back anymore - if not specified, we want to panic to force us to update our tooling and/or processes
if len(dataDir) == 0 {
fmt.Fprintln(os.Stderr, "Data directory not specified. Please use -d or set $ALGORAND_DATA in your environment.")
os.Exit(1)
}
if absPathErr != nil {
fmt.Fprintf(os.Stderr, "Can't convert data directory's path to absolute, %v\n", dataDir)
os.Exit(1)
}
if *genesisFile == "" {
*genesisFile = filepath.Join(dataDir, config.GenesisJSONFile)
}
// Load genesis
genesisText, err := ioutil.ReadFile(*genesisFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read genesis file %s: %v\n", *genesisFile, err)
os.Exit(1)
}
var genesis bookkeeping.Genesis
err = protocol.DecodeJSON(genesisText, &genesis)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot parse genesis file %s: %v\n", *genesisFile, err)
os.Exit(1)
}
if *genesisPrint {
fmt.Println(genesis.ID())
return
}
// If data directory doesn't exist, we can't run. Don't bother trying.
if _, err := os.Stat(absolutePath); err != nil {
fmt.Fprintf(os.Stderr, "Data directory %s does not appear to be valid\n", dataDir)
os.Exit(1)
}
log := logging.Base()
// before doing anything further, attempt to acquire the algod lock
// to ensure this is the only node running against this data directory
lockPath := filepath.Join(absolutePath, "algod.lock")
fileLock := flock.New(lockPath)
locked, err := fileLock.TryLock()
if err != nil {
fmt.Fprintf(os.Stderr, "unexpected failure in establishing algod.lock: %s \n", err.Error())
os.Exit(1)
}
if !locked {
fmt.Fprintln(os.Stderr, "failed to lock algod.lock; is an instance of algod already running in this data directory?")
os.Exit(1)
}
defer fileLock.Unlock()
cfg, err := config.LoadConfigFromDisk(absolutePath)
if err != nil && !os.IsNotExist(err) {
// log is not setup yet, this will log to stderr
log.Fatalf("Cannot load config: %v", err)
}
err = config.LoadConfigurableConsensusProtocols(absolutePath)
if err != nil {
// log is not setup yet, this will log to stderr
log.Fatalf("Unable to load optional consensus protocols file: %v", err)
}
// Enable telemetry hook in daemon to send logs to cloud
// If ALGOTEST env variable is set, telemetry is disabled - allows disabling telemetry for tests
isTest := os.Getenv("ALGOTEST") != ""
remoteTelemetryEnabled := false
if !isTest {
telemetryConfig, err := logging.EnsureTelemetryConfig(&dataDir, genesis.ID())
if err != nil {
fmt.Fprintln(os.Stdout, "error loading telemetry config", err)
}
if os.IsPermission(err) {
fmt.Fprintf(os.Stderr, "Permission error on accessing telemetry config: %v", err)
os.Exit(1)
}
fmt.Fprintf(os.Stdout, "Telemetry configured from '%s'\n", telemetryConfig.FilePath)
telemetryConfig.SendToLog = telemetryConfig.SendToLog || cfg.TelemetryToLog
// Apply telemetry override.
telemetryConfig.Enable = logging.TelemetryOverride(*telemetryOverride)
remoteTelemetryEnabled = telemetryConfig.Enable
if telemetryConfig.Enable || telemetryConfig.SendToLog {
// If session GUID specified, use it.
if *sessionGUID != "" {
if len(*sessionGUID) == 36 {
telemetryConfig.SessionGUID = *sessionGUID
}
}
err = log.EnableTelemetry(telemetryConfig)
if err != nil {
fmt.Fprintln(os.Stdout, "error creating telemetry hook", err)
}
}
}
s := algod.Server{
RootPath: absolutePath,
Genesis: genesis,
}
// Generate a REST API token if one was not provided
apiToken, wroteNewToken, err := tokens.ValidateOrGenerateAPIToken(s.RootPath, tokens.AlgodTokenFilename)
if err != nil {
log.Fatalf("API token error: %v", err)
}
if wroteNewToken {
fmt.Printf("No REST API Token found. Generated token: %s\n", apiToken)
}
	// Generate an admin REST API token if one was not provided
adminAPIToken, wroteNewToken, err := tokens.ValidateOrGenerateAPIToken(s.RootPath, tokens.AlgodAdminTokenFilename)
if err != nil {
log.Fatalf("Admin API token error: %v", err)
}
if wroteNewToken {
fmt.Printf("No Admin REST API Token found. Generated token: %s\n", adminAPIToken)
}
// Allow overriding default listening address
if *listenIP != "" {
cfg.EndpointAddress = *listenIP
}
// If overriding peers, disable SRV lookup
telemetryDNSBootstrapID := cfg.DNSBootstrapID
var peerOverrideArray []string
if *peerOverride != "" {
peerOverrideArray = strings.Split(*peerOverride, ";")
cfg.DNSBootstrapID = ""
// The networking code waits until we have GossipFanout
// connections before declaring the network stack to be
// ready, which triggers things like catchup. If the
// user explicitly specified a set of peers, make sure
// GossipFanout is no larger than this set, otherwise
// we will have to wait for a minute-long timeout until
// the network stack declares itself to be ready.
if cfg.GossipFanout > len(peerOverrideArray) {
cfg.GossipFanout = len(peerOverrideArray)
}
// make sure that the format of each entry is valid:
for idx, peer := range peerOverrideArray {
url, err := network.ParseHostOrURL(peer)
if err != nil {
fmt.Fprintf(os.Stderr, "Provided command line parameter '%s' is not a valid host:port pair\n", peer)
os.Exit(1)
return
}
peerOverrideArray[idx] = url.Host
}
}
// Apply the default deadlock setting before starting the server.
// It will potentially override it based on the config file DefaultDeadlock setting
if strings.ToLower(config.DefaultDeadlock) == "enable" {
deadlock.Opts.Disable = false
} else if strings.ToLower(config.DefaultDeadlock) == "disable" {
deadlock.Opts.Disable = true
} else if config.DefaultDeadlock != "" {
log.Fatalf("DefaultDeadlock is somehow not set to an expected value (enable / disable): %s", config.DefaultDeadlock)
}
var phonebookAddresses []string
if peerOverrideArray != nil {
phonebookAddresses = peerOverrideArray
} else {
ex, err := os.Executable()
if err != nil {
log.Errorf("cannot locate node executable: %s", err)
} else {
phonebookDir := filepath.Dir(ex)
phonebookAddresses, err = config.LoadPhonebook(phonebookDir)
if err != nil {
log.Debugf("Cannot load static phonebook: %v", err)
}
}
}
err = s.Initialize(cfg, phonebookAddresses)
if err != nil {
fmt.Fprintln(os.Stderr, err)
log.Error(err)
return
}
if *initAndExit {
return
}
deadlockState := "enabled"
if deadlock.Opts.Disable {
deadlockState = "disabled"
}
fmt.Fprintf(os.Stdout, "Deadlock detection is set to: %s (Default state is '%s')\n", deadlockState, config.DefaultDeadlock)
if log.GetTelemetryEnabled() {
done := make(chan struct{})
defer close(done)
// Make a copy of config and reset DNSBootstrapID in case it was disabled.
cfgCopy := cfg
cfgCopy.DNSBootstrapID = telemetryDNSBootstrapID
// If the telemetry URI is not set, periodically check SRV records for new telemetry URI
if remoteTelemetryEnabled && log.GetTelemetryURI() == "" {
			toolsnet.StartTelemetryURIUpdateService(time.Minute, cfgCopy, s.Genesis.Network, log, done)
}
currentVersion := config.GetCurrentVersion()
startupDetails := telemetryspec.StartupEventDetails{
Version: currentVersion.String(),
CommitHash: currentVersion.CommitHash,
Branch: currentVersion.Branch,
Channel: currentVersion.Channel,
InstanceHash: crypto.Hash([]byte(absolutePath)).String(),
}
log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.StartupEvent, startupDetails)
// Send a heartbeat event every 10 minutes as a sign of life
go func() {
ticker := time.NewTicker(10 * time.Minute)
defer ticker.Stop()
sendHeartbeat := func() {
values := make(map[string]string)
metrics.DefaultRegistry().AddMetrics(values)
heartbeatDetails := telemetryspec.HeartbeatEventDetails{
Metrics: values,
}
log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.HeartbeatEvent, heartbeatDetails)
}
// Send initial heartbeat, followed by one every 10 minutes.
sendHeartbeat()
for {
select {
case <-ticker.C:
sendHeartbeat()
case <-done:
return
}
}
}()
}
s.Start()
}
func resolveDataDir() string {
// Figure out what data directory to tell algod to use.
// If not specified on cmdline with '-d', look for default in environment.
var dir string
if dataDirectory == nil || *dataDirectory == "" {
dir = os.Getenv("ALGORAND_DATA")
} else {
dir = *dataDirectory
}
return dir
}
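// Illustrative invocations built from the flags defined above (paths,
// addresses, and ports are placeholders):
//
//	algod -d /var/lib/algorand                    # run against a data directory
//	algod -d /var/lib/algorand -G                 # print the genesis ID and exit
//	algod -d /var/lib/algorand -p 10.0.0.1:4160   # override the phonebook; disables SRV lookup
//	algod -d /var/lib/algorand -l 127.0.0.1:8080  # override the REST listening address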
| 1 | 40,037 | probably don't need `[:]` why not reference into `github.com/algorand/go-algorand/daemon/algod/api/server/lib` here and skip bouncing off daemon/algod/server.go ? | algorand-go-algorand | go |
@@ -86,8 +86,13 @@ public:
const table& y_data,
const table& result_values) {
auto reference = compute_reference(scale, shift, x_data, y_data);
+ const table reference_table = dal::detail::homogen_table_builder{}
+ .reset(reference.get_array(),
+ reference.get_row_count(),
+ reference.get_column_count())
+ .build();
const double tol = te::get_tolerance<Float>(3e-4, 1e-9);
- const double diff = la::l_inf_norm(reference, la::matrix<double>::wrap(result_values));
+ const double diff = te::abs_error(reference_table, result_values);
CHECK(diff < tol);
}
| 1 | /*******************************************************************************
* Copyright 2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "oneapi/dal/algo/linear_kernel/compute.hpp"
#include "oneapi/dal/test/engine/common.hpp"
#include "oneapi/dal/test/engine/fixtures.hpp"
#include "oneapi/dal/test/engine/dataframe.hpp"
#include "oneapi/dal/test/engine/math.hpp"
namespace oneapi::dal::linear_kernel::test {
namespace te = dal::test::engine;
namespace la = te::linalg;
template <typename TestType>
class linear_kernel_batch_test : public te::algo_fixture {
public:
using Float = std::tuple_element_t<0, TestType>;
using Method = std::tuple_element_t<1, TestType>;
auto get_descriptor(double scale, double shift) const {
return linear_kernel::descriptor<Float, Method>{}.set_scale(scale).set_shift(shift);
}
te::table_id get_homogen_table_id() const {
return te::table_id::homogen<Float>();
}
void general_checks(const te::dataframe& x_data,
const te::dataframe& y_data,
double scale,
double shift,
const te::table_id& x_data_table_id,
const te::table_id& y_data_table_id) {
CAPTURE(scale);
CAPTURE(shift);
const table x = x_data.get_table(this->get_policy(), x_data_table_id);
const table y = y_data.get_table(this->get_policy(), y_data_table_id);
INFO("create descriptor")
const auto linear_kernel_desc = get_descriptor(scale, shift);
INFO("run compute");
const auto compute_result = this->compute(linear_kernel_desc, x, y);
check_compute_result(scale, shift, x, y, compute_result);
}
void check_compute_result(double scale,
double shift,
const table& x_data,
const table& y_data,
const linear_kernel::compute_result<>& result) {
const auto result_values = result.get_values();
SECTION("result values table shape is expected") {
REQUIRE(result_values.get_row_count() == x_data.get_row_count());
REQUIRE(result_values.get_column_count() == y_data.get_row_count());
}
SECTION("there is no NaN in result values table") {
REQUIRE(te::has_no_nans(result_values));
}
SECTION("result values are expected") {
check_result_values(scale, shift, x_data, y_data, result_values);
}
}
void check_result_values(double scale,
double shift,
const table& x_data,
const table& y_data,
const table& result_values) {
auto reference = compute_reference(scale, shift, x_data, y_data);
const double tol = te::get_tolerance<Float>(3e-4, 1e-9);
const double diff = la::l_inf_norm(reference, la::matrix<double>::wrap(result_values));
CHECK(diff < tol);
}
la::matrix<double> compute_reference(double scale,
double shift,
const table& x_data,
const table& y_data) {
const auto x_data_matrix = la::matrix<double>::wrap(x_data);
const auto y_data_matrix = la::matrix<double>::wrap(y_data);
auto reference = la::dot(x_data_matrix, y_data_matrix.t(), scale);
la::enumerate_linear(reference, [&](std::int64_t i, double) {
reference.set(i) += shift;
});
return reference;
}
};
using linear_kernel_types = COMBINE_TYPES((float, double), (linear_kernel::method::dense));
TEMPLATE_LIST_TEST_M(linear_kernel_batch_test,
"linear_kernel common flow",
"[linear_kernel][integration][batch]",
linear_kernel_types) {
const te::dataframe x_data =
GENERATE_DATAFRAME(te::dataframe_builder{ 50, 50 }.fill_uniform(-3, 3, 7777),
te::dataframe_builder{ 100, 50 }.fill_uniform(-3, 3, 7777),
te::dataframe_builder{ 250, 50 }.fill_uniform(-3, 3, 7777),
te::dataframe_builder{ 1100, 50 }.fill_uniform(-3, 3, 7777));
// Homogen floating point type is the same as algorithm's floating point type
const auto x_data_table_id = this->get_homogen_table_id();
const te::dataframe y_data =
GENERATE_DATAFRAME(te::dataframe_builder{ 50, 50 }.fill_uniform(-3, 3, 7777),
te::dataframe_builder{ 100, 50 }.fill_uniform(-3, 3, 8888),
te::dataframe_builder{ 200, 50 }.fill_uniform(-3, 3, 8888),
te::dataframe_builder{ 1000, 50 }.fill_uniform(-3, 3, 8888));
// Homogen floating point type is the same as algorithm's floating point type
const auto y_data_table_id = this->get_homogen_table_id();
const double scale = GENERATE_COPY(1.0, 2.0);
const double shift = GENERATE_COPY(0.0, 1.0);
this->general_checks(x_data, y_data, scale, shift, x_data_table_id, y_data_table_id);
}
TEMPLATE_LIST_TEST_M(linear_kernel_batch_test,
"linear_kernel compute one element matrix",
"[linear_kernel][integration][batch]",
linear_kernel_types) {
const te::dataframe x_data =
GENERATE_DATAFRAME(te::dataframe_builder{ 1, 1 }.fill_uniform(-3, 3, 7777));
// Homogen floating point type is the same as algorithm's floating point type
const auto x_data_table_id = this->get_homogen_table_id();
const te::dataframe y_data =
GENERATE_DATAFRAME(te::dataframe_builder{ 1, 1 }.fill_uniform(-3, 3, 8888));
// Homogen floating point type is the same as algorithm's floating point type
const auto y_data_table_id = this->get_homogen_table_id();
const double scale = GENERATE_COPY(1.0, 2.0);
const double shift = GENERATE_COPY(0.0, 1.0);
this->general_checks(x_data, y_data, scale, shift, x_data_table_id, y_data_table_id);
}
} // namespace oneapi::dal::linear_kernel::test
| 1 | 27,587 | Why reference is converted to table? | oneapi-src-oneDAL | cpp |
@@ -347,6 +347,19 @@ func (s *DataStore) ListRegistrationEntries(ctx context.Context,
s.mu.Lock()
defer s.mu.Unlock()
+	// no pagination allowed for this fake; for now it returns only one page
+ if req.Pagination != nil {
+ if req.Pagination.Token == 0 {
+ req.Pagination.Token = 1
+ } else {
+ // for now only 1 page is returned
+ return &datastore.ListRegistrationEntriesResponse{
+ Entries: []*common.RegistrationEntry{},
+ Pagination: req.Pagination,
+ }, nil
+ }
+ }
+
// add the registration entries to the map
entriesSet := make(map[string]*common.RegistrationEntry)
for _, entry := range s.registrationEntries { | 1 | package fakedatastore
import (
"context"
"errors"
"fmt"
"sort"
"sync"
"github.com/golang/protobuf/proto"
_ "github.com/jinzhu/gorm/dialects/sqlite"
uuid "github.com/satori/go.uuid"
"github.com/spiffe/spire/pkg/common/bundleutil"
"github.com/spiffe/spire/pkg/common/selector"
"github.com/spiffe/spire/pkg/common/util"
"github.com/spiffe/spire/proto/common"
spi "github.com/spiffe/spire/proto/common/plugin"
"github.com/spiffe/spire/proto/server/datastore"
)
const (
selectorKeySeparator = '|'
)
var (
ErrBundleAlreadyExists = errors.New("bundle already exists")
ErrNoSuchBundle = errors.New("no such bundle")
ErrAttestedNodeAlreadyExists = errors.New("attested node entry already exists")
ErrNoSuchAttestedNode = errors.New("no such attested node entry")
ErrNoSuchRegistrationEntry = errors.New("no such registration entry")
ErrNoSuchToken = errors.New("no such token")
ErrTokenAlreadyExists = errors.New("token already exists")
)
type DataStore struct {
mu sync.Mutex
bundles map[string]*datastore.Bundle
attestedNodes map[string]*datastore.AttestedNode
nodeSelectors map[string][]*common.Selector
registrationEntries map[string]*datastore.RegistrationEntry
tokens map[string]*datastore.JoinToken
// relates bundles with entries that federate with them
bundleEntries map[string]map[string]bool
}
var _ datastore.DataStore = (*DataStore)(nil)
func New() *DataStore {
return &DataStore{
bundles: make(map[string]*datastore.Bundle),
attestedNodes: make(map[string]*datastore.AttestedNode),
nodeSelectors: make(map[string][]*common.Selector),
registrationEntries: make(map[string]*datastore.RegistrationEntry),
tokens: make(map[string]*datastore.JoinToken),
bundleEntries: make(map[string]map[string]bool),
}
}
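// Typical usage in tests (sketch): the fake satisfies datastore.DataStore, so
// it can stand in for the SQL plugin. The SPIFFE IDs and selector below are
// placeholders.
//
//	ds := fakedatastore.New()
//	resp, err := ds.CreateRegistrationEntry(ctx, &datastore.CreateRegistrationEntryRequest{
//		Entry: &common.RegistrationEntry{
//			ParentId:  "spiffe://example.org/agent",
//			SpiffeId:  "spiffe://example.org/workload",
//			Selectors: []*common.Selector{{Type: "unix", Value: "uid:1000"}},
//		},
//	})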
// CreateBundle stores the given bundle
func (s *DataStore) CreateBundle(ctx context.Context, req *datastore.CreateBundleRequest) (*datastore.CreateBundleResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
bundle := req.Bundle
if _, ok := s.bundles[bundle.TrustDomainId]; ok {
return nil, ErrBundleAlreadyExists
}
s.bundles[bundle.TrustDomainId] = cloneBundle(bundle)
return &datastore.CreateBundleResponse{
Bundle: cloneBundle(bundle),
}, nil
}
// UpdateBundle updates an existing bundle with the given CAs. Overwrites any
// existing certificates.
func (s *DataStore) UpdateBundle(ctx context.Context, req *datastore.UpdateBundleRequest) (*datastore.UpdateBundleResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
bundle := req.Bundle
if _, ok := s.bundles[bundle.TrustDomainId]; !ok {
return nil, ErrNoSuchBundle
}
s.bundles[bundle.TrustDomainId] = cloneBundle(bundle)
return &datastore.UpdateBundleResponse{
Bundle: cloneBundle(bundle),
}, nil
}
// AppendBundle updates an existing bundle with the given CAs. Overwrites any
// existing certificates.
func (s *DataStore) AppendBundle(ctx context.Context, req *datastore.AppendBundleRequest) (*datastore.AppendBundleResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
bundle := req.Bundle
if existingBundle, ok := s.bundles[bundle.TrustDomainId]; ok {
bundle, _ = bundleutil.MergeBundles(existingBundle, bundle)
}
s.bundles[bundle.TrustDomainId] = cloneBundle(bundle)
return &datastore.AppendBundleResponse{
Bundle: cloneBundle(bundle),
}, nil
}
// DeleteBundle deletes the bundle with the matching TrustDomainId. Any CACert data passed is ignored.
func (s *DataStore) DeleteBundle(ctx context.Context, req *datastore.DeleteBundleRequest) (*datastore.DeleteBundleResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
bundle, ok := s.bundles[req.TrustDomainId]
if !ok {
return nil, ErrNoSuchBundle
}
if bundleEntries := s.bundleEntries[req.TrustDomainId]; len(bundleEntries) > 0 {
switch req.Mode {
case datastore.DeleteBundleRequest_DELETE:
for entryID := range bundleEntries {
delete(s.registrationEntries, entryID)
}
case datastore.DeleteBundleRequest_DISSOCIATE:
for entryID := range bundleEntries {
if entry := s.registrationEntries[entryID]; entry != nil {
entry.FederatesWith = removeString(entry.FederatesWith, req.TrustDomainId)
}
}
default:
return nil, fmt.Errorf("cannot delete bundle; federated with %d registration entries", len(bundleEntries))
}
}
delete(s.bundles, req.TrustDomainId)
return &datastore.DeleteBundleResponse{
Bundle: cloneBundle(bundle),
}, nil
}
// FetchBundle returns the bundle matching the specified Trust Domain.
func (s *DataStore) FetchBundle(ctx context.Context, req *datastore.FetchBundleRequest) (*datastore.FetchBundleResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
bundle, ok := s.bundles[req.TrustDomainId]
if !ok {
return &datastore.FetchBundleResponse{}, nil
}
return &datastore.FetchBundleResponse{
Bundle: cloneBundle(bundle),
}, nil
}
// ListBundles can be used to fetch all existing bundles.
func (s *DataStore) ListBundles(ctx context.Context, req *datastore.ListBundlesRequest) (*datastore.ListBundlesResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
// get an ordered list of keys so tests can rely on ordering for stability
keys := make([]string, 0, len(s.bundles))
for key := range s.bundles {
keys = append(keys, key)
}
sort.Strings(keys)
resp := new(datastore.ListBundlesResponse)
for _, key := range keys {
resp.Bundles = append(resp.Bundles, cloneBundle(s.bundles[key]))
}
return resp, nil
}
func (s *DataStore) CreateAttestedNode(ctx context.Context,
req *datastore.CreateAttestedNodeRequest) (*datastore.CreateAttestedNodeResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
node := req.Node
if _, ok := s.attestedNodes[node.SpiffeId]; ok {
return nil, ErrAttestedNodeAlreadyExists
}
s.attestedNodes[node.SpiffeId] = cloneAttestedNode(node)
return &datastore.CreateAttestedNodeResponse{
Node: cloneAttestedNode(node),
}, nil
}
func (s *DataStore) FetchAttestedNode(ctx context.Context,
req *datastore.FetchAttestedNodeRequest) (*datastore.FetchAttestedNodeResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
resp := new(datastore.FetchAttestedNodeResponse)
node, ok := s.attestedNodes[req.SpiffeId]
if !ok {
return resp, nil
}
resp.Node = cloneAttestedNode(node)
return resp, nil
}
func (s *DataStore) ListAttestedNodes(ctx context.Context,
req *datastore.ListAttestedNodesRequest) (*datastore.ListAttestedNodesResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
// get an ordered list of keys so tests can rely on ordering for stability
keys := make([]string, 0, len(s.attestedNodes))
for key := range s.attestedNodes {
keys = append(keys, key)
}
sort.Strings(keys)
resp := new(datastore.ListAttestedNodesResponse)
for _, key := range keys {
attestedNodeEntry := s.attestedNodes[key]
if req.ByExpiresBefore != nil {
if attestedNodeEntry.CertNotAfter >= req.ByExpiresBefore.Value {
continue
}
}
resp.Nodes = append(resp.Nodes, cloneAttestedNode(attestedNodeEntry))
}
return resp, nil
}
func (s *DataStore) UpdateAttestedNode(ctx context.Context,
req *datastore.UpdateAttestedNodeRequest) (*datastore.UpdateAttestedNodeResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
node, ok := s.attestedNodes[req.SpiffeId]
if !ok {
return nil, ErrNoSuchAttestedNode
}
node.CertSerialNumber = req.CertSerialNumber
node.CertNotAfter = req.CertNotAfter
return &datastore.UpdateAttestedNodeResponse{
Node: cloneAttestedNode(node),
}, nil
}
func (s *DataStore) DeleteAttestedNode(ctx context.Context,
req *datastore.DeleteAttestedNodeRequest) (*datastore.DeleteAttestedNodeResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
node, ok := s.attestedNodes[req.SpiffeId]
if !ok {
return nil, ErrNoSuchAttestedNode
}
delete(s.attestedNodes, req.SpiffeId)
return &datastore.DeleteAttestedNodeResponse{
Node: cloneAttestedNode(node),
}, nil
}
func (s *DataStore) SetNodeSelectors(ctx context.Context,
req *datastore.SetNodeSelectorsRequest) (*datastore.SetNodeSelectorsResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
s.nodeSelectors[req.Selectors.SpiffeId] = cloneSelectors(req.Selectors.Selectors)
return &datastore.SetNodeSelectorsResponse{}, nil
}
func (s *DataStore) GetNodeSelectors(ctx context.Context,
req *datastore.GetNodeSelectorsRequest) (*datastore.GetNodeSelectorsResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
selectors := s.nodeSelectors[req.SpiffeId]
return &datastore.GetNodeSelectorsResponse{
Selectors: &datastore.NodeSelectors{
SpiffeId: req.SpiffeId,
Selectors: cloneSelectors(selectors),
},
}, nil
}
func (s *DataStore) CreateRegistrationEntry(ctx context.Context,
req *datastore.CreateRegistrationEntryRequest) (*datastore.CreateRegistrationEntryResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
entryID := newRegistrationEntryID()
entry := cloneRegistrationEntry(req.Entry)
entry.EntryId = entryID
s.registrationEntries[entryID] = entry
if err := s.addBundleLinks(entryID, req.Entry.FederatesWith); err != nil {
return nil, err
}
return &datastore.CreateRegistrationEntryResponse{
Entry: cloneRegistrationEntry(entry),
}, nil
}
func (s *DataStore) FetchRegistrationEntry(ctx context.Context,
req *datastore.FetchRegistrationEntryRequest) (*datastore.FetchRegistrationEntryResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
resp := new(datastore.FetchRegistrationEntryResponse)
entry, ok := s.registrationEntries[req.EntryId]
if !ok {
return resp, nil
}
resp.Entry = cloneRegistrationEntry(entry)
return resp, nil
}
func (s *DataStore) ListRegistrationEntries(ctx context.Context,
req *datastore.ListRegistrationEntriesRequest) (*datastore.ListRegistrationEntriesResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
// add the registration entries to the map
entriesSet := make(map[string]*common.RegistrationEntry)
for _, entry := range s.registrationEntries {
if req.ByParentId != nil && entry.ParentId != req.ByParentId.Value {
continue
}
if req.BySpiffeId != nil && entry.SpiffeId != req.BySpiffeId.Value {
continue
}
entriesSet[entry.EntryId] = entry
}
if req.BySelectors != nil && len(req.BySelectors.Selectors) > 0 {
var selectorsList [][]*common.Selector
selectorSet := selector.NewSetFromRaw(req.BySelectors.Selectors)
switch req.BySelectors.Match {
case datastore.BySelectors_MATCH_EXACT:
selectorsList = append(selectorsList, selectorSet.Raw())
case datastore.BySelectors_MATCH_SUBSET:
for combination := range selectorSet.Power() {
selectorsList = append(selectorsList, combination.Raw())
}
default:
return nil, fmt.Errorf("unhandled match behavior %q", req.BySelectors.Match)
}
// filter entries that don't match at least one selector set
for entryID, entry := range entriesSet {
matchesOne := false
for _, selectors := range selectorsList {
if !containsSelectors(entry.Selectors, selectors) {
continue
}
if len(entry.Selectors) != len(selectors) {
continue
}
matchesOne = true
break
}
if !matchesOne {
delete(entriesSet, entryID)
}
}
}
// clone and sort entries from the set
entries := make([]*common.RegistrationEntry, 0, len(entriesSet))
for _, entry := range entriesSet {
entries = append(entries, cloneRegistrationEntry(entry))
}
util.SortRegistrationEntries(entries)
return &datastore.ListRegistrationEntriesResponse{
Entries: entries,
}, nil
}
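// A sketch of how real token-based pagination could be added to this fake,
// per the review question on this change. It assumes the datastore
// Pagination message carries a numeric Token plus a PageSize field (as the
// SQL plugin's does); treat both field names as assumptions.
//
//	func paginateEntries(entries []*common.RegistrationEntry, p *datastore.Pagination) []*common.RegistrationEntry {
//		if p == nil || p.PageSize <= 0 {
//			return entries
//		}
//		start := int(p.Token) // the token doubles as the offset of the next page
//		if start >= len(entries) {
//			return nil
//		}
//		end := start + int(p.PageSize)
//		if end > len(entries) {
//			end = len(entries)
//		}
//		p.Token = uint64(end)
//		return entries[start:end]
//	}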
func (s *DataStore) UpdateRegistrationEntry(ctx context.Context,
req *datastore.UpdateRegistrationEntryRequest) (*datastore.UpdateRegistrationEntryResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
oldEntry, ok := s.registrationEntries[req.Entry.EntryId]
if !ok {
return nil, ErrNoSuchRegistrationEntry
}
s.removeBundleLinks(oldEntry.EntryId, oldEntry.FederatesWith)
entry := cloneRegistrationEntry(req.Entry)
s.registrationEntries[req.Entry.EntryId] = entry
if err := s.addBundleLinks(entry.EntryId, req.Entry.FederatesWith); err != nil {
return nil, err
}
return &datastore.UpdateRegistrationEntryResponse{
Entry: cloneRegistrationEntry(entry),
}, nil
}
func (s *DataStore) DeleteRegistrationEntry(ctx context.Context,
req *datastore.DeleteRegistrationEntryRequest) (*datastore.DeleteRegistrationEntryResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
registrationEntry, ok := s.registrationEntries[req.EntryId]
if !ok {
return nil, ErrNoSuchRegistrationEntry
}
delete(s.registrationEntries, req.EntryId)
s.removeBundleLinks(req.EntryId, registrationEntry.FederatesWith)
return &datastore.DeleteRegistrationEntryResponse{
Entry: cloneRegistrationEntry(registrationEntry),
}, nil
}
// CreateJoinToken takes a Token message and stores it
func (s *DataStore) CreateJoinToken(ctx context.Context, req *datastore.CreateJoinTokenRequest) (*datastore.CreateJoinTokenResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.tokens[req.JoinToken.Token]; ok {
return nil, ErrTokenAlreadyExists
}
s.tokens[req.JoinToken.Token] = cloneJoinToken(req.JoinToken)
return &datastore.CreateJoinTokenResponse{
JoinToken: cloneJoinToken(req.JoinToken),
}, nil
}
// FetchToken takes a Token message and returns one, populating the fields
// we have knowledge of
func (s *DataStore) FetchJoinToken(ctx context.Context, req *datastore.FetchJoinTokenRequest) (*datastore.FetchJoinTokenResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
joinToken, ok := s.tokens[req.Token]
if !ok {
return &datastore.FetchJoinTokenResponse{}, nil
}
return &datastore.FetchJoinTokenResponse{
JoinToken: cloneJoinToken(joinToken),
}, nil
}
func (s *DataStore) DeleteJoinToken(ctx context.Context, req *datastore.DeleteJoinTokenRequest) (*datastore.DeleteJoinTokenResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
joinToken, ok := s.tokens[req.Token]
if !ok {
return nil, ErrNoSuchToken
}
delete(s.tokens, req.Token)
return &datastore.DeleteJoinTokenResponse{
JoinToken: cloneJoinToken(joinToken),
}, nil
}
func (s *DataStore) PruneJoinTokens(ctx context.Context, req *datastore.PruneJoinTokensRequest) (*datastore.PruneJoinTokensResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
for key, token := range s.tokens {
if token.Expiry <= req.ExpiresBefore {
delete(s.tokens, key)
}
}
return &datastore.PruneJoinTokensResponse{}, nil
}
func (s *DataStore) Configure(ctx context.Context, req *spi.ConfigureRequest) (*spi.ConfigureResponse, error) {
return &spi.ConfigureResponse{}, nil
}
func (DataStore) GetPluginInfo(context.Context, *spi.GetPluginInfoRequest) (*spi.GetPluginInfoResponse, error) {
return &spi.GetPluginInfoResponse{}, nil
}
func (s *DataStore) addBundleLinks(entryID string, bundleIDs []string) error {
for _, bundleID := range bundleIDs {
if _, ok := s.bundles[bundleID]; !ok {
return ErrNoSuchBundle
}
bundleEntries := s.bundleEntries[bundleID]
if bundleEntries == nil {
bundleEntries = make(map[string]bool)
s.bundleEntries[bundleID] = bundleEntries
}
bundleEntries[entryID] = true
}
return nil
}
func (s *DataStore) removeBundleLinks(entryID string, bundleIDs []string) {
for _, bundleID := range bundleIDs {
delete(s.bundleEntries[bundleID], entryID)
}
}
func cloneBytes(bytes []byte) []byte {
return append([]byte(nil), bytes...)
}
func cloneBundle(bundle *datastore.Bundle) *datastore.Bundle {
return proto.Clone(bundle).(*datastore.Bundle)
}
func cloneAttestedNode(attestedNodeEntry *datastore.AttestedNode) *datastore.AttestedNode {
return proto.Clone(attestedNodeEntry).(*datastore.AttestedNode)
}
func cloneSelectors(selectors []*common.Selector) []*common.Selector {
return proto.Clone(&common.Selectors{Entries: selectors}).(*common.Selectors).Entries
}
func cloneRegistrationEntry(registrationEntry *datastore.RegistrationEntry) *datastore.RegistrationEntry {
return proto.Clone(registrationEntry).(*datastore.RegistrationEntry)
}
func cloneJoinToken(token *datastore.JoinToken) *datastore.JoinToken {
return proto.Clone(token).(*datastore.JoinToken)
}
func newRegistrationEntryID() string {
return uuid.NewV4().String()
}
func containsSelectors(selectors, subset []*common.Selector) bool {
nextSelector:
for _, candidate := range subset {
for _, selector := range selectors {
if candidate.Type == selector.Type && candidate.Value == selector.Value {
break nextSelector
}
}
return false
}
return true
}
func removeString(list []string, s string) []string {
out := make([]string, 0, len(list))
for _, entry := range list {
if entry != s {
out = append(out, entry)
}
}
return out
}
| 1 | 10,195 | should we implement pagination in the fake datastore so the server startup code that paginates entries for trust domain validation can be tested? | spiffe-spire | go |
@@ -15,6 +15,12 @@ func Compile(scope Scope, f *semantic.FunctionExpression, in semantic.MonoType)
return nil, errors.Newf(codes.Invalid, "function input must be an object @ %v", f.Location())
}
+ // If the function is vectorizable, `f.Vectorized` will be populated, and
+ // we should use the FunctionExpression it points to instead of `f`
+ if f.Vectorized != nil {
+ f = f.Vectorized
+ }
+
// Retrieve the function argument types and create an object type from them.
fnType := f.TypeOf()
argN, err := fnType.NumArguments() | 1 | package compiler
import (
"github.com/influxdata/flux/codes"
"github.com/influxdata/flux/internal/errors"
"github.com/influxdata/flux/semantic"
"github.com/influxdata/flux/values"
)
func Compile(scope Scope, f *semantic.FunctionExpression, in semantic.MonoType) (Func, error) {
if scope == nil {
scope = NewScope()
}
if in.Nature() != semantic.Object {
return nil, errors.Newf(codes.Invalid, "function input must be an object @ %v", f.Location())
}
// Retrieve the function argument types and create an object type from them.
fnType := f.TypeOf()
argN, err := fnType.NumArguments()
if err != nil {
return nil, err
}
	// The function expression has a monotype that may have
	// tvars contained within it. We have a realized input type
	// so we can use that to construct the tvar substitutions.
	// Iterate over every argument, find the equivalent property
	// inside of the input, and then generate the substitutions.
subst := make(map[uint64]semantic.MonoType)
for i := 0; i < argN; i++ {
arg, err := fnType.Argument(i)
if err != nil {
return nil, err
}
name := arg.Name()
argT, err := arg.TypeOf()
if err != nil {
return nil, err
}
prop, ok, err := findProperty(string(name), in)
if err != nil {
return nil, err
} else if ok {
mtyp, err := prop.TypeOf()
if err != nil {
return nil, err
}
if err := substituteTypes(subst, argT, mtyp); err != nil {
return nil, err
}
} else if !arg.Optional() {
return nil, errors.Newf(codes.Invalid, "missing required argument %q", string(name))
}
}
root, err := compile(f.Block, subst)
if err != nil {
return nil, errors.Wrapf(err, codes.Inherit, "cannot compile @ %v", f.Location())
}
return compiledFn{
root: root,
parentScope: scope,
}, nil
}
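// Illustrative call sequence (sketch): compile a semantic function expression
// against a concrete record type, then evaluate it against an input object.
// The Eval signature is an assumption based on the compiledFn returned above.
//
//	fn, err := Compile(nil, fnExpr, semantic.NewObjectType([]semantic.PropertyType{
//		{Key: []byte("r"), Value: recordType},
//	}))
//	if err != nil {
//		return err
//	}
//	v, err := fn.Eval(ctx, input) // input is a values.Object matching the record type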
// substituteTypes will generate a substitution map by recursing through
// inType and mapping any variables to the value in the other record.
// If the input type is not a type variable, it will check to ensure
// that the type in the input matches or it will return an error.
func substituteTypes(subst map[uint64]semantic.MonoType, inferredType, actualType semantic.MonoType) error {
// If the input isn't a valid type, then don't consider it as
// part of substituting types. We will trust type inference has
// the correct type and that we are just handling a null value
// which isn't represented in type inference.
if actualType.Nature() == semantic.Invalid {
return nil
} else if inferredType.Kind() == semantic.Var {
vn, err := inferredType.VarNum()
if err != nil {
return err
}
// If this substitution variable already exists,
// we need to verify that it maps to the same type
// in the input record.
// We can do this by calling substituteTypes with the same
// input parameter and the substituted monotype since
// substituteTypes will verify the types.
if t, ok := subst[vn]; ok {
return substituteTypes(subst, t, actualType)
}
// If the input type is not invalid, mark it down
// as the real type.
if actualType.Nature() != semantic.Invalid {
subst[vn] = actualType
}
return nil
}
if inferredType.Kind() != actualType.Kind() {
return errors.Newf(codes.FailedPrecondition, "type conflict: %s != %s", inferredType, actualType)
}
switch inferredType.Kind() {
case semantic.Basic:
at, err := inferredType.Basic()
if err != nil {
return err
}
// Otherwise we have a valid type and need to ensure they match.
bt, err := actualType.Basic()
if err != nil {
return err
}
if at != bt {
return errors.Newf(codes.FailedPrecondition, "type conflict: %s != %s", inferredType, actualType)
}
return nil
case semantic.Arr:
lt, err := inferredType.ElemType()
if err != nil {
return err
}
rt, err := actualType.ElemType()
if err != nil {
return err
}
return substituteTypes(subst, lt, rt)
case semantic.Dict:
lk, err := inferredType.KeyType()
if err != nil {
return err
}
rk, err := actualType.KeyType()
if err != nil {
return err
}
if err := substituteTypes(subst, lk, rk); err != nil {
return err
}
lv, err := inferredType.ValueType()
if err != nil {
return err
}
rv, err := actualType.ValueType()
if err != nil {
return err
}
return substituteTypes(subst, lv, rv)
case semantic.Record:
// We need to compare the Record type that was inferred
// and the reality. It is ok for Record properties to exist
// in the real type that aren't in the inferred type and
// it is ok for inferred types to be missing from the actual
// input type in the case of null values.
// What isn't ok is that the two types conflict so we are
// going to iterate over all of the properties in the inferred
// type and perform substitutions on them.
nproperties, err := inferredType.NumProperties()
if err != nil {
return err
}
names := make([]string, 0, nproperties)
for i := 0; i < nproperties; i++ {
lprop, err := inferredType.RecordProperty(i)
if err != nil {
return err
}
// Record the name of the property in the input type.
name := lprop.Name()
if containsStr(names, name) {
// The input type may have the same field twice if the record was
// extended with {r with ...}
continue
}
names = append(names, name)
// Find the property in the real type if it
// exists. If it doesn't exist, then no problem!
rprop, ok, err := findProperty(name, actualType)
if err != nil {
return err
} else if !ok {
// It is ok if this property doesn't exist
// in the input type.
continue
}
ltyp, err := lprop.TypeOf()
if err != nil {
return err
}
rtyp, err := rprop.TypeOf()
if err != nil {
return err
}
if err := substituteTypes(subst, ltyp, rtyp); err != nil {
return err
}
}
// If this object extends another, then find all of the labels
// in the in value that were not referenced by the type.
if withType, ok, err := inferredType.Extends(); err != nil {
return err
} else if ok {
// Construct the input by filtering any of the names
// that were referenced above. This way, extends only
// includes the unreferenced labels.
nproperties, err := actualType.NumProperties()
if err != nil {
return err
}
properties := make([]semantic.PropertyType, 0, nproperties)
for i := 0; i < nproperties; i++ {
prop, err := actualType.RecordProperty(i)
if err != nil {
return err
}
name := prop.Name()
if containsStr(names, name) {
// Already referenced so don't pass this
// to the extends portion.
continue
}
typ, err := prop.TypeOf()
if err != nil {
return err
}
properties = append(properties, semantic.PropertyType{
Key: []byte(name),
Value: typ,
})
}
with := semantic.NewObjectType(properties)
if err := substituteTypes(subst, withType, with); err != nil {
return err
}
}
return nil
case semantic.Fun:
// TODO: https://github.com/influxdata/flux/issues/2587
return errors.New(codes.Unimplemented)
default:
return errors.Newf(codes.Internal, "unknown semantic kind: %s", inferredType)
}
}
func findProperty(name string, t semantic.MonoType) (*semantic.RecordProperty, bool, error) {
n, err := t.NumProperties()
if err != nil {
return nil, false, err
}
for i := 0; i < n; i++ {
p, err := t.RecordProperty(i)
if err != nil {
return nil, false, err
}
if p.Name() == name {
return p, true, nil
}
}
return nil, false, nil
}
// apply applies a substitution to a type.
// It will ignore any errors when reading a type.
// This is safe because we already validated that the function type is a monotype.
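// For example, with sub = {0: int}, the record type {a: t0, b: string}
// is rewritten to {a: int, b: string}.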
func apply(sub map[uint64]semantic.MonoType, props []semantic.PropertyType, t semantic.MonoType) semantic.MonoType {
switch t.Kind() {
case semantic.Unknown, semantic.Basic:
// Basic types do not contain type variables.
// As a result there is nothing to substitute.
return t
case semantic.Var:
tv, err := t.VarNum()
if err != nil {
return t
}
ty, ok := sub[tv]
if !ok {
return t
}
return ty
case semantic.Arr:
element, err := t.ElemType()
if err != nil {
return t
}
return semantic.NewArrayType(apply(sub, props, element))
case semantic.Dict:
key, err := t.KeyType()
if err != nil {
return t
}
val, err := t.ValueType()
if err != nil {
return t
}
return semantic.NewDictType(
apply(sub, props, key),
apply(sub, props, val),
)
case semantic.Record:
n, err := t.NumProperties()
if err != nil {
return t
}
for i := 0; i < n; i++ {
pr, err := t.RecordProperty(i)
if err != nil {
return t
}
ty, err := pr.TypeOf()
if err != nil {
return t
}
props = append(props, semantic.PropertyType{
Key: []byte(pr.Name()),
Value: apply(sub, nil, ty),
})
}
r, extends, err := t.Extends()
if err != nil {
return t
}
if !extends {
return semantic.NewObjectType(props)
}
r = apply(sub, nil, r)
switch r.Kind() {
case semantic.Record:
return apply(sub, props, r)
case semantic.Var:
tv, err := r.VarNum()
if err != nil {
return t
}
return semantic.ExtendObjectType(props, &tv)
}
case semantic.Fun:
n, err := t.NumArguments()
if err != nil {
return t
}
args := make([]semantic.ArgumentType, n)
for i := 0; i < n; i++ {
arg, err := t.Argument(i)
if err != nil {
return t
}
typ, err := arg.TypeOf()
if err != nil {
return t
}
args[i] = semantic.ArgumentType{
Name: arg.Name(),
Type: apply(sub, nil, typ),
Pipe: arg.Pipe(),
Optional: arg.Optional(),
}
}
retn, err := t.ReturnType()
if err != nil {
return t
}
return semantic.NewFunctionType(apply(sub, nil, retn), args)
}
// If none of the above cases are matched, something has gone
// seriously wrong and we should panic.
panic("unknown type")
}
// compile recursively compiles semantic nodes into evaluators.
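// For example, `a + 1` compiles to a binaryEvaluator whose left side is an
// identifierEvaluator for `a` and whose right side is an integerEvaluator
// for the literal 1.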
func compile(n semantic.Node, subst map[uint64]semantic.MonoType) (Evaluator, error) {
switch n := n.(type) {
case *semantic.Block:
body := make([]Evaluator, len(n.Body))
for i, s := range n.Body {
node, err := compile(s, subst)
if err != nil {
return nil, err
}
body[i] = node
}
return &blockEvaluator{
t: apply(subst, nil, n.ReturnStatement().Argument.TypeOf()),
body: body,
}, nil
case *semantic.ExpressionStatement:
return nil, errors.New(codes.Internal, "statement does nothing, side effects are not supported by the compiler")
case *semantic.ReturnStatement:
node, err := compile(n.Argument, subst)
if err != nil {
return nil, err
}
return returnEvaluator{
Evaluator: node,
}, nil
case *semantic.NativeVariableAssignment:
node, err := compile(n.Init, subst)
if err != nil {
return nil, err
}
return &declarationEvaluator{
t: apply(subst, nil, n.Init.TypeOf()),
id: n.Identifier.Name.Name(),
init: node,
}, nil
case *semantic.ObjectExpression:
properties := make(map[string]Evaluator, len(n.Properties))
for _, p := range n.Properties {
node, err := compile(p.Value, subst)
if err != nil {
return nil, err
}
properties[p.Key.Key()] = node
}
var extends *identifierEvaluator
if n.With != nil {
node, err := compile(n.With, subst)
if err != nil {
return nil, err
}
with, ok := node.(*identifierEvaluator)
if !ok {
return nil, errors.New(codes.Internal, "unknown identifier in with expression")
}
extends = with
}
return &objEvaluator{
t: apply(subst, nil, n.TypeOf()),
properties: properties,
with: extends,
}, nil
case *semantic.ArrayExpression:
var elements []Evaluator
if len(n.Elements) > 0 {
elements = make([]Evaluator, len(n.Elements))
for i, e := range n.Elements {
node, err := compile(e, subst)
if err != nil {
return nil, err
}
elements[i] = node
}
}
return &arrayEvaluator{
t: apply(subst, nil, n.TypeOf()),
array: elements,
}, nil
case *semantic.DictExpression:
elements := make([]struct {
Key Evaluator
Val Evaluator
}, len(n.Elements))
for i, item := range n.Elements {
key, err := compile(item.Key, subst)
if err != nil {
return nil, err
}
val, err := compile(item.Val, subst)
if err != nil {
return nil, err
}
elements[i] = struct {
Key Evaluator
Val Evaluator
}{Key: key, Val: val}
}
return &dictEvaluator{
t: apply(subst, nil, n.TypeOf()),
elements: elements,
}, nil
case *semantic.IdentifierExpression:
return &identifierEvaluator{
t: apply(subst, nil, n.TypeOf()),
name: n.Name.Name(),
}, nil
case *semantic.MemberExpression:
object, err := compile(n.Object, subst)
if err != nil {
return nil, err
}
t := apply(subst, nil, n.TypeOf())
return &memberEvaluator{
t: t,
object: object,
property: n.Property.Name(),
nullable: isNullable(t),
}, nil
case *semantic.IndexExpression:
arr, err := compile(n.Array, subst)
if err != nil {
return nil, err
}
idx, err := compile(n.Index, subst)
if err != nil {
return nil, err
}
return &arrayIndexEvaluator{
t: apply(subst, nil, n.TypeOf()),
array: arr,
index: idx,
}, nil
case *semantic.StringExpression:
parts := make([]Evaluator, len(n.Parts))
for i, p := range n.Parts {
e, err := compile(p, subst)
if err != nil {
return nil, err
}
parts[i] = e
}
return &stringExpressionEvaluator{
parts: parts,
}, nil
case *semantic.TextPart:
return &textEvaluator{
value: n.Value,
}, nil
case *semantic.InterpolatedPart:
e, err := compile(n.Expression, subst)
if err != nil {
return nil, err
}
return &interpolatedEvaluator{
s: e,
}, nil
case *semantic.BooleanLiteral:
return &booleanEvaluator{
b: n.Value,
}, nil
case *semantic.IntegerLiteral:
return &integerEvaluator{
i: n.Value,
}, nil
case *semantic.UnsignedIntegerLiteral:
return &unsignedIntegerEvaluator{
i: n.Value,
}, nil
case *semantic.FloatLiteral:
return &floatEvaluator{
f: n.Value,
}, nil
case *semantic.StringLiteral:
return &stringEvaluator{
s: n.Value,
}, nil
case *semantic.RegexpLiteral:
return ®expEvaluator{
r: n.Value,
}, nil
case *semantic.DateTimeLiteral:
return &timeEvaluator{
time: values.ConvertTime(n.Value),
}, nil
case *semantic.DurationLiteral:
v, err := values.FromDurationValues(n.Values)
if err != nil {
return nil, err
}
return &durationEvaluator{
duration: v,
}, nil
case *semantic.UnaryExpression:
node, err := compile(n.Argument, subst)
if err != nil {
return nil, err
}
return &unaryEvaluator{
t: apply(subst, nil, n.TypeOf()),
node: node,
op: n.Operator,
}, nil
case *semantic.LogicalExpression:
l, err := compile(n.Left, subst)
if err != nil {
return nil, err
}
r, err := compile(n.Right, subst)
if err != nil {
return nil, err
}
return &logicalEvaluator{
operator: n.Operator,
left: l,
right: r,
}, nil
case *semantic.ConditionalExpression:
test, err := compile(n.Test, subst)
if err != nil {
return nil, err
}
c, err := compile(n.Consequent, subst)
if err != nil {
return nil, err
}
a, err := compile(n.Alternate, subst)
if err != nil {
return nil, err
}
return &conditionalEvaluator{
test: test,
consequent: c,
alternate: a,
}, nil
case *semantic.BinaryExpression:
l, err := compile(n.Left, subst)
if err != nil {
return nil, err
}
lt := l.Type().Nature()
r, err := compile(n.Right, subst)
if err != nil {
return nil, err
}
rt := r.Type().Nature()
if lt == semantic.Invalid {
lt = rt
} else if rt == semantic.Invalid {
rt = lt
}
f, err := values.LookupBinaryFunction(values.BinaryFuncSignature{
Operator: n.Operator,
Left: lt,
Right: rt,
})
if err != nil {
return nil, err
}
return &binaryEvaluator{
t: apply(subst, nil, n.TypeOf()),
left: l,
right: r,
f: f,
}, nil
case *semantic.CallExpression:
args, err := compile(n.Arguments, subst)
if err != nil {
return nil, err
}
if n.Pipe != nil {
pipeArg, err := n.Callee.TypeOf().PipeArgument()
if err != nil {
return nil, err
}
if pipeArg == nil {
// This should be caught during type inference
return nil, errors.Newf(codes.Internal, "callee lacks a pipe argument, but one was provided")
}
pipe, err := compile(n.Pipe, subst)
if err != nil {
return nil, err
}
args.(*objEvaluator).properties[string(pipeArg.Name())] = pipe
}
callee, err := compile(n.Callee, subst)
if err != nil {
return nil, err
}
return &callEvaluator{
t: apply(subst, nil, n.TypeOf()),
callee: callee,
args: args,
}, nil
case *semantic.FunctionExpression:
fnType := apply(subst, nil, n.TypeOf())
num, err := fnType.NumArguments()
if err != nil {
return nil, err
}
params := make([]functionParam, 0, num)
for i := 0; i < num; i++ {
arg, err := fnType.Argument(i)
if err != nil {
return nil, err
}
k := string(arg.Name())
pt, err := arg.TypeOf()
if err != nil {
return nil, err
}
param := functionParam{
Key: k,
Type: pt,
}
if n.Defaults != nil {
// Search for default value
for _, d := range n.Defaults.Properties {
if d.Key.Key() == k {
d, err := compile(d.Value, subst)
if err != nil {
return nil, err
}
param.Default = d
break
}
}
}
params = append(params, param)
}
return &functionEvaluator{
t: fnType,
params: params,
fn: n,
}, nil
default:
return nil, errors.Newf(codes.Internal, "unknown semantic node of type %T", n)
}
}
func containsStr(strs []string, str string) bool {
for _, s := range strs {
if str == s {
return true
}
}
return false
}
// isNullable will report if the MonoType is capable of being nullable.
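// Scalar natures can carry a null value; arrays, records, and dictionaries cannot.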
func isNullable(t semantic.MonoType) bool {
n := t.Nature()
return n != semantic.Array && n != semantic.Object && n != semantic.Dictionary
}
| 1 | 17,602 | What mechanism will be exposed so the caller knows they're using the vectorized version? | influxdata-flux | go |
@@ -20,9 +20,9 @@ class TopicsController < ApplicationController
@topic = build_new_topic
respond_to do |format|
if verify_recaptcha(model: @topic) && @topic.save
- format.html { redirect_to forum_path(@forum), flash: { success: t('.success') } }
+ format.html { redirect_to forum_path(@forum), notice: t('.success') }
else
- format.html { redirect_to forum_path(@forum), flash: { error: t('.error') } }
+ format.html { render :new }
end
end
end | 1 | class TopicsController < ApplicationController
include TopicsHelper
before_action :session_required, only: [:new, :create]
before_action :admin_session_required, only: [:edit, :update, :destroy]
before_action :find_forum_record, only: [:index, :new, :create]
before_action :find_forum_and_topic_records, except: [:index, :new, :create]
def index
@topics = @forum.topics
redirect_to forum_path(@forum)
end
def new
@topic = @forum.topics.build
@post = @topic.posts.build
redirect_to new_session_path unless logged_in?
end
def create
@topic = build_new_topic
respond_to do |format|
if verify_recaptcha(model: @topic) && @topic.save
format.html { redirect_to forum_path(@forum), flash: { success: t('.success') } }
else
format.html { redirect_to forum_path(@forum), flash: { error: t('.error') } }
end
end
end
def show
@posts = @topic.posts.paginate(page: params[:page], per_page: 25)
end
def update
respond_to do |format|
if @topic.update(topic_params)
format.html { redirect_to topic_path(@topic), flash: { success: t('.success') } }
else
format.html { redirect_to topic_path(@topic), flash: { error: t('.error') } }
end
end
end
def destroy
@topic.destroy
respond_to do |format|
format.html { redirect_to forums_path }
end
end
private
def find_forum_record
@forum = Forum.find_by(id: params[:forum_id])
end
def find_forum_and_topic_records
@topic = Topic.find_by(id: params[:id])
@forum = @topic.forum
end
def topic_params
params.require(:topic).permit(:forum_id, :title, :sticky,
:hits, :closed, posts_attributes: [:body])
end
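# Builds the topic and its first post, both owned by the current user.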
def build_new_topic
topic = @forum.topics.build(topic_params)
topic.account_id = current_user.id
topic.posts.last.account_id = current_user.id
topic
end
end
| 1 | 6,956 | Can we remove respond_to block its not required here | blackducksoftware-ohloh-ui | rb |
@@ -23,12 +23,13 @@
#include "oneapi/dal/algo/jaccard.hpp"
#include "oneapi/dal/graph/service_functions.hpp"
#include "oneapi/dal/graph/undirected_adjacency_vector_graph.hpp"
-#include "oneapi/dal/io/graph_csv_data_source.hpp"
-#include "oneapi/dal/io/load_graph.hpp"
+#include "oneapi/dal/io/csv.hpp"
#include "oneapi/dal/table/homogen.hpp"
namespace dal = oneapi::dal;
+using namespace dal;
+
/// Computes Jaccard similarity coefficients for the graph. The upper triangular
/// matrix is processed only as it is symmetric for an undirected graph.
/// | 1 | /*******************************************************************************
* Copyright 2020-2021 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <iostream>
#include "tbb/global_control.h"
#include "tbb/parallel_for.h"
#include "example_util/utils.hpp"
#include "oneapi/dal/algo/jaccard.hpp"
#include "oneapi/dal/graph/service_functions.hpp"
#include "oneapi/dal/graph/undirected_adjacency_vector_graph.hpp"
#include "oneapi/dal/io/graph_csv_data_source.hpp"
#include "oneapi/dal/io/load_graph.hpp"
#include "oneapi/dal/table/homogen.hpp"
namespace dal = oneapi::dal;
/// Computes Jaccard similarity coefficients for the graph. The upper triangular
/// matrix is processed only as it is symmetric for an undirected graph.
///
/// @param [in] g The input graph
/// @param [in] block_row_count The number of rows in a block
/// @param [in] block_column_count The number of columns in a block
template <class Graph>
void vertex_similarity_block_processing(const Graph &g,
std::int32_t block_row_count,
std::int32_t block_column_count);
int main(int argc, char **argv) {
// load the graph
const auto filename = get_data_path("graph.csv");
const dal::preview::graph_csv_data_source ds(filename);
const dal::preview::load_graph::descriptor<> d;
const auto graph = dal::preview::load_graph::load(d, ds);
// set the block sizes for Jaccard similarity block processing
const std::int32_t block_row_count = 2;
const std::int32_t block_column_count = 5;
// set the number of threads
const std::int32_t tbb_threads_number = 4;
tbb::global_control c(tbb::global_control::max_allowed_parallelism, tbb_threads_number);
// compute Jaccard similarity coefficients for the graph
vertex_similarity_block_processing(graph, block_row_count, block_column_count);
return 0;
}
template <class Graph>
void vertex_similarity_block_processing(const Graph &g,
std::int32_t block_row_count,
std::int32_t block_column_count) {
// create caching builders for all threads
std::vector<dal::preview::jaccard::caching_builder> processing_blocks(
tbb::this_task_arena::max_concurrency());
// compute the number of vertices in the graph
const std::int32_t vertex_count = dal::preview::get_vertex_count(g);
// compute the number of rows
std::int32_t row_count = vertex_count / block_row_count;
if (vertex_count % block_row_count) {
row_count++;
}
// parallel processing by rows
tbb::parallel_for(
tbb::blocked_range<std::int32_t>(0, row_count),
[&](const tbb::blocked_range<std::int32_t> &r) {
for (std::int32_t i = r.begin(); i != r.end(); ++i) {
// compute the range of rows
const std::int32_t row_range_begin = i * block_row_count;
const std::int32_t row_range_end = (i + 1) * block_row_count;
// start column ranges from the diagonal
const std::int32_t column_begin = 1 + row_range_begin;
// compute the number of columns
std::int32_t column_count = (vertex_count - column_begin) / block_column_count;
if ((vertex_count - column_begin) % block_column_count) {
column_count++;
}
// parallel processing by columns
tbb::parallel_for(
tbb::blocked_range<std::int32_t>(0, column_count),
[&](const tbb::blocked_range<std::int32_t> &inner_r) {
for (std::int32_t j = inner_r.begin(); j != inner_r.end(); ++j) {
// compute the range of columns
const std::int32_t column_range_begin =
column_begin + j * block_column_count;
const std::int32_t column_range_end =
column_begin + (j + 1) * block_column_count;
// set block ranges for the vertex similarity algorithm
const auto jaccard_desc =
dal::preview::jaccard::descriptor<>().set_block(
{ row_range_begin, std::min(row_range_end, vertex_count) },
{ column_range_begin,
std::min(column_range_end, vertex_count) });
// compute Jaccard coefficients for the block
dal::preview::vertex_similarity(
jaccard_desc,
g,
processing_blocks[tbb::this_task_arena::current_thread_index()]);
// do application specific postprocessing of the result here
}
},
tbb::simple_partitioner{});
}
},
tbb::simple_partitioner{});
}
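The traversal above boils down to two ceil-divisions plus a diagonal offset. A standalone sketch of just that index arithmetic, in Go purely for illustration (no oneDAL API involved; the constants are arbitrary):

package main

import "fmt"

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	const vertexCount, blockRowCount, blockColumnCount = 7, 2, 5
	// ceil division: one extra block when the size does not divide evenly
	rowCount := (vertexCount + blockRowCount - 1) / blockRowCount
	for i := 0; i < rowCount; i++ {
		rowBegin := i * blockRowCount
		rowEnd := minInt((i+1)*blockRowCount, vertexCount)
		columnBegin := 1 + rowBegin // only blocks to the right of the diagonal
		columnCount := (vertexCount - columnBegin + blockColumnCount - 1) / blockColumnCount
		for j := 0; j < columnCount; j++ {
			colBegin := columnBegin + j*blockColumnCount
			colEnd := minInt(columnBegin+(j+1)*blockColumnCount, vertexCount)
			fmt.Printf("rows [%d,%d) x cols [%d,%d)\n", rowBegin, rowEnd, colBegin, colEnd)
		}
	}
}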
| 1 | 31,766 | Do not do that even in example, we demonstrate bad practice | oneapi-src-oneDAL | cpp |
@@ -202,7 +202,13 @@ module Beaker
#Examine the host system to determine the architecture
#@return [Boolean] true if x86_64, false otherwise
def determine_if_x86_64
- result = exec(Beaker::Command.new("arch | grep x86_64"), :acceptable_exit_codes => (0...127))
+ if self['is_cygwin'].nil? or self['is_cygwin'] == true
+ command = Beaker::Command.new("arch | grep x86_64")
+ else
+ command = Beaker::Command.new("echo '' | wmic os get osarchitecture | FindStr 64-bit")
+ end
+
+ result = exec(command, :acceptable_exit_codes => (0...127))
result.exit_code == 0
end
| 1 | require 'socket'
require 'timeout'
require 'benchmark'
[ 'command', 'ssh_connection' ].each do |lib|
require "beaker/#{lib}"
end
module Beaker
class Host
SELECT_TIMEOUT = 30
class CommandFailure < StandardError; end
# This class provides array syntax for using puppet --configprint on a host
class PuppetConfigReader
def initialize(host, command)
@host = host
@command = command
end
def [](k)
cmd = PuppetCommand.new(@command, "--configprint #{k.to_s}")
@host.exec(cmd).stdout.strip
end
end
def self.create name, options
case options['HOSTS'][name]['platform']
when /windows/
cygwin = options['HOSTS'][name]['is_cygwin']
if cygwin.nil? or cygwin == true
Windows::Host.new name, options
else
PSWindows::Host.new name, options
end
when /aix/
Aix::Host.new name, options
when /osx/
Mac::Host.new name, options
else
Unix::Host.new name, options
end
end
attr_accessor :logger
attr_reader :name, :defaults
def initialize name, options
@logger = options[:logger]
@name, @options = name.to_s, options.dup
# This is annoying and its because of drift/lack of enforcement/lack of having
# a explict relationship between our defaults, our setup steps and how they're
# related through 'type' and the differences between the assumption of our two
# configurations we have for many of our products
type = @options.get_type
@defaults = merge_defaults_for_type @options, type
pkg_initialize
end
def pkg_initialize
# This method should be overridden by platform-specific code to
# handle whatever packaging-related initialization is necessary.
end
def merge_defaults_for_type options, type
defaults = self.class.send "#{type}_defaults".to_sym
defaults.merge(options.merge((options['HOSTS'][name])))
end
def node_name
# TODO: might want to consider caching here; not doing it for now because
# I haven't thought through all of the possible scenarios that could
# cause the value to change after it had been cached.
result = puppet['node_name_value'].strip
end
def port_open? port
begin
Timeout.timeout SELECT_TIMEOUT do
TCPSocket.new(reachable_name, port).close
return true
end
rescue Errno::ECONNREFUSED, Timeout::Error
return false
end
end
def up?
begin
Socket.getaddrinfo( reachable_name, nil )
return true
rescue SocketError
return false
end
end
# Return the preferred method to reach the host: use the IP if available, then default to {#hostname}.
def reachable_name
self['ip'] || hostname
end
# Returning our PuppetConfigReader here allows users of the Host
# class to do things like `host.puppet['vardir']` to query the
# 'main' section or, if they want the configuration for a
# particular run type, `host.puppet('agent')['vardir']`
def puppet(command='agent')
PuppetConfigReader.new(self, command)
end
def []= k, v
@defaults[k] = v
end
def [] k
@defaults[k]
end
def has_key? k
@defaults.has_key?(k)
end
# The {#hostname} of this host.
def to_str
hostname
end
# The {#hostname} of this host.
def to_s
hostname
end
# Return the public name of the particular host, which may be different from the name of the host provided in
# the configuration file as some provisioners create random, unique hostnames.
def hostname
@defaults['vmhostname'] || @name
end
def + other
@name + other
end
def is_pe?
@options.is_pe?
end
# True if this is a PE run, or if the host has had a 'use-service' property set.
def use_service_scripts?
is_pe? || self['use-service']
end
# Mirrors the true/false value of the host's 'graceful-restarts' property,
# or falls back to the value of +is_using_passenger?+ if
# 'graceful-restarts' is nil, but only if this is not a PE run (foss only).
def graceful_restarts?
graceful =
if !self['graceful-restarts'].nil?
self['graceful-restarts']
else
!is_pe? && is_using_passenger?
end
graceful
end
# Modifies the host settings to indicate that it will be using passenger service scripts,
# (apache2) by default. Does nothing if this is a PE host, since it is already using
# passenger.
# @param [String] puppetservice Name of the service script that should be
# called to stop/start Puppet on this host. Defaults to 'apache2'.
def uses_passenger!(puppetservice = 'apache2')
if !is_pe?
self['passenger'] = true
self['puppetservice'] = puppetservice
self['use-service'] = true
end
return true
end
# True if this is a PE run, or if the host's 'passenger' property has been set.
def is_using_passenger?
is_pe? || self['passenger']
end
def log_prefix
if @defaults['vmhostname']
"#{self} (#{@name})"
else
self.to_s
end
end
#Determine the ip address of this host
def get_ip
@logger.warn("Uh oh, this should be handled by sub-classes but hasn't been")
end
#Return the ip address of this host
def ip
self[:ip] ||= get_ip
end
#Examine the host system to determine the architecture
#@return [Boolean] true if x86_64, false otherwise
def determine_if_x86_64
result = exec(Beaker::Command.new("arch | grep x86_64"), :acceptable_exit_codes => (0...127))
result.exit_code == 0
end
#@return [Boolean] true if x86_64, false otherwise
def is_x86_64?
@x86_64 ||= determine_if_x86_64
end
#Add the provided key/val to the current ssh environment
#@param [String] key The key to add the value to
#@param [String] val The value for the key
#@example
# host.add_env_var('PATH', '/usr/bin:PATH')
def add_env_var key, val
key = key.to_s.upcase
escaped_val = Regexp.escape(val).gsub('/', '\/').gsub(';', '\;')
env_file = self[:ssh_env_file]
#see if the key/value pair already exists
if exec(Beaker::Command.new("grep -e #{key}=.*#{escaped_val} #{env_file}"), :acceptable_exit_codes => (0..255) ).exit_code == 0
return #nothing to do here, key value pair already exists
#see if the key already exists
elsif exec(Beaker::Command.new("grep #{key} #{env_file}"), :acceptable_exit_codes => (0..255) ).exit_code == 0
exec(Beaker::SedCommand.new(self['HOSTS'][name]['platform'], "s/#{key}=/#{key}=#{escaped_val}:/", env_file))
else
exec(Beaker::Command.new("echo \"#{key}=#{val}\" >> #{env_file}"))
end
end
#Delete the provided key/val from the current ssh environment
#@param [String] key The key to delete the value from
#@param [String] val The value to delete for the key
#@example
# host.delete_env_var('PATH', '/usr/bin:PATH')
def delete_env_var key, val
val = Regexp.escape(val).gsub('/', '\/').gsub(';', '\;')
#if the key only has that single value remove the entire line
exec(Beaker::SedCommand.new(self['HOSTS'][name]['platform'], "/#{key}=#{val}$/d", self[:ssh_env_file]))
#if the key has multiple values and we only need to remove the provided val
exec(Beaker::SedCommand.new(self['HOSTS'][name]['platform'], "s/#{key}=\\(.*[:;]*\\)#{val}[:;]*/#{key}=\\1/", self[:ssh_env_file]))
end
def connection
@connection ||= SshConnection.connect( reachable_name,
self['user'],
self['ssh'], { :logger => @logger } )
end
def close
@connection.close if @connection
@connection = nil
end
def exec command, options={}
# I've always found this confusing
cmdline = command.cmd_line(self)
if options[:silent]
output_callback = nil
else
@logger.debug "\n#{log_prefix} #{Time.new.strftime('%H:%M:%S')}$ #{cmdline}"
output_callback = logger.method(:host_output)
end
unless $dry_run
# is this returning a result object?
# the options should come at the end of the method signature (rubyism)
# and they shouldn't be ssh specific
result = nil
seconds = Benchmark.realtime {
result = connection.execute(cmdline, options, output_callback)
}
if not options[:silent]
@logger.debug "\n#{log_prefix} executed in %0.2f seconds" % seconds
end
unless options[:silent]
# What?
result.log(@logger)
# No, TestCase has the knowledge about whether its failed, checking acceptable
# exit codes at the host level and then raising...
# is it necessary to break execution??
if !options[:accept_all_exit_codes] && !result.exit_code_in?(Array(options[:acceptable_exit_codes] || 0))
raise CommandFailure, "Host '#{self}' exited with #{result.exit_code} running:\n #{cmdline}\nLast #{@options[:trace_limit]} lines of output were:\n#{result.formatted_output(@options[:trace_limit])}"
end
end
# Danger, so we have to return this result?
result
end
end
# Create the provided directory structure on the host
# @param [String] dir The directory structure to create on the host
# @return [Boolean] True, if directory construction succeeded, otherwise False
def mkdir_p dir
if self['is_cygwin'].nil? or self['is_cygwin'] == true
cmd = "mkdir -p #{dir}"
else
cmd = "if not exist #{dir.gsub!('/','\\')} (md #{dir.gsub!('/','\\')})"
end
result = exec(Beaker::Command.new(cmd), :acceptable_exit_codes => [0, 1])
result.exit_code == 0
end
# scp files from the localhost to this test host, if a directory is provided it is recursively copied
# @param source [String] The path to the file/dir to upload
# @param target [String] The destination path on the host
# @param options [Hash{Symbol=>String}] Options to alter execution
# @option options [Array<String>] :ignore An array of file/dir paths that will not be copied to the host
def do_scp_to source, target, options
@logger.notify "localhost $ scp #{source} #{@name}:#{target} {:ignore => #{options[:ignore]}}"
result = Result.new(@name, [source, target])
has_ignore = options[:ignore] and not options[:ignore].empty?
# construct the regex for matching ignored files/dirs
ignore_re = nil
if has_ignore
ignore_arr = Array(options[:ignore]).map do |entry|
"((\/|\\A)#{entry}(\/|\\z))".gsub(/\./, '\.')
end
ignore_re = Regexp.new(ignore_arr.join('|'))
end
# either a single file, or a directory with no ignores
if not File.file?(source) and not File.directory?(source)
raise IOError, "No such file or directory - #{source}"
end
if File.file?(source) or (File.directory?(source) and not has_ignore)
source_file = source
if has_ignore and (source =~ ignore_re)
@logger.trace "After rejecting ignored files/dirs, there is no file to copy"
source_file = nil
result.stdout = "No files to copy"
result.exit_code = 1
end
if source_file
result = connection.scp_to(source_file, target, options, $dry_run)
@logger.trace result.stdout
end
else # a directory with ignores
dir_source = Dir.glob("#{source}/**/*").reject do |f|
f =~ ignore_re
end
@logger.trace "After rejecting ignored files/dirs, going to scp [#{dir_source.join(", ")}]"
# create necessary directory structure on host
# run this quietly (no STDOUT)
@logger.quiet(true)
required_dirs = (dir_source.map{ | dir | File.dirname(dir) }).uniq
require 'pathname'
required_dirs.each do |dir|
dir_path = Pathname.new(dir)
if dir_path.absolute?
mkdir_p(File.join(target, dir.gsub(source, '')))
else
mkdir_p( File.join(target, dir) )
end
end
@logger.quiet(false)
# copy each file to the host
dir_source.each do |s|
s_path = Pathname.new(s)
if s_path.absolute?
file_path = File.join(target, File.dirname(s).gsub(source,''))
else
file_path = File.join(target, File.dirname(s))
end
result = connection.scp_to(s, file_path, options, $dry_run)
@logger.trace result.stdout
end
end
return result
end
def do_scp_from source, target, options
@logger.debug "localhost $ scp #{@name}:#{source} #{target}"
result = connection.scp_from(source, target, options, $dry_run)
@logger.debug result.stdout
return result
end
end
[ 'windows', 'pswindows', 'unix', 'aix', 'mac' ].each do |lib|
require "beaker/host/#{lib}"
end
end
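do_scp_to above compiles the :ignore entries into one alternation regex and filters the glob results with it. The same filtering step rendered standalone, in Go purely for illustration:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// ignoreRegexp mirrors the Ruby construction: each entry must appear as a
// whole path segment, and all entries are OR-ed into a single pattern.
func ignoreRegexp(ignores []string) *regexp.Regexp {
	parts := make([]string, 0, len(ignores))
	for _, entry := range ignores {
		parts = append(parts, "((/|^)"+regexp.QuoteMeta(entry)+"(/|$))")
	}
	return regexp.MustCompile(strings.Join(parts, "|"))
}

func main() {
	re := ignoreRegexp([]string{".git", "tmp"})
	for _, p := range []string{"src/main.rb", "tmp/cache", "lib/.git/config"} {
		fmt.Printf("%-16s ignored: %v\n", p, re.MatchString(p))
	}
}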
| 1 | 8,792 | Hm, taking a second look over this, this is why we have the object inheritance structure that we do. This could be divided up by having a custom determine_if_x86_64 in the pswindows/exec hosts and then a default method in hosts.rb. That way all the custom ps windows work is in a single location. | voxpupuli-beaker | rb |
@@ -84,11 +84,13 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests
[InlineData(null, new byte[0])]
public void EncodesAsAscii(string input, byte[] expected)
{
- var writerBuffer = _pipe.Writer;
- var writer = new BufferWriter<PipeWriter>(writerBuffer);
+ var pipeWriter = _pipe.Writer;
+ var writer = new BufferWriter<PipeWriter>(pipeWriter);
writer.WriteAsciiNoValidation(input);
writer.Commit();
- writerBuffer.FlushAsync().GetAwaiter().GetResult();
+ pipeWriter.FlushAsync().GetAwaiter().GetResult();
+ pipeWriter.Complete();
+
var reader = _pipe.Reader.ReadAsync().GetAwaiter().GetResult();
if (expected.Length > 0) | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Buffers;
using System.IO.Pipelines;
using System.Text;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal.Http;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;
using Xunit;
namespace Microsoft.AspNetCore.Server.Kestrel.Core.Tests
{
public class PipelineExtensionTests : IDisposable
{
// ulong.MaxValue.ToString().Length
private const int _ulongMaxValueLength = 20;
private readonly Pipe _pipe;
private readonly MemoryPool<byte> _memoryPool = KestrelMemoryPool.Create();
public PipelineExtensionTests()
{
_pipe = new Pipe(new PipeOptions(_memoryPool, readerScheduler: PipeScheduler.Inline, writerScheduler: PipeScheduler.Inline, useSynchronizationContext: false));
}
public void Dispose()
{
_memoryPool.Dispose();
}
[Theory]
[InlineData(ulong.MinValue)]
[InlineData(ulong.MaxValue)]
[InlineData(4_8_15_16_23_42)]
public void WritesNumericToAscii(ulong number)
{
var writerBuffer = _pipe.Writer;
var writer = new BufferWriter<PipeWriter>(writerBuffer);
writer.WriteNumeric(number);
writer.Commit();
writerBuffer.FlushAsync().GetAwaiter().GetResult();
var reader = _pipe.Reader.ReadAsync().GetAwaiter().GetResult();
var numAsStr = number.ToString();
var expected = Encoding.ASCII.GetBytes(numAsStr);
AssertExtensions.Equal(expected, reader.Buffer.Slice(0, numAsStr.Length).ToArray());
}
[Theory]
[InlineData(1)]
[InlineData(_ulongMaxValueLength / 2)]
[InlineData(_ulongMaxValueLength - 1)]
public void WritesNumericAcrossSpanBoundaries(int gapSize)
{
var writerBuffer = _pipe.Writer;
var writer = new BufferWriter<PipeWriter>(writerBuffer);
// almost fill up the first block
var spacer = new byte[writer.Span.Length - gapSize];
writer.Write(spacer);
var bufferLength = writer.Span.Length;
writer.WriteNumeric(ulong.MaxValue);
Assert.NotEqual(bufferLength, writer.Span.Length);
writer.Commit();
writerBuffer.FlushAsync().GetAwaiter().GetResult();
var reader = _pipe.Reader.ReadAsync().GetAwaiter().GetResult();
var numAsString = ulong.MaxValue.ToString();
var written = reader.Buffer.Slice(spacer.Length, numAsString.Length);
Assert.False(written.IsSingleSegment, "The buffer should cross spans");
AssertExtensions.Equal(Encoding.ASCII.GetBytes(numAsString), written.ToArray());
}
[Theory]
[InlineData("\0abcxyz", new byte[] { 0, 97, 98, 99, 120, 121, 122 })]
[InlineData("!#$%i", new byte[] { 33, 35, 36, 37, 105 })]
[InlineData("!#$%", new byte[] { 33, 35, 36, 37 })]
[InlineData("!#$", new byte[] { 33, 35, 36 })]
[InlineData("!#", new byte[] { 33, 35 })]
[InlineData("!", new byte[] { 33 })]
// null or empty
[InlineData("", new byte[0])]
[InlineData(null, new byte[0])]
public void EncodesAsAscii(string input, byte[] expected)
{
var writerBuffer = _pipe.Writer;
var writer = new BufferWriter<PipeWriter>(writerBuffer);
writer.WriteAsciiNoValidation(input);
writer.Commit();
writerBuffer.FlushAsync().GetAwaiter().GetResult();
var reader = _pipe.Reader.ReadAsync().GetAwaiter().GetResult();
if (expected.Length > 0)
{
AssertExtensions.Equal(
expected,
reader.Buffer.ToArray());
}
else
{
Assert.Equal(0, reader.Buffer.Length);
}
}
[Theory]
// non-ascii characters stored in 32 bits
[InlineData("𤭢𐐝")]
// non-ascii characters stored in 16 bits
[InlineData("ñ٢⛄⛵")]
public void WriteAsciiNoValidationWritesOnlyOneBytePerChar(string input)
{
// WriteAscii doesn't validate whether characters are in the ASCII range
// but it shouldn't produce more than one byte per character
var writerBuffer = _pipe.Writer;
var writer = new BufferWriter<PipeWriter>(writerBuffer);
writer.WriteAsciiNoValidation(input);
writer.Commit();
writerBuffer.FlushAsync().GetAwaiter().GetResult();
var reader = _pipe.Reader.ReadAsync().GetAwaiter().GetResult();
Assert.Equal(input.Length, reader.Buffer.Length);
}
[Fact]
public void WriteAsciiNoValidation()
{
const byte maxAscii = 0x7f;
var writerBuffer = _pipe.Writer;
var writer = new BufferWriter<PipeWriter>(writerBuffer);
for (var i = 0; i < maxAscii; i++)
{
writer.WriteAsciiNoValidation(new string((char)i, 1));
}
writer.Commit();
writerBuffer.FlushAsync().GetAwaiter().GetResult();
var reader = _pipe.Reader.ReadAsync().GetAwaiter().GetResult();
var data = reader.Buffer.Slice(0, maxAscii).ToArray();
for (var i = 0; i < maxAscii; i++)
{
Assert.Equal(i, data[i]);
}
}
[Theory]
[InlineData(2, 1)]
[InlineData(3, 1)]
[InlineData(4, 2)]
[InlineData(5, 3)]
[InlineData(7, 4)]
[InlineData(8, 3)]
[InlineData(8, 4)]
[InlineData(8, 5)]
[InlineData(100, 48)]
public void WritesAsciiAcrossBlockBoundaries(int stringLength, int gapSize)
{
var testString = new string(' ', stringLength);
var writerBuffer = _pipe.Writer;
var writer = new BufferWriter<PipeWriter>(writerBuffer);
// almost fill up the first block
var spacer = new byte[writer.Span.Length - gapSize];
writer.Write(spacer);
Assert.Equal(gapSize, writer.Span.Length);
var bufferLength = writer.Span.Length;
writer.WriteAsciiNoValidation(testString);
Assert.NotEqual(bufferLength, writer.Span.Length);
writer.Commit();
writerBuffer.FlushAsync().GetAwaiter().GetResult();
var reader = _pipe.Reader.ReadAsync().GetAwaiter().GetResult();
var written = reader.Buffer.Slice(spacer.Length, stringLength);
Assert.False(written.IsSingleSegment, "The buffer should cross spans");
AssertExtensions.Equal(Encoding.ASCII.GetBytes(testString), written.ToArray());
}
}
}
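Every test above follows the same write, commit, flush, read round-trip. As a language-neutral analogy of that shape (Go's io.Pipe here, not the .NET System.IO.Pipelines API):

package main

import (
	"fmt"
	"io"
)

func main() {
	r, w := io.Pipe()
	go func() {
		w.Write([]byte("4815162342")) // producer writes the bytes
		w.Close()                     // analogous to completing the writer
	}()
	buf, _ := io.ReadAll(r) // consumer reads until the writer completes
	fmt.Println(string(buf))
}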
| 1 | 15,377 | We have to `.Complete` now because of empty/null string test cases. | aspnet-KestrelHttpServer | .cs |
@@ -166,7 +166,7 @@ module Beaker
def scp_to source, target, options = {}, dry_run = false
return if dry_run
- options[:recursive] = File.directory?(source) if options[:recursive].nil?
+ options[:recursive] = File.directory?(source)
options[:chunk_size] = options[:chunk_size] || 16384
result = Result.new(@hostname, [source, target]) | 1 | require 'socket'
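The removed line let a caller-supplied :recursive win; the new line always derives it from the source, which is what the attached review question probes. The two policies side by side, sketched in Go with hypothetical names:

package main

import (
	"fmt"
	"os"
)

type scpOptions struct{ recursive *bool } // nil means "not specified"

// oldPolicy: an explicit caller choice is honored.
func oldPolicy(opts scpOptions, source string) bool {
	if opts.recursive != nil {
		return *opts.recursive
	}
	info, err := os.Stat(source)
	return err == nil && info.IsDir()
}

// newPolicy: always derived from the source; the caller cannot opt out.
func newPolicy(source string) bool {
	info, err := os.Stat(source)
	return err == nil && info.IsDir()
}

func main() {
	off := false
	fmt.Println(oldPolicy(scpOptions{recursive: &off}, ".")) // false
	fmt.Println(newPolicy("."))                              // true: "." is a directory
}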
require 'timeout'
require 'net/scp'
module Beaker
class SshConnection
attr_accessor :logger
RETRYABLE_EXCEPTIONS = [
SocketError,
Timeout::Error,
Errno::ETIMEDOUT,
Errno::EHOSTDOWN,
Errno::EHOSTUNREACH,
Errno::ECONNREFUSED,
Errno::ECONNRESET,
Errno::ENETUNREACH,
Net::SSH::Disconnect,
Net::SSH::AuthenticationFailed,
]
def initialize hostname, user = nil, ssh_opts = {}, options = {}
@hostname = hostname
@user = user
@ssh_opts = ssh_opts
@logger = options[:logger]
end
def self.connect hostname, user = 'root', ssh_opts = {}, options = {}
connection = new hostname, user, ssh_opts, options
connection.connect
connection
end
def connect
try = 1
last_wait = 0
wait = 1
@ssh ||= begin
Net::SSH.start(@hostname, @user, @ssh_opts)
rescue *RETRYABLE_EXCEPTIONS => e
if try <= 11
@logger.warn "Try #{try} -- Host #{@hostname} unreachable: #{e.message}"
@logger.warn "Trying again in #{wait} seconds"
sleep wait
(last_wait, wait) = wait, last_wait + wait
try += 1
retry
else
# why is the logger not passed into this class?
@logger.error "Failed to connect to #{@hostname}"
raise
end
end
@logger.debug "Created ssh connection to #{@hostname}, user: #{@user}, opts: #{@ssh_opts}"
self
end
def close
begin
@ssh.close if @ssh
rescue
@ssh.shutdown!
end
@ssh = nil
end
def try_to_execute command, options = {}, stdout_callback = nil,
stderr_callback = stdout_callback
result = Result.new(@hostname, command)
# why are we getting to this point on a dry run anyways?
# also... the host creates connections through the class method,
# which automatically connects, so you can't do a dry run unless you also
# can connect to your hosts?
return result if options[:dry_run]
@ssh.open_channel do |channel|
request_terminal_for( channel, command ) if options[:pty]
channel.exec(command) do |terminal, success|
abort "FAILED: to execute command on a new channel on #{@hostname}" unless success
register_stdout_for terminal, result, stdout_callback
register_stderr_for terminal, result, stderr_callback
register_exit_code_for terminal, result
process_stdin_for( terminal, options[:stdin] ) if options[:stdin]
end
end
# Process SSH activity until we stop doing that - which is when our
# channel is finished with...
@ssh.loop
result.finalize!
@logger.last_result = result
result
end
def execute command, options = {}, stdout_callback = nil,
stderr_callback = stdout_callback
attempt = true
begin
result = try_to_execute(command, options, stdout_callback, stderr_callback)
rescue *RETRYABLE_EXCEPTIONS => e
if attempt
attempt = false
@logger.error "Command execution failed, attempting to reconnect to #{@hostname}"
close
connect
retry
else
raise
end
end
result
end
def request_terminal_for channel, command
channel.request_pty do |ch, success|
if success
@logger.info "Allocated a PTY on #{@hostname} for #{command.inspect}"
else
abort "FAILED: could not allocate a pty when requested on " +
"#{@hostname} for #{command.inspect}"
end
end
end
def register_stdout_for channel, output, callback = nil
channel.on_data do |ch, data|
callback[data] if callback
output.stdout << data
output.output << data
end
end
def register_stderr_for channel, output, callback = nil
channel.on_extended_data do |ch, type, data|
if type == 1
callback[data] if callback
output.stderr << data
output.output << data
end
end
end
def register_exit_code_for channel, output
channel.on_request("exit-status") do |ch, data|
output.exit_code = data.read_long
end
end
def process_stdin_for channel, stdin
# queue stdin data, force it to packets, and signal eof: this
# triggers action in many remote commands, notably including
# 'puppet apply'. It must be sent at some point before the rest
# of the action.
channel.send_data stdin.to_s
channel.process
channel.eof!
end
def scp_to source, target, options = {}, dry_run = false
return if dry_run
options[:recursive] = File.directory?(source) if options[:recursive].nil?
options[:chunk_size] = options[:chunk_size] || 16384
result = Result.new(@hostname, [source, target])
result.stdout = "\n"
@ssh.scp.upload! source, target, options do |ch, name, sent, total|
result.stdout << "\tcopying %s: %10d/%d\n" % [name, sent, total]
end
# Setting these values allows reporting via result.log(test_name)
result.stdout << " SCP'ed file #{source} to #{@hostname}:#{target}"
# Net::Scp always returns 0, so just set the return code to 0.
result.exit_code = 0
result.finalize!
return result
end
def scp_from source, target, options = {}, dry_run = false
return if dry_run
options[:recursive] = true if options[:recursive].nil?
options[:chunk_size] = options[:chunk_size] || 16384
result = Result.new(@hostname, [source, target])
result.stdout = "\n"
@ssh.scp.download! source, target, options do |ch, name, sent, total|
result.stdout << "\tcopying %s: %10d/%d\n" % [name, sent, total]
end
# Setting these values allows reporting via result.log(test_name)
result.stdout << " SCP'ed file #{@hostname}:#{source} to #{target}"
# Net::Scp always returns 0, so just set the return code to 0.
result.exit_code = 0
result.finalize!
result
end
end
end
| 1 | 7,292 | Is there still a way to specify no recursion? | voxpupuli-beaker | rb |
@@ -438,6 +438,15 @@ Loop:
return w, nil
}
+// Tool is used to communicate the tool's name to the user.
+type Tool struct {
+
+ // HumanReadableName is used for error messages, for example: "image import".
+ HumanReadableName string
+ // URISafeName is used programmatically, e.g. "image-import"
+ URISafeName string
+}
+
// EnvironmentSettings controls the resources that are used during tool execution.
type EnvironmentSettings struct {
// Location of workflows | 1 | // Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daisyutils
import (
"context"
"errors"
"fmt"
"os"
"os/signal"
"regexp"
"sort"
"strings"
stringutils "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/string"
"github.com/GoogleCloudPlatform/compute-image-tools/daisy"
)
const (
// BuildIDOSEnvVarName is the os env var name to get build id
BuildIDOSEnvVarName = "BUILD_ID"
translateFailedPrefix = "TranslateFailed"
)
// TranslationSettings includes information that needs to be added to a disk or image after it is imported,
// for a particular OS and version.
type TranslationSettings struct {
// GcloudOsFlag is the user-facing string corresponding to this OS, version, and licensing mode.
// It is passed as a value of the `--os` flag.
GcloudOsFlag string
// LicenseURI is the GCP Compute license corresponding to this OS, version, and licensing mode:
// https://cloud.google.com/compute/docs/reference/rest/v1/licenses
LicenseURI string
// WorkflowPath is the path to a Daisy json workflow, relative to the
// `daisy_workflows/image_import` directory.
WorkflowPath string
}
var (
supportedOS = []TranslationSettings{
// Enterprise Linux
{
GcloudOsFlag: "centos-7",
WorkflowPath: "enterprise_linux/translate_centos_7.wf.json",
LicenseURI: "projects/centos-cloud/global/licenses/centos-7",
}, {
GcloudOsFlag: "centos-8",
WorkflowPath: "enterprise_linux/translate_centos_8.wf.json",
LicenseURI: "projects/centos-cloud/global/licenses/centos-8",
}, {
GcloudOsFlag: "rhel-6",
WorkflowPath: "enterprise_linux/translate_rhel_6_licensed.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-6-server",
}, {
GcloudOsFlag: "rhel-6-byol",
WorkflowPath: "enterprise_linux/translate_rhel_6_byol.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-6-byol",
}, {
GcloudOsFlag: "rhel-7",
WorkflowPath: "enterprise_linux/translate_rhel_7_licensed.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-7-server",
}, {
GcloudOsFlag: "rhel-7-byol",
WorkflowPath: "enterprise_linux/translate_rhel_7_byol.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-7-byol",
}, {
GcloudOsFlag: "rhel-8",
WorkflowPath: "enterprise_linux/translate_rhel_8_licensed.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-8-server",
}, {
GcloudOsFlag: "rhel-8-byol",
WorkflowPath: "enterprise_linux/translate_rhel_8_byol.wf.json",
LicenseURI: "projects/rhel-cloud/global/licenses/rhel-8-byos",
}, {
GcloudOsFlag: "rocky-8",
WorkflowPath: "enterprise_linux/translate_rocky_8.wf.json",
LicenseURI: "projects/rocky-linux-cloud/global/licenses/rocky-linux-8",
},
// SUSE
{
GcloudOsFlag: "opensuse-15",
WorkflowPath: "suse/translate_opensuse_15.wf.json",
LicenseURI: "projects/opensuse-cloud/global/licenses/opensuse-leap-42",
}, {
GcloudOsFlag: "sles-12",
WorkflowPath: "suse/translate_sles_12.wf.json",
LicenseURI: "projects/suse-cloud/global/licenses/sles-12",
}, {
GcloudOsFlag: "sles-12-byol",
WorkflowPath: "suse/translate_sles_12_byol.wf.json",
LicenseURI: "projects/suse-byos-cloud/global/licenses/sles-12-byos",
}, {
GcloudOsFlag: "sles-sap-12",
WorkflowPath: "suse/translate_sles_sap_12.wf.json",
LicenseURI: "projects/suse-sap-cloud/global/licenses/sles-sap-12",
}, {
GcloudOsFlag: "sles-sap-12-byol",
WorkflowPath: "suse/translate_sles_sap_12_byol.wf.json",
LicenseURI: "projects/suse-byos-cloud/global/licenses/sles-sap-12-byos",
}, {
GcloudOsFlag: "sles-15",
WorkflowPath: "suse/translate_sles_15.wf.json",
LicenseURI: "projects/suse-cloud/global/licenses/sles-15",
}, {
GcloudOsFlag: "sles-15-byol",
WorkflowPath: "suse/translate_sles_15_byol.wf.json",
LicenseURI: "projects/suse-byos-cloud/global/licenses/sles-15-byos",
}, {
GcloudOsFlag: "sles-sap-15",
WorkflowPath: "suse/translate_sles_sap_15.wf.json",
LicenseURI: "projects/suse-sap-cloud/global/licenses/sles-sap-15",
}, {
GcloudOsFlag: "sles-sap-15-byol",
WorkflowPath: "suse/translate_sles_sap_15_byol.wf.json",
LicenseURI: "projects/suse-byos-cloud/global/licenses/sles-sap-15-byos",
},
// Debian
{
GcloudOsFlag: "debian-8",
WorkflowPath: "debian/translate_debian_8.wf.json",
LicenseURI: "projects/debian-cloud/global/licenses/debian-8-jessie",
}, {
GcloudOsFlag: "debian-9",
WorkflowPath: "debian/translate_debian_9.wf.json",
LicenseURI: "projects/debian-cloud/global/licenses/debian-9-stretch",
}, {
GcloudOsFlag: "debian-10",
WorkflowPath: "debian/translate_debian_10.wf.json",
LicenseURI: "projects/debian-cloud/global/licenses/debian-10-buster",
}, {
GcloudOsFlag: "debian-11",
WorkflowPath: "debian/translate_debian_11.wf.json",
LicenseURI: "projects/debian-cloud/global/licenses/debian-11-bullseye",
},
// Ubuntu
{
GcloudOsFlag: "ubuntu-1404",
WorkflowPath: "ubuntu/translate_ubuntu_1404.wf.json",
LicenseURI: "projects/ubuntu-os-cloud/global/licenses/ubuntu-1404-trusty",
}, {
GcloudOsFlag: "ubuntu-1604",
WorkflowPath: "ubuntu/translate_ubuntu_1604.wf.json",
LicenseURI: "projects/ubuntu-os-cloud/global/licenses/ubuntu-1604-xenial",
}, {
GcloudOsFlag: "ubuntu-1804",
WorkflowPath: "ubuntu/translate_ubuntu_1804.wf.json",
LicenseURI: "projects/ubuntu-os-cloud/global/licenses/ubuntu-1804-lts",
}, {
GcloudOsFlag: "ubuntu-2004",
WorkflowPath: "ubuntu/translate_ubuntu_2004.wf.json",
LicenseURI: "projects/ubuntu-os-cloud/global/licenses/ubuntu-2004-lts",
},
// Windows
{
GcloudOsFlag: "windows-7-x64-byol",
WorkflowPath: "windows/translate_windows_7_x64_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-7-x64-byol",
}, {
GcloudOsFlag: "windows-7-x86-byol",
WorkflowPath: "windows/translate_windows_7_x86_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-7-x86-byol",
}, {
GcloudOsFlag: "windows-8-x64-byol",
WorkflowPath: "windows/translate_windows_8_x64_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-8-x64-byol",
}, {
GcloudOsFlag: "windows-8-x86-byol",
WorkflowPath: "windows/translate_windows_8_x86_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-8-x86-byol",
}, {
GcloudOsFlag: "windows-10-x64-byol",
WorkflowPath: "windows/translate_windows_10_x64_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-10-x64-byol",
}, {
GcloudOsFlag: "windows-10-x86-byol",
WorkflowPath: "windows/translate_windows_10_x86_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-10-x86-byol",
}, {
GcloudOsFlag: "windows-2008r2",
WorkflowPath: "windows/translate_windows_2008_r2.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2008-r2-dc",
}, {
GcloudOsFlag: "windows-2008r2-byol",
WorkflowPath: "windows/translate_windows_2008_r2_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2008-r2-byol",
}, {
GcloudOsFlag: "windows-2012",
WorkflowPath: "windows/translate_windows_2012.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2012-dc",
}, {
GcloudOsFlag: "windows-2012-byol",
WorkflowPath: "windows/translate_windows_2012_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2012-byol",
}, {
GcloudOsFlag: "windows-2012r2",
WorkflowPath: "windows/translate_windows_2012_r2.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2012-r2-dc",
}, {
GcloudOsFlag: "windows-2012r2-byol",
WorkflowPath: "windows/translate_windows_2012_r2_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2012-r2-byol",
}, {
GcloudOsFlag: "windows-2016",
WorkflowPath: "windows/translate_windows_2016.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2016-dc",
}, {
GcloudOsFlag: "windows-2016-byol",
WorkflowPath: "windows/translate_windows_2016_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2016-byol",
}, {
GcloudOsFlag: "windows-2019",
WorkflowPath: "windows/translate_windows_2019.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2019-dc",
}, {
GcloudOsFlag: "windows-2019-byol",
WorkflowPath: "windows/translate_windows_2019_byol.wf.json",
LicenseURI: "projects/windows-cloud/global/licenses/windows-server-2019-byol",
},
}
// legacyIDs maps a legacy identifier to its replacement.
legacyIDs = map[string]string{
"windows-7-byol": "windows-7-x64-byol",
"windows-8-1-x64-byol": "windows-8-x64-byol",
"windows-10-byol": "windows-10-x64-byol",
}
privacyRegex = regexp.MustCompile(`\[Privacy\->.*?<\-Privacy\]`)
privacyTagRegex = regexp.MustCompile(`(\[Privacy\->)|(<\-Privacy\])`)
debianWorkerRegex = regexp.MustCompile("projects/compute-image-tools/global/images/family/debian-\\d+-worker")
)
// GetSortedOSIDs returns the supported OS identifiers, sorted.
func GetSortedOSIDs() []string {
choices := make([]string, 0, len(supportedOS))
for _, k := range supportedOS {
choices = append(choices, k.GcloudOsFlag)
}
sort.Strings(choices)
return choices
}
// ValidateOS validates that osID is supported by Daisy image import
func ValidateOS(osID string) error {
_, err := GetTranslationSettings(osID)
return err
}
// GetTranslationSettings returns parameters required for translating a particular OS, version,
// and licensing mode to run on GCE.
//
// An error is returned if the OS, version, and licensing mode is not supported for import.
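// For example, the legacy ID "windows-7-byol" is first rewritten to
// "windows-7-x64-byol" via legacyIDs before supportedOS is scanned; an
// unknown ID falls through the scan and yields an error listing the
// sorted allowed values.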
func GetTranslationSettings(osID string) (spec TranslationSettings, err error) {
if osID == "" {
return spec, errors.New("osID is empty")
}
if replacement := legacyIDs[osID]; replacement != "" {
osID = replacement
}
for _, choice := range supportedOS {
if choice.GcloudOsFlag == osID {
return choice, nil
}
}
allowedValuesMsg := fmt.Sprintf("Allowed values: %v", GetSortedOSIDs())
return spec, daisy.Errf("os `%v` is invalid. "+allowedValuesMsg, osID)
}
// UpdateToUEFICompatible marks workflow resources (disks and images) to be UEFI
// compatible by adding "UEFI_COMPATIBLE" to GuestOSFeatures. Debian workers
// are excluded until UEFI becomes the default boot method.
func UpdateToUEFICompatible(workflow *daisy.Workflow) {
workflow.IterateWorkflowSteps(func(step *daisy.Step) {
if step.CreateDisks != nil {
for _, disk := range *step.CreateDisks {
// for the time being, don't run Debian worker in UEFI mode
if debianWorkerRegex.MatchString(disk.SourceImage) {
continue
}
// also, don't run Windows bootstrap worker in UEFI mode
if strings.Contains(disk.SourceImage, "projects/windows-cloud/global/images/family/windows-2019-core") && strings.Contains(disk.Name, "disk-bootstrap") {
continue
}
disk.Disk.GuestOsFeatures = daisy.CombineGuestOSFeatures(disk.Disk.GuestOsFeatures, "UEFI_COMPATIBLE")
}
}
if step.CreateImages != nil {
for _, image := range step.CreateImages.Images {
image.GuestOsFeatures = stringutils.CombineStringSlices(image.GuestOsFeatures, "UEFI_COMPATIBLE")
image.Image.GuestOsFeatures = daisy.CombineGuestOSFeatures(image.Image.GuestOsFeatures, "UEFI_COMPATIBLE")
}
for _, image := range step.CreateImages.ImagesBeta {
image.GuestOsFeatures = stringutils.CombineStringSlices(image.GuestOsFeatures, "UEFI_COMPATIBLE")
image.Image.GuestOsFeatures = daisy.CombineGuestOSFeaturesBeta(image.Image.GuestOsFeatures, "UEFI_COMPATIBLE")
}
}
})
}
// RemovePrivacyLogInfo removes privacy log information.
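// For example, "read [Privacy->/tmp/disk.vmdk<-Privacy] failed" becomes
// "read  failed", while any message containing "TranslateFailed" collapses
// to just "TranslateFailed".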
func RemovePrivacyLogInfo(message string) string {
// Since translation scripts vary and it is hard to predict their output, we have to hide the
// details and keep only "TranslateFailed"
if strings.Contains(message, translateFailedPrefix) {
return translateFailedPrefix
}
// All import/export bash scripts enclose privacy info inside "[Privacy-> XXX <-Privacy]". Let's
// remove it for privacy.
message = privacyRegex.ReplaceAllString(message, "")
return message
}
// RemovePrivacyLogTag removes privacy log tag.
func RemovePrivacyLogTag(message string) string {
// All import/export bash scripts enclose privacy info inside a pair of tag "[Privacy->XXX<-Privacy]".
// Let's remove the tag to improve the readability.
message = privacyTagRegex.ReplaceAllString(message, "")
return message
}
// PostProcessDErrorForNetworkFlag determines whether to show more hints for network flag
func PostProcessDErrorForNetworkFlag(action string, err error, network string, w *daisy.Workflow) {
if derr, ok := err.(daisy.DError); ok {
if derr.CausedByErrType("networkResourceDoesNotExist") && network == "" {
w.LogWorkflowInfo("A VPC network is required for running %v,"+
" and the default VPC network does not exist in your project. You will need to"+
" specify a VPC network with the --network flag. For more information about"+
" VPC networks, see https://cloud.google.com/vpc.", action)
}
}
}
// RunWorkflowWithCancelSignal runs a Daisy workflow while listening for a Ctrl-C (interrupt) signal
func RunWorkflowWithCancelSignal(ctx context.Context, w *daisy.Workflow) error {
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func(w *daisy.Workflow) {
select {
case <-c:
w.LogWorkflowInfo("\nCtrl-C caught, sending cancel signal to %q...\n", w.Name)
w.CancelWorkflow()
case <-w.Cancel:
}
}(w)
return w.Run(ctx)
}
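// Illustrative usage (an assumption, not part of the original file): a caller
// would typically parse a workflow and then run it with cancel handling:
//
//	w, err := ParseWorkflow(path, vars, project, zone, gcsPath, oauth, timeout,
//		endpoint, false, false, false)
//	if err != nil {
//		return err
//	}
//	return RunWorkflowWithCancelSignal(context.Background(), w)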
// NewStep creates a new step for the workflow along with dependencies.
func NewStep(w *daisy.Workflow, name string, dependencies ...*daisy.Step) (*daisy.Step, error) {
s, err := w.NewStep(name)
if err != nil {
return nil, err
}
err = w.AddDependency(s, dependencies...)
return s, err
}
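// Illustrative usage (an assumption, not part of the original file):
//
//	prepare, _ := NewStep(w, "prepare-disk")
//	translate, _ := NewStep(w, "translate", prepare) // "translate" waits on "prepare-disk"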
// GetResourceID gets resource id from its URI. Definition of resource ID:
// https://cloud.google.com/apis/design/resource_names#resource_id
func GetResourceID(resourceURI string) string {
dm := strings.Split(resourceURI, "/")
return dm[len(dm)-1]
}
// GetDeviceURI gets a URI for a device based on its attributes. A device is a disk
// attached to an instance.
func GetDeviceURI(project, zone, name string) string {
return fmt.Sprintf("projects/%v/zones/%v/devices/%v", project, zone, name)
}
// GetDiskURI gets a URI for a disk based on its attributes. Introduction
// to a disk resource: https://cloud.google.com/compute/docs/reference/rest/v1/disks
func GetDiskURI(project, zone, name string) string {
return fmt.Sprintf("projects/%v/zones/%v/disks/%v", project, zone, name)
}
// GetInstanceURI gets a URI for an instance based on its attributes. Introduction
// to an instance resource: https://cloud.google.com/compute/docs/reference/rest/v1/instances
func GetInstanceURI(project, zone, name string) string {
return fmt.Sprintf("projects/%v/zones/%v/instances/%v", project, zone, name)
}
// ParseWorkflow parses a Daisy workflow file and returns a Daisy workflow object, or an error in case of failure
func ParseWorkflow(path string, varMap map[string]string, project, zone, gcsPath, oauth, dTimeout, cEndpoint string, disableGCSLogs, disableCloudLogs, disableStdoutLogs bool) (*daisy.Workflow, error) {
w, err := daisy.NewFromFile(path)
if err != nil {
return nil, err
}
Loop:
for k, v := range varMap {
for wv := range w.Vars {
if k == wv {
w.AddVar(k, v)
continue Loop
}
}
return nil, daisy.Errf("unknown workflow Var %q passed to Workflow %q", k, w.Name)
}
EnvironmentSettings{
Project: project,
Zone: zone,
GCSPath: gcsPath,
OAuth: oauth,
Timeout: dTimeout,
ComputeEndpoint: cEndpoint,
DisableGCSLogs: disableGCSLogs,
DisableCloudLogs: disableCloudLogs,
DisableStdoutLogs: disableStdoutLogs,
}.ApplyToWorkflow(w)
return w, nil
}
// EnvironmentSettings controls the resources that are used during tool execution.
type EnvironmentSettings struct {
// Location of workflows
WorkflowDirectory string
// Fields from daisy.Workflow
Project, Zone, GCSPath, OAuth, Timeout, ComputeEndpoint string
DisableGCSLogs, DisableCloudLogs, DisableStdoutLogs bool
// An optional prefix to include in the bracketed portion of daisy's stdout logs.
// Gcloud does a prefix match to determine whether to show a log line to a user.
//
// With a prefix of `disk-1`, for example, the workflow in `importer.NewDaisyInflater`
// emits log messages starting with `[disk-1-inflate]`.
DaisyLogLinePrefix string
// Worker instance customizations
Network, Subnet string
ComputeServiceAccount string
NoExternalIP bool
Labels map[string]string
ExecutionID string
StorageLocation string
}
// ApplyToWorkflow sets fields on daisy.Workflow from the environment settings.
func (env EnvironmentSettings) ApplyToWorkflow(w *daisy.Workflow) {
w.Project = env.Project
w.Zone = env.Zone
if env.GCSPath != "" {
w.GCSPath = env.GCSPath
}
if env.OAuth != "" {
w.OAuthPath = env.OAuth
}
if env.Timeout != "" {
w.DefaultTimeout = env.Timeout
}
if env.ComputeEndpoint != "" {
w.ComputeEndpoint = env.ComputeEndpoint
}
if env.DisableGCSLogs {
w.DisableGCSLogging()
}
if env.DisableCloudLogs {
w.DisableCloudLogging()
}
if env.DisableStdoutLogs {
w.DisableStdoutLogging()
}
}
// UpdateAllInstanceNoExternalIP updates all Create Instance steps in the workflow to operate
// when no external IP access is allowed by the VPC Daisy workflow is running in.
func UpdateAllInstanceNoExternalIP(workflow *daisy.Workflow, noExternalIP bool) {
if !noExternalIP {
return
}
(&RemoveExternalIPModifier{}).Modify(workflow)
}
| 1 | 13,909 | How is this name being URI-safe related to it being used programmatically? | GoogleCloudPlatform-compute-image-tools | go |
@@ -358,14 +358,14 @@ module.exports = class ProviderView {
state = this.plugin.getPluginState()
state.selectedFolders[folderId] = { loading: false, files: files }
this.plugin.setPluginState({ selectedFolders: folders })
- const dashboard = this.plugin.uppy.getPlugin('Dashboard')
+
let message
if (files.length) {
- message = dashboard.i18n('folderAdded', {
+ message = this.plugin.uppy.i18n('folderAdded', {
smart_count: files.length, folder: folder.name
})
} else {
- message = dashboard.i18n('emptyFolderAdded')
+ message = this.plugin.uppy.i18n('emptyFolderAdded')
}
this.plugin.uppy.info(message)
}).catch((e) => { | 1 | const { h, Component } = require('preact')
const AuthView = require('./AuthView')
const Browser = require('./Browser')
const LoaderView = require('./Loader')
const generateFileID = require('@uppy/utils/lib/generateFileID')
const getFileType = require('@uppy/utils/lib/getFileType')
const isPreviewSupported = require('@uppy/utils/lib/isPreviewSupported')
/**
* Array.prototype.findIndex ponyfill for old browsers.
*/
function findIndex (array, predicate) {
for (let i = 0; i < array.length; i++) {
if (predicate(array[i])) return i
}
return -1
}
// location.origin does not exist in IE
function getOrigin () {
if ('origin' in location) {
return location.origin // eslint-disable-line compat/compat
}
return `${location.protocol}//${location.hostname}${location.port ? `:${location.port}` : ''}`
}
class CloseWrapper extends Component {
componentWillUnmount () {
this.props.onUnmount()
}
render () {
return this.props.children[0]
}
}
/**
* Class to easily generate generic views for Provider plugins
*/
module.exports = class ProviderView {
static VERSION = require('../package.json').version
/**
* @param {Object} plugin instance of the plugin
*/
constructor (plugin, opts) {
this.plugin = plugin
this.provider = opts.provider
// set default options
const defaultOptions = {
viewType: 'list',
showTitles: true,
showFilter: true,
showBreadcrumbs: true
}
// merge default options with the ones set by user
this.opts = { ...defaultOptions, ...opts }
// Logic
this.addFile = this.addFile.bind(this)
this.filterItems = this.filterItems.bind(this)
this.filterQuery = this.filterQuery.bind(this)
this.toggleSearch = this.toggleSearch.bind(this)
this.getFolder = this.getFolder.bind(this)
this.getNextFolder = this.getNextFolder.bind(this)
this.logout = this.logout.bind(this)
this.preFirstRender = this.preFirstRender.bind(this)
this.handleAuth = this.handleAuth.bind(this)
this.sortByTitle = this.sortByTitle.bind(this)
this.sortByDate = this.sortByDate.bind(this)
this.isActiveRow = this.isActiveRow.bind(this)
this.isChecked = this.isChecked.bind(this)
this.toggleCheckbox = this.toggleCheckbox.bind(this)
this.handleError = this.handleError.bind(this)
this.handleScroll = this.handleScroll.bind(this)
this.donePicking = this.donePicking.bind(this)
this.cancelPicking = this.cancelPicking.bind(this)
this.clearSelection = this.clearSelection.bind(this)
// Visual
this.render = this.render.bind(this)
this.clearSelection()
}
tearDown () {
// Nothing.
}
_updateFilesAndFolders (res, files, folders) {
this.nextPagePath = res.nextPagePath
res.items.forEach((item) => {
if (item.isFolder) {
folders.push(item)
} else {
files.push(item)
}
})
this.plugin.setPluginState({ folders, files })
}
/**
* Called only the first time the provider view is rendered.
* Kind of like an init function.
*/
preFirstRender () {
this.plugin.setPluginState({ didFirstRender: true })
this.plugin.onFirstRender()
}
/**
* Based on folder ID, fetch a new folder and add it to the plugin state
*
* @param {string} id Folder id
* @param {string} name Folder name
* @returns {Promise} Folders/files in folder
*/
getFolder (id, name) {
return this._loaderWrapper(
this.provider.list(id),
(res) => {
let folders = []
let files = []
let updatedDirectories
const state = this.plugin.getPluginState()
const index = findIndex(state.directories, (dir) => id === dir.id)
if (index !== -1) {
updatedDirectories = state.directories.slice(0, index + 1)
} else {
updatedDirectories = state.directories.concat([{ id, title: name }])
}
this.username = this.username ? this.username : res.username
this._updateFilesAndFolders(res, files, folders)
this.plugin.setPluginState({ directories: updatedDirectories })
},
this.handleError)
}
/**
* Fetches a new folder and adds it to the plugin state
*
* @param {Object} folder Folder to fetch
*/
getNextFolder (folder) {
this.getFolder(folder.requestPath, folder.name)
this.lastCheckbox = undefined
}
addFile (file) {
const tagFile = {
id: this.providerFileToId(file),
source: this.plugin.id,
data: file,
name: file.name || file.id,
type: file.mimeType,
isRemote: true,
body: {
fileId: file.id
},
remote: {
companionUrl: this.plugin.opts.companionUrl,
url: `${this.provider.fileUrl(file.requestPath)}`,
body: {
fileId: file.id
},
providerOptions: this.provider.opts
}
}
const fileType = getFileType(tagFile)
// TODO Should we just always use the thumbnail URL if it exists?
if (fileType && isPreviewSupported(fileType)) {
tagFile.preview = file.thumbnail
}
this.plugin.uppy.log('Adding remote file')
try {
this.plugin.uppy.addFile(tagFile)
} catch (err) {
if (!err.isRestriction) {
this.plugin.uppy.log(err)
}
}
}
removeFile (id) {
const { currentSelection } = this.plugin.getPluginState()
this.plugin.setPluginState({
currentSelection: currentSelection.filter((file) => file.id !== id)
})
}
/**
* Removes session token on client side.
*/
logout () {
this.provider.logout(location.href)
.then((res) => {
if (res.ok) {
const newState = {
authenticated: false,
files: [],
folders: [],
directories: []
}
this.plugin.setPluginState(newState)
}
}).catch(this.handleError)
}
filterQuery (e) {
const state = this.plugin.getPluginState()
this.plugin.setPluginState(Object.assign({}, state, {
filterInput: e ? e.target.value : ''
}))
}
toggleSearch (inputEl) {
const state = this.plugin.getPluginState()
this.plugin.setPluginState({
isSearchVisible: !state.isSearchVisible,
filterInput: ''
})
}
filterItems (items) {
const state = this.plugin.getPluginState()
if (!state.filterInput || state.filterInput === '') {
return items
}
return items.filter((folder) => {
return folder.name.toLowerCase().indexOf(state.filterInput.toLowerCase()) !== -1
})
}
sortByTitle () {
const state = Object.assign({}, this.plugin.getPluginState())
const { files, folders, sorting } = state
let sortedFiles = files.sort((fileA, fileB) => {
if (sorting === 'titleDescending') {
return fileB.name.localeCompare(fileA.name)
}
return fileA.name.localeCompare(fileB.name)
})
let sortedFolders = folders.sort((folderA, folderB) => {
if (sorting === 'titleDescending') {
return folderB.name.localeCompare(folderA.name)
}
return folderA.name.localeCompare(folderB.name)
})
this.plugin.setPluginState(Object.assign({}, state, {
files: sortedFiles,
folders: sortedFolders,
sorting: (sorting === 'titleDescending') ? 'titleAscending' : 'titleDescending'
}))
}
sortByDate () {
const state = Object.assign({}, this.plugin.getPluginState())
const { files, folders, sorting } = state
let sortedFiles = files.sort((fileA, fileB) => {
let a = new Date(fileA.modifiedDate)
let b = new Date(fileB.modifiedDate)
if (sorting === 'dateDescending') {
return a > b ? -1 : a < b ? 1 : 0
}
return a > b ? 1 : a < b ? -1 : 0
})
let sortedFolders = folders.sort((folderA, folderB) => {
let a = new Date(folderA.modifiedDate)
let b = new Date(folderB.modifiedDate)
if (sorting === 'dateDescending') {
return a > b ? -1 : a < b ? 1 : 0
}
return a > b ? 1 : a < b ? -1 : 0
})
this.plugin.setPluginState(Object.assign({}, state, {
files: sortedFiles,
folders: sortedFolders,
sorting: (sorting === 'dateDescending') ? 'dateAscending' : 'dateDescending'
}))
}
sortBySize () {
const state = Object.assign({}, this.plugin.getPluginState())
const { files, sorting } = state
// check that plugin supports file sizes
if (!files.length || !this.plugin.getItemData(files[0]).size) {
return
}
let sortedFiles = files.sort((fileA, fileB) => {
let a = fileA.size
let b = fileB.size
if (sorting === 'sizeDescending') {
return a > b ? -1 : a < b ? 1 : 0
}
return a > b ? 1 : a < b ? -1 : 0
})
this.plugin.setPluginState(Object.assign({}, state, {
files: sortedFiles,
sorting: (sorting === 'sizeDescending') ? 'sizeAscending' : 'sizeDescending'
}))
}
isActiveRow (file) {
return this.plugin.getPluginState().activeRow === this.plugin.getItemId(file)
}
isChecked (file) {
const { currentSelection } = this.plugin.getPluginState()
// comparing id instead of the file object, because the reference to the object
// changes when we switch folders, and the file list is updated
return currentSelection.some((item) => item.id === file.id)
}
/**
* Adds all files found inside of specified folder.
*
* Uses separate state while folder contents are being fetched and
* maintains a list of selected folders, which is kept separate from files.
*/
addFolder (folder) {
const folderId = this.providerFileToId(folder)
let state = this.plugin.getPluginState()
let folders = state.selectedFolders || {}
if (folderId in folders && folders[folderId].loading) {
return
}
folders[folderId] = { loading: true, files: [] }
this.plugin.setPluginState({ selectedFolders: folders })
return this.provider.list(folder.requestPath).then((res) => {
let files = []
res.items.forEach((item) => {
if (!item.isFolder) {
this.addFile(item)
files.push(this.providerFileToId(item))
}
})
state = this.plugin.getPluginState()
state.selectedFolders[folderId] = { loading: false, files: files }
this.plugin.setPluginState({ selectedFolders: folders })
const dashboard = this.plugin.uppy.getPlugin('Dashboard')
let message
if (files.length) {
message = dashboard.i18n('folderAdded', {
smart_count: files.length, folder: folder.name
})
} else {
message = dashboard.i18n('emptyFolderAdded')
}
this.plugin.uppy.info(message)
}).catch((e) => {
state = this.plugin.getPluginState()
delete state.selectedFolders[folderId]
this.plugin.setPluginState({ selectedFolders: state.selectedFolders })
this.handleError(e)
})
}
/**
* Toggles file/folder checkbox to on/off state while updating files list.
*
* Note that some extra complexity comes from supporting shift+click to
* toggle multiple checkboxes at once, which is done by getting all files
* in between last checked file and current one.
*/
toggleCheckbox (e, file) {
e.stopPropagation()
e.preventDefault()
e.currentTarget.focus()
let { folders, files } = this.plugin.getPluginState()
let items = this.filterItems(folders.concat(files))
// Shift-clicking selects a single consecutive list of items
// starting at the previous click and deselects everything else.
if (this.lastCheckbox && e.shiftKey) {
let currentSelection
const prevIndex = items.indexOf(this.lastCheckbox)
const currentIndex = items.indexOf(file)
if (prevIndex < currentIndex) {
currentSelection = items.slice(prevIndex, currentIndex + 1)
} else {
currentSelection = items.slice(currentIndex, prevIndex + 1)
}
this.plugin.setPluginState({ currentSelection })
return
}
this.lastCheckbox = file
const { currentSelection } = this.plugin.getPluginState()
if (this.isChecked(file)) {
this.plugin.setPluginState({
currentSelection: currentSelection.filter((item) => item.id !== file.id)
})
} else {
this.plugin.setPluginState({
currentSelection: currentSelection.concat([file])
})
}
}
providerFileToId (file) {
return generateFileID({
data: file,
name: file.name || file.id,
type: file.mimeType
})
}
handleAuth () {
const authState = btoa(JSON.stringify({ origin: getOrigin() }))
// @todo remove this hardcoded version
const clientVersion = encodeURIComponent('@uppy/companion-client=1.0.2')
const link = `${this.provider.authUrl()}?state=${authState}&uppyVersions=${clientVersion}`
const authWindow = window.open(link, '_blank')
const handleToken = (e) => {
if (!this._isOriginAllowed(e.origin, this.plugin.opts.companionAllowedHosts) || e.source !== authWindow) {
this.plugin.uppy.log(`rejecting event from ${e.origin} vs allowed pattern ${this.plugin.opts.companionAllowedHosts}`)
return
}
// Check if it's a string before doing the JSON.parse to maintain support
// for older Companion versions that used object references
const data = typeof e.data === 'string' ? JSON.parse(e.data) : e.data
if (!data.token) {
this.plugin.uppy.log('did not receive token from auth window')
return
}
authWindow.close()
window.removeEventListener('message', handleToken)
this.provider.setAuthToken(data.token)
this.preFirstRender()
}
window.addEventListener('message', handleToken)
}
_isOriginAllowed (origin, allowedOrigin) {
const getRegex = (value) => {
if (typeof value === 'string') {
return new RegExp(`^${value}$`)
} else if (value instanceof RegExp) {
return value
}
}
const patterns = Array.isArray(allowedOrigin) ? allowedOrigin.map(getRegex) : [getRegex(allowedOrigin)]
return patterns
.filter((pattern) => pattern != null) // loose comparison to catch undefined
.some((pattern) => pattern.test(origin) || pattern.test(`${origin}/`)) // allowing for trailing '/'
}
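// Illustrative companionAllowedHosts values (assumed examples, not from the
// original file): strings are anchored into a RegExp, RegExp values are used as-is.
//
//   companionAllowedHosts: 'https://companion\\.example\\.com'
//   companionAllowedHosts: [/^https:\/\/.+\.example\.com$/]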
handleError (error) {
const uppy = this.plugin.uppy
uppy.log(error.toString())
if (error.isAuthError) {
return
}
const message = uppy.i18n('companionError')
uppy.info({ message: message, details: error.toString() }, 'error', 5000)
}
handleScroll (e) {
const scrollPos = e.target.scrollHeight - (e.target.scrollTop + e.target.offsetHeight)
const path = this.nextPagePath || null
if (scrollPos < 50 && path && !this._isHandlingScroll) {
this.provider.list(path)
.then((res) => {
const { files, folders } = this.plugin.getPluginState()
this._updateFilesAndFolders(res, files, folders)
}).catch(this.handleError)
.then(() => { this._isHandlingScroll = false }) // always called
this._isHandlingScroll = true
}
}
donePicking () {
const { currentSelection } = this.plugin.getPluginState()
const promises = currentSelection.map((file) => {
if (file.isFolder) {
return this.addFolder(file)
} else {
return this.addFile(file)
}
})
this._loaderWrapper(Promise.all(promises), () => {
this.clearSelection()
}, () => {})
}
cancelPicking () {
this.clearSelection()
const dashboard = this.plugin.uppy.getPlugin('Dashboard')
if (dashboard) dashboard.hideAllPanels()
}
clearSelection () {
this.plugin.setPluginState({ currentSelection: [] })
}
// displays loader view while asynchronous request is being made.
_loaderWrapper (promise, then, catch_) {
promise
.then((result) => {
this.plugin.setPluginState({ loading: false })
then(result)
}).catch((err) => {
this.plugin.setPluginState({ loading: false })
catch_(err)
})
this.plugin.setPluginState({ loading: true })
}
render (state) {
const { authenticated, didFirstRender } = this.plugin.getPluginState()
if (!didFirstRender) {
this.preFirstRender()
}
// reload pluginState for "loading" attribute because it might
// have changed above.
if (this.plugin.getPluginState().loading) {
return (
<CloseWrapper onUnmount={this.clearSelection}>
<LoaderView i18n={this.plugin.uppy.i18n} />
</CloseWrapper>
)
}
if (!authenticated) {
return (
<CloseWrapper onUnmount={this.clearSelection}>
<AuthView
pluginName={this.plugin.title}
pluginIcon={this.plugin.icon}
handleAuth={this.handleAuth}
i18n={this.plugin.uppy.i18n}
i18nArray={this.plugin.uppy.i18nArray} />
</CloseWrapper>
)
}
const browserProps = Object.assign({}, this.plugin.getPluginState(), {
username: this.username,
getNextFolder: this.getNextFolder,
getFolder: this.getFolder,
filterItems: this.filterItems,
filterQuery: this.filterQuery,
toggleSearch: this.toggleSearch,
sortByTitle: this.sortByTitle,
sortByDate: this.sortByDate,
logout: this.logout,
isActiveRow: this.isActiveRow,
isChecked: this.isChecked,
toggleCheckbox: this.toggleCheckbox,
handleScroll: this.handleScroll,
done: this.donePicking,
cancel: this.cancelPicking,
title: this.plugin.title,
viewType: this.opts.viewType,
showTitles: this.opts.showTitles,
showFilter: this.opts.showFilter,
showBreadcrumbs: this.opts.showBreadcrumbs,
pluginIcon: this.plugin.icon,
i18n: this.plugin.uppy.i18n
})
return (
<CloseWrapper onUnmount={this.clearSelection}>
<Browser {...browserProps} />
</CloseWrapper>
)
}
}
| 1 | 12,240 | I think we have to keep this as a fallback for now, else it's a small breaking change :( | transloadit-uppy | js |
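A minimal sketch of the fallback the reviewer asks to keep (an assumption about the intended fix, not the project's actual patch): prefer the Dashboard's i18n when that plugin is installed, and only fall back to the core Uppy i18n otherwise.

// Hypothetical fallback inside addFolder; `files` and `folder` as in the diff above
const dashboard = this.plugin.uppy.getPlugin('Dashboard')
const i18n = dashboard ? dashboard.i18n : this.plugin.uppy.i18n
const message = files.length
  ? i18n('folderAdded', { smart_count: files.length, folder: folder.name })
  : i18n('emptyFolderAdded')
this.plugin.uppy.info(message)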
@@ -80,7 +80,7 @@ func TestOpImmediateNote(t *testing.T) {
func TestOpDocExtra(t *testing.T) {
xd := OpDocExtra("bnz")
require.NotEmpty(t, xd)
- xd = OpDocExtra("+")
+ xd = OpDocExtra("-")
require.Empty(t, xd)
}
| 1 | // Copyright (C) 2019-2020 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
package logic
import (
"testing"
"github.com/stretchr/testify/require"
)
func TestOpDocs(t *testing.T) {
opsSeen := make(map[string]bool, len(OpSpecs))
for _, op := range OpSpecs {
opsSeen[op.Name] = false
}
for _, od := range opDocList {
_, exists := opsSeen[od.a]
if !exists {
t.Errorf("error: doc for op %#v that does not exist in OpSpecs", od.a)
}
opsSeen[od.a] = true
}
for op, seen := range opsSeen {
if !seen {
t.Errorf("error: doc for op %#v missing", op)
}
}
}
func TestOpGroupCoverage(t *testing.T) {
opsSeen := make(map[string]bool, len(OpSpecs))
for _, op := range OpSpecs {
opsSeen[op.Name] = false
}
for _, og := range OpGroupList {
for _, name := range og.Ops {
_, exists := opsSeen[name]
if !exists {
t.Errorf("error: op %#v in group list but not in OpSpecs\n", name)
continue
}
opsSeen[name] = true
}
}
for name, seen := range opsSeen {
if !seen {
t.Errorf("warning: op %#v not in any group list\n", name)
}
}
}
func TestOpDoc(t *testing.T) {
xd := OpDoc("txn")
require.NotEmpty(t, xd)
xd = OpDoc("NOT AN INSTRUCTION")
require.Empty(t, xd)
}
func TestOpImmediateNote(t *testing.T) {
xd := OpImmediateNote("txn")
require.NotEmpty(t, xd)
xd = OpImmediateNote("+")
require.Empty(t, xd)
}
func TestOpDocExtra(t *testing.T) {
xd := OpDocExtra("bnz")
require.NotEmpty(t, xd)
xd = OpDocExtra("+")
require.Empty(t, xd)
}
func TestOpCost(t *testing.T) {
c := OpCost("+")
require.Equal(t, 1, c)
c = OpCost("sha256")
require.True(t, c > 1)
a := OpAllCosts("+")
require.Equal(t, 1, len(a))
require.Equal(t, 1, a[0])
a = OpAllCosts("sha256")
require.True(t, len(a) > 1)
for v := 1; v <= LogicVersion; v++ {
require.True(t, a[v] > 1)
}
}
func TestOpSize(t *testing.T) {
c := OpSize("+")
require.Equal(t, 1, c)
c = OpSize("intc")
require.Equal(t, 2, c)
}
func TestTypeNameDescription(t *testing.T) {
require.Equal(t, len(TxnTypeNames), len(typeEnumDescriptions))
for i, a := range TxnTypeNames {
b := TypeNameDescription(a)
require.Equal(t, b, typeEnumDescriptions[i].b)
}
require.Equal(t, "invalid type name", TypeNameDescription("invalid type name"))
}
| 1 | 37,946 | nit: shouldn't be part of this PR. | algorand-go-algorand | go |
@@ -83,7 +83,7 @@ describe('Cursor Async Iterator Tests', function() {
expect(doc).to.exist;
cursor.close();
}
- throw new Error('expected closing the cursor to break iteration');
+ throw new MongoError('expected closing the cursor to break iteration');
} catch (e) {
expect(e).to.be.an.instanceOf(MongoError);
} | 1 | 'use strict';
const { expect } = require('chai');
const { MongoError } = require('../../../index');
describe('Cursor Async Iterator Tests', function() {
let client, collection;
before(async function() {
client = this.configuration.newClient();
await client.connect();
const docs = Array.from({ length: 1000 }).map((_, index) => ({ foo: index, bar: 1 }));
collection = client.db(this.configuration.db).collection('async_cursor_tests');
await collection.deleteMany({});
await collection.insertMany(docs);
await client.close();
});
beforeEach(async function() {
client = this.configuration.newClient();
await client.connect();
collection = client.db(this.configuration.db).collection('async_cursor_tests');
});
afterEach(() => client.close());
it('should be able to use a for-await loop on a find command cursor', {
metadata: { requires: { node: '>=10.5.0' } },
test: async function() {
const cursor = collection.find({ bar: 1 });
let counter = 0;
for await (const doc of cursor) {
expect(doc).to.have.property('bar', 1);
counter += 1;
}
expect(counter).to.equal(1000);
}
});
it('should be able to use a for-await loop on an aggregation cursor', {
metadata: { requires: { node: '>=10.5.0' } },
test: async function() {
const cursor = collection.aggregate([{ $match: { bar: 1 } }]);
let counter = 0;
for await (const doc of cursor) {
expect(doc).to.have.property('bar', 1);
counter += 1;
}
expect(counter).to.equal(1000);
}
});
it('should be able to use a for-await loop on a command cursor', {
metadata: { requires: { node: '>=10.5.0', mongodb: '>=3.0.0' } },
test: async function() {
const cursor1 = collection.listIndexes();
const cursor2 = collection.listIndexes();
const indexes = await cursor1.toArray();
let counter = 0;
for await (const doc of cursor2) {
expect(doc).to.exist;
counter += 1;
}
expect(counter).to.equal(indexes.length);
}
});
it('should properly error when cursor is closed', {
metadata: { requires: { node: '>=10.5.0' } },
test: async function() {
const cursor = collection.find();
try {
for await (const doc of cursor) {
expect(doc).to.exist;
cursor.close();
}
throw new Error('expected closing the cursor to break iteration');
} catch (e) {
expect(e).to.be.an.instanceOf(MongoError);
}
}
});
});
| 1 | 17,437 | this change looks wrong to me. I think the test is trying to signal that something went wrong by throwing the `Error` here, otherwise the `catch` below will swallow it. | mongodb-node-mongodb-native | js |
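One way to keep the sentinel from being swallowed, as a sketch under the reviewer's reading (not the repository's actual fix): rethrow the sentinel when it is what was caught, so an iteration that never breaks still fails the test.

const sentinel = new Error('expected closing the cursor to break iteration');
try {
  for await (const doc of cursor) {
    expect(doc).to.exist;
    cursor.close();
  }
  throw sentinel;
} catch (e) {
  if (e === sentinel) throw e; // iteration never broke: surface the failure
  expect(e).to.be.an.instanceOf(MongoError);
}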
@@ -164,6 +164,13 @@ namespace NLog.Internal
if (reusableBuilder != null)
{
+ if (!_layout.IsThreadAgnostic)
+ {
+ string cachedResult;
+ if (logEvent.TryGetCachedLayoutValue(_layout, out cachedResult))
+ return cachedResult;
+ }
+
_layout.RenderAppendBuilder(logEvent, reusableBuilder);
if (_cachedPrevRawFileName != null && _cachedPrevRawFileName.Length == reusableBuilder.Length) | 1 | //
// Copyright (c) 2004-2016 Jaroslaw Kowalski <[email protected]>, Kim Christensen, Julian Verdurmen
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of Jaroslaw Kowalski nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
using System;
using System.Collections.Generic;
using System.IO;
using NLog.Internal.Fakeables;
using NLog.Layouts;
using NLog.Targets;
namespace NLog.Internal
{
/// <summary>
/// A layout that represents a filePath.
/// </summary>
internal class FilePathLayout : IRenderable
{
/// <summary>
/// Cached directory separator char array to avoid memory allocation on each method call.
/// </summary>
private readonly static char[] DirectorySeparatorChars = new[] { Path.DirectorySeparatorChar, Path.AltDirectorySeparatorChar };
#if !SILVERLIGHT || WINDOWS_PHONE
/// <summary>
/// Cached set of invalid filename chars to avoid memory allocation every time Path.GetInvalidFileNameChars() is called.
/// </summary>
private readonly static HashSet<char> InvalidFileNameChars = new HashSet<char>(Path.GetInvalidFileNameChars());
#endif
private Layout _layout;
private FilePathKind _filePathKind;
/// <summary>
/// not null when <see cref="_filePathKind"/> == <c>FilePathKind.Relative</c>
/// </summary>
private string _baseDir;
/// <summary>
/// non-null when the layout renders to a fixed (non-templated) result
/// </summary>
private string cleanedFixedResult;
private bool _cleanupInvalidChars;
/// <summary>
/// <see cref="_cachedPrevRawFileName"/> is the cache-key, and when newly rendered filename matches the cache-key,
/// then it reuses the cleaned cache-value <see cref="_cachedPrevCleanFileName"/>.
/// </summary>
private string _cachedPrevRawFileName;
/// <summary>
/// <see cref="_cachedPrevCleanFileName"/> is the cache-value that is reused, when the newly rendered filename
/// matches the cache-key <see cref="_cachedPrevRawFileName"/>
/// </summary>
private string _cachedPrevCleanFileName;
//TODO move this into an Init method
/// <summary>Initializes a new instance of the <see cref="T:System.Object" /> class.</summary>
public FilePathLayout(Layout layout, bool cleanupInvalidChars, FilePathKind filePathKind)
{
_layout = layout;
_filePathKind = filePathKind;
_cleanupInvalidChars = cleanupInvalidChars;
if (_layout == null)
{
_filePathKind = FilePathKind.Unknown;
return;
}
//do we have to analyze the layout?
if (cleanupInvalidChars || _filePathKind == FilePathKind.Unknown)
{
//check if fixed
var pathLayout2 = layout as SimpleLayout;
if (pathLayout2 != null)
{
var isFixedText = pathLayout2.IsFixedText;
if (isFixedText)
{
cleanedFixedResult = pathLayout2.FixedText;
if (cleanupInvalidChars)
{
//clean first
cleanedFixedResult = CleanupInvalidFilePath(cleanedFixedResult);
}
}
//detect absolute
if (_filePathKind == FilePathKind.Unknown)
{
_filePathKind = DetectFilePathKind(pathLayout2);
}
}
else
{
_filePathKind = FilePathKind.Unknown;
}
}
if (_filePathKind == FilePathKind.Relative)
{
_baseDir = LogFactory.CurrentAppDomain.BaseDirectory;
}
}
public Layout GetLayout()
{
return _layout;
}
#region Implementation of IRenderable
/// <summary>
/// Render the raw filename from Layout
/// </summary>
/// <param name="logEvent">The log event.</param>
/// <param name="reusableBuilder">StringBuilder to minimize allocations [optional].</param>
/// <returns>String representation of a layout.</returns>
private string GetRenderedFileName(LogEventInfo logEvent, System.Text.StringBuilder reusableBuilder = null)
{
if (cleanedFixedResult != null)
{
return cleanedFixedResult;
}
if (_layout == null)
{
return null;
}
if (reusableBuilder != null)
{
_layout.RenderAppendBuilder(logEvent, reusableBuilder);
if (_cachedPrevRawFileName != null && _cachedPrevRawFileName.Length == reusableBuilder.Length)
{
// If old filename matches the newly rendered, then no need to call StringBuilder.ToString()
for (int i = 0; i < _cachedPrevRawFileName.Length; ++i)
{
if (_cachedPrevRawFileName[i] != reusableBuilder[i])
{
_cachedPrevRawFileName = null;
break;
}
}
if (_cachedPrevRawFileName != null)
return _cachedPrevRawFileName;
}
_cachedPrevRawFileName = reusableBuilder.ToString();
_cachedPrevCleanFileName = null;
return _cachedPrevRawFileName;
}
else
{
return _layout.Render(logEvent);
}
}
/// <summary>
/// Convert the raw filename to a correct filename
/// </summary>
/// <param name="rawFileName">The filename generated by Layout.</param>
/// <returns>String representation of a correct filename.</returns>
private string GetCleanFileName(string rawFileName)
{
var cleanFileName = rawFileName;
if (_cleanupInvalidChars && cleanedFixedResult == null)
{
cleanFileName = CleanupInvalidFilePath(rawFileName);
}
if (_filePathKind == FilePathKind.Absolute)
{
return cleanFileName;
}
if (_filePathKind == FilePathKind.Relative && _baseDir != null)
{
//use basedir, faster than Path.GetFullPath
cleanFileName = Path.Combine(_baseDir, cleanFileName);
return cleanFileName;
}
//unknown, use slow method
cleanFileName = Path.GetFullPath(cleanFileName);
return cleanFileName;
}
public string Render(LogEventInfo logEvent)
{
return RenderWithBuilder(logEvent);
}
internal string RenderWithBuilder(LogEventInfo logEvent, System.Text.StringBuilder reusableBuilder = null)
{
var rawFileName = GetRenderedFileName(logEvent, reusableBuilder);
if (string.IsNullOrEmpty(rawFileName))
{
return rawFileName;
}
if ((!_cleanupInvalidChars || cleanedFixedResult != null) && _filePathKind == FilePathKind.Absolute)
return rawFileName; // Skip clean filename string-allocation
if (string.Equals(_cachedPrevRawFileName, rawFileName, StringComparison.Ordinal) && _cachedPrevCleanFileName != null)
return _cachedPrevCleanFileName; // Cache Hit, reuse clean filename string-allocation
var cleanFileName = GetCleanFileName(rawFileName);
_cachedPrevCleanFileName = cleanFileName;
_cachedPrevRawFileName = rawFileName;
return cleanFileName;
}
#endregion
/// <summary>
/// Is this (templated/invalid) path an absolute, relative or unknown?
/// </summary>
internal static FilePathKind DetectFilePathKind(Layout pathLayout)
{
var simpleLayout = pathLayout as SimpleLayout;
if (simpleLayout == null)
{
return FilePathKind.Unknown;
}
return DetectFilePathKind(simpleLayout);
}
/// <summary>
/// Is this (templated/invalid) path an absolute, relative or unknown?
/// </summary>
private static FilePathKind DetectFilePathKind(SimpleLayout pathLayout)
{
var isFixedText = pathLayout.IsFixedText;
//nb: ${basedir} has already been rewritten in the SimpleLayout.compile
var path = isFixedText ? pathLayout.FixedText : pathLayout.Text;
if (path != null)
{
path = path.TrimStart();
int length = path.Length;
if (length >= 1)
{
var firstChar = path[0];
if (firstChar == Path.DirectorySeparatorChar || firstChar == Path.AltDirectorySeparatorChar)
return FilePathKind.Absolute;
if (firstChar == '.') //. and ..
{
return FilePathKind.Relative;
}
if (length >= 2)
{
var secondChar = path[1];
//on unix VolumeSeparatorChar == DirectorySeparatorChar
if (Path.VolumeSeparatorChar != Path.DirectorySeparatorChar && secondChar == Path.VolumeSeparatorChar)
return FilePathKind.Absolute;
}
if (!isFixedText && path.StartsWith("${", StringComparison.OrdinalIgnoreCase))
{
//if first part is a layout, then unknown
return FilePathKind.Unknown;
}
//not a layout renderer, but text
return FilePathKind.Relative;
}
}
return FilePathKind.Unknown;
}
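// Illustrative classifications (assumed examples, not from the original file),
// following the rules above:
//   "C:\logs\app.log"  -> FilePathKind.Absolute (volume separator on Windows)
//   "/var/log/app.log" -> FilePathKind.Absolute (leading directory separator)
//   "./app.log"        -> FilePathKind.Relative (leading '.')
//   "${level}.log"     -> FilePathKind.Unknown (starts with a layout renderer)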
private static string CleanupInvalidFilePath(string filePath)
{
#if !SILVERLIGHT || WINDOWS_PHONE
if (StringHelpers.IsNullOrWhiteSpace(filePath))
{
return filePath;
}
var lastDirSeparator = filePath.LastIndexOfAny(DirectorySeparatorChars);
char[] fileNameChars = null;
for (int i = lastDirSeparator + 1; i < filePath.Length; i++)
{
if (InvalidFileNameChars.Contains(filePath[i]))
{
//delay char[] creation until first invalid char
//is found to avoid memory allocation.
if (fileNameChars == null)
{
fileNameChars = filePath.Substring(lastDirSeparator + 1).ToCharArray();
}
fileNameChars[i - (lastDirSeparator + 1)] = '_';
}
}
//only if an invalid char was replaced do we create a new string.
if (fileNameChars != null)
{
//keep the / in the dirname, because dirname could be c:/ and combining c: with the file name won't work well.
var dirName = lastDirSeparator > 0 ? filePath.Substring(0, lastDirSeparator + 1) : String.Empty;
string fileName = new string(fileNameChars);
return Path.Combine(dirName, fileName);
}
return filePath;
#else
return filePath;
#endif
}
}
}
| 1 | 14,722 | I'm doubting if this should be `if (_layout.IsThreadAgnostic)`, as ThreadAgnostic stuff could be calculated on every thread. (and thus could be lazy). non-ThreadAgnostic should be calculated on the main thread. | NLog-NLog | .cs |
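A sketch of the variant the reviewer has in mind (illustrative only; which polarity is correct is exactly the question being raised): consult the cached layout value only for thread-agnostic layouts, on the view that non-thread-agnostic layouts are rendered eagerly on the main thread.

// Reviewer's suggested polarity (assumption, not the merged change)
if (_layout.IsThreadAgnostic)
{
    string cachedResult;
    if (logEvent.TryGetCachedLayoutValue(_layout, out cachedResult))
        return cachedResult;
}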
@@ -7,14 +7,19 @@ import javafx.scene.control.ToggleGroup;
import javafx.scene.layout.HBox;
import org.phoenicis.javafx.views.common.widgets.lists.CombinedListWidget;
import org.phoenicis.javafx.views.common.widgets.lists.ListWidgetType;
+import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.List;
+import java.util.Optional;
+import java.util.function.Consumer;
/**
* Created by marc on 15.05.17.
*/
public class LeftListWidgetChooser<E> extends HBox {
+ private final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(LeftListWidgetChooser.class);
+
private List<CombinedListWidget<E>> listWidgets;
private ToggleGroup toggleGroup; | 1 | package org.phoenicis.javafx.views.mainwindow.ui;
import javafx.event.ActionEvent;
import javafx.event.EventHandler;
import javafx.scene.control.ToggleButton;
import javafx.scene.control.ToggleGroup;
import javafx.scene.layout.HBox;
import org.phoenicis.javafx.views.common.widgets.lists.CombinedListWidget;
import org.phoenicis.javafx.views.common.widgets.lists.ListWidgetType;
import java.util.Arrays;
import java.util.List;
/**
* Created by marc on 15.05.17.
*/
public class LeftListWidgetChooser<E> extends HBox {
private List<CombinedListWidget<E>> listWidgets;
private ToggleGroup toggleGroup;
private ToggleButton iconsListButton;
private ToggleButton compactListButton;
private ToggleButton detailsListButton;
public LeftListWidgetChooser(CombinedListWidget<E> listWidget) {
this(Arrays.asList(listWidget));
}
public LeftListWidgetChooser(List<CombinedListWidget<E>> listWidgets) {
super();
this.listWidgets = listWidgets;
this.getStyleClass().add("listChooser");
this.toggleGroup = new ToggleGroup();
// prevent unselecting all buttons
EventHandler filter = (EventHandler<ActionEvent>) actionEvent -> {
ToggleButton source = (ToggleButton) actionEvent.getSource();
if (source.getToggleGroup() == null || !source.isSelected()) {
source.fire();
}
};
this.iconsListButton = new ToggleButton();
this.iconsListButton.setToggleGroup(toggleGroup);
this.iconsListButton.getStyleClass().addAll("listIcon", "iconsList");
this.iconsListButton
.setOnAction(event -> listWidgets.forEach(widget -> widget.showList(ListWidgetType.ICONS_LIST)));
this.iconsListButton.addEventFilter(ActionEvent.ANY, filter);
this.compactListButton = new ToggleButton();
this.compactListButton.setToggleGroup(toggleGroup);
this.compactListButton.getStyleClass().addAll("listIcon", "compactList");
this.compactListButton
.setOnAction(event -> listWidgets.forEach(widget -> widget.showList(ListWidgetType.COMPACT_LIST)));
this.compactListButton.addEventFilter(ActionEvent.ANY, filter);
this.detailsListButton = new ToggleButton();
this.detailsListButton.setToggleGroup(toggleGroup);
this.detailsListButton.getStyleClass().addAll("listIcon", "detailsList");
this.detailsListButton
.setOnAction(event -> listWidgets.forEach(widget -> widget.showList(ListWidgetType.DETAILS_LIST)));
this.detailsListButton.addEventFilter(ActionEvent.ANY, filter);
this.iconsListButton.setSelected(true);
this.getChildren().setAll(iconsListButton, compactListButton, detailsListButton);
this.listWidgets.forEach(widget -> widget.showList(ListWidgetType.ICONS_LIST));
}
}
| 1 | 11,052 | Please use only `Logger` instead of `org.slf4j.Logger` | PhoenicisOrg-phoenicis | java |
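What the reviewer asks for, as a minimal sketch (assuming slf4j remains on the classpath): import the Logger type so the field declaration can use the short name.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LeftListWidgetChooser<E> extends HBox {
    // declared with the imported type rather than the fully qualified name
    private final Logger LOGGER = LoggerFactory.getLogger(LeftListWidgetChooser.class);
    // ...
}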
@@ -326,6 +326,10 @@ void nano::block_processor::process_live (nano::transaction const & transaction_
{
node.network.flood_block_initial (block_a);
}
+ else if (!node.flags.disable_block_processor_republishing)
+ {
+ node.network.flood_block (block_a, nano::buffer_drop_policy::no_limiter_drop);
+ }
if (node.websocket_server && node.websocket_server->any_subscriber (nano::websocket::topic::new_unconfirmed_block))
{ | 1 | #include <nano/lib/threading.hpp>
#include <nano/lib/timer.hpp>
#include <nano/node/blockprocessor.hpp>
#include <nano/node/election.hpp>
#include <nano/node/node.hpp>
#include <nano/node/websocket.hpp>
#include <nano/secure/store.hpp>
#include <boost/format.hpp>
std::chrono::milliseconds constexpr nano::block_processor::confirmation_request_delay;
nano::block_post_events::block_post_events (std::function<nano::read_transaction ()> && get_transaction_a) :
get_transaction (std::move (get_transaction_a))
{
}
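// The queued events run from the destructor below. In process_batch this helper
// is constructed before the write transaction, so the events only fire after
// that transaction has completed, under a fresh read transaction supplied by
// get_transaction.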
nano::block_post_events::~block_post_events ()
{
debug_assert (get_transaction != nullptr);
auto transaction (get_transaction ());
for (auto const & i : events)
{
i (transaction);
}
}
nano::block_processor::block_processor (nano::node & node_a, nano::write_database_queue & write_database_queue_a) :
next_log (std::chrono::steady_clock::now ()),
node (node_a),
write_database_queue (write_database_queue_a),
state_block_signature_verification (node.checker, node.ledger.constants.epochs, node.config, node.logger, node.flags.block_processor_verification_size)
{
state_block_signature_verification.blocks_verified_callback = [this] (std::deque<nano::unchecked_info> & items, std::vector<int> const & verifications, std::vector<nano::block_hash> const & hashes, std::vector<nano::signature> const & blocks_signatures) {
this->process_verified_state_blocks (items, verifications, hashes, blocks_signatures);
};
state_block_signature_verification.transition_inactive_callback = [this] () {
if (this->flushing)
{
{
// Prevent a race with condition.wait in block_processor::flush
nano::lock_guard<nano::mutex> guard (this->mutex);
}
this->condition.notify_all ();
}
};
processing_thread = std::thread ([this] () {
nano::thread_role::set (nano::thread_role::name::block_processing);
this->process_blocks ();
});
}
nano::block_processor::~block_processor ()
{
stop ();
if (processing_thread.joinable ())
{
processing_thread.join ();
}
}
void nano::block_processor::stop ()
{
{
nano::lock_guard<nano::mutex> lock (mutex);
stopped = true;
}
condition.notify_all ();
state_block_signature_verification.stop ();
}
void nano::block_processor::flush ()
{
node.checker.flush ();
flushing = true;
nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped && (have_blocks () || active || state_block_signature_verification.is_active ()))
{
condition.wait (lock);
}
flushing = false;
}
size_t nano::block_processor::size ()
{
nano::unique_lock<nano::mutex> lock (mutex);
return (blocks.size () + state_block_signature_verification.size () + forced.size ());
}
bool nano::block_processor::full ()
{
return size () >= node.flags.block_processor_full_size;
}
bool nano::block_processor::half_full ()
{
return size () >= node.flags.block_processor_full_size / 2;
}
void nano::block_processor::add (std::shared_ptr<nano::block> const & block_a, uint64_t origination)
{
nano::unchecked_info info (block_a, 0, origination, nano::signature_verification::unknown);
add (info);
}
void nano::block_processor::add (nano::unchecked_info const & info_a)
{
debug_assert (!node.network_params.work.validate_entry (*info_a.block));
bool quarter_full (size () > node.flags.block_processor_full_size / 4);
if (info_a.verified == nano::signature_verification::unknown && (info_a.block->type () == nano::block_type::state || info_a.block->type () == nano::block_type::open || !info_a.account.is_zero ()))
{
state_block_signature_verification.add (info_a);
}
else
{
{
nano::lock_guard<nano::mutex> guard (mutex);
blocks.emplace_back (info_a);
}
condition.notify_all ();
}
}
void nano::block_processor::add_local (nano::unchecked_info const & info_a)
{
release_assert (info_a.verified == nano::signature_verification::unknown && (info_a.block->type () == nano::block_type::state || !info_a.account.is_zero ()));
debug_assert (!node.network_params.work.validate_entry (*info_a.block));
state_block_signature_verification.add (info_a);
}
void nano::block_processor::force (std::shared_ptr<nano::block> const & block_a)
{
{
nano::lock_guard<nano::mutex> lock (mutex);
forced.push_back (block_a);
}
condition.notify_all ();
}
void nano::block_processor::wait_write ()
{
nano::lock_guard<nano::mutex> lock (mutex);
awaiting_write = true;
}
void nano::block_processor::process_blocks ()
{
nano::unique_lock<nano::mutex> lock (mutex);
while (!stopped)
{
if (have_blocks_ready ())
{
active = true;
lock.unlock ();
process_batch (lock);
lock.lock ();
active = false;
}
else
{
condition.notify_one ();
condition.wait (lock);
}
}
}
bool nano::block_processor::should_log ()
{
auto result (false);
auto now (std::chrono::steady_clock::now ());
if (next_log < now)
{
next_log = now + (node.config.logging.timing_logging () ? std::chrono::seconds (2) : std::chrono::seconds (15));
result = true;
}
return result;
}
bool nano::block_processor::have_blocks_ready ()
{
debug_assert (!mutex.try_lock ());
return !blocks.empty () || !forced.empty ();
}
bool nano::block_processor::have_blocks ()
{
debug_assert (!mutex.try_lock ());
return have_blocks_ready () || state_block_signature_verification.size () != 0;
}
void nano::block_processor::process_verified_state_blocks (std::deque<nano::unchecked_info> & items, std::vector<int> const & verifications, std::vector<nano::block_hash> const & hashes, std::vector<nano::signature> const & blocks_signatures)
{
{
nano::unique_lock<nano::mutex> lk (mutex);
for (auto i (0); i < verifications.size (); ++i)
{
debug_assert (verifications[i] == 1 || verifications[i] == 0);
auto & item = items.front ();
if (!item.block->link ().is_zero () && node.ledger.is_epoch_link (item.block->link ()))
{
// Epoch blocks
if (verifications[i] == 1)
{
item.verified = nano::signature_verification::valid_epoch;
blocks.emplace_back (std::move (item));
}
else
{
// Possible regular state blocks with epoch link (send subtype)
item.verified = nano::signature_verification::unknown;
blocks.emplace_back (std::move (item));
}
}
else if (verifications[i] == 1)
{
// Non epoch blocks
item.verified = nano::signature_verification::valid;
blocks.emplace_back (std::move (item));
}
else
{
requeue_invalid (hashes[i], item);
}
items.pop_front ();
}
}
condition.notify_all ();
}
void nano::block_processor::process_batch (nano::unique_lock<nano::mutex> & lock_a)
{
auto scoped_write_guard = write_database_queue.wait (nano::writer::process_batch);
block_post_events post_events ([&store = node.store] { return store.tx_begin_read (); });
auto transaction (node.store.tx_begin_write ({ tables::accounts, tables::blocks, tables::frontiers, tables::pending, tables::unchecked }));
nano::timer<std::chrono::milliseconds> timer_l;
lock_a.lock ();
timer_l.start ();
// Processing blocks
unsigned number_of_blocks_processed (0), number_of_forced_processed (0);
auto deadline_reached = [&timer_l, deadline = node.config.block_processor_batch_max_time] { return timer_l.after_deadline (deadline); };
auto processor_batch_reached = [&number_of_blocks_processed, max = node.flags.block_processor_batch_size] { return number_of_blocks_processed >= max; };
auto store_batch_reached = [&number_of_blocks_processed, max = node.store.max_block_write_batch_num ()] { return number_of_blocks_processed >= max; };
while (have_blocks_ready () && (!deadline_reached () || !processor_batch_reached ()) && !awaiting_write && !store_batch_reached ())
{
if ((blocks.size () + state_block_signature_verification.size () + forced.size () > 64) && should_log ())
{
node.logger.always_log (boost::str (boost::format ("%1% blocks (+ %2% state blocks) (+ %3% forced) in processing queue") % blocks.size () % state_block_signature_verification.size () % forced.size ()));
}
nano::unchecked_info info;
nano::block_hash hash (0);
bool force (false);
if (forced.empty ())
{
info = blocks.front ();
blocks.pop_front ();
hash = info.block->hash ();
}
else
{
info = nano::unchecked_info (forced.front (), 0, nano::seconds_since_epoch (), nano::signature_verification::unknown);
forced.pop_front ();
hash = info.block->hash ();
force = true;
number_of_forced_processed++;
}
lock_a.unlock ();
if (force)
{
auto successor (node.ledger.successor (transaction, info.block->qualified_root ()));
if (successor != nullptr && successor->hash () != hash)
{
// Replace our block with the winner and roll back any dependent blocks
if (node.config.logging.ledger_rollback_logging ())
{
node.logger.always_log (boost::str (boost::format ("Rolling back %1% and replacing with %2%") % successor->hash ().to_string () % hash.to_string ()));
}
std::vector<std::shared_ptr<nano::block>> rollback_list;
if (node.ledger.rollback (transaction, successor->hash (), rollback_list))
{
node.logger.always_log (nano::severity_level::error, boost::str (boost::format ("Failed to roll back %1% because it or a successor was confirmed") % successor->hash ().to_string ()));
}
else if (node.config.logging.ledger_rollback_logging ())
{
node.logger.always_log (boost::str (boost::format ("%1% blocks rolled back") % rollback_list.size ()));
}
// Deleting from votes cache, stop active transaction
for (auto & i : rollback_list)
{
node.history.erase (i->root ());
// Stop all rolled back active transactions except initial
if (i->hash () != successor->hash ())
{
node.active.erase (*i);
}
}
}
}
number_of_blocks_processed++;
process_one (transaction, post_events, info, force);
lock_a.lock ();
}
awaiting_write = false;
lock_a.unlock ();
if (node.config.logging.timing_logging () && number_of_blocks_processed != 0 && timer_l.stop () > std::chrono::milliseconds (100))
{
node.logger.always_log (boost::str (boost::format ("Processed %1% blocks (%2% blocks were forced) in %3% %4%") % number_of_blocks_processed % number_of_forced_processed % timer_l.value ().count () % timer_l.unit ()));
}
}
void nano::block_processor::process_live (nano::transaction const & transaction_a, nano::block_hash const & hash_a, std::shared_ptr<nano::block> const & block_a, nano::process_return const & process_return_a, nano::block_origin const origin_a)
{
// Start collecting quorum on block
if (node.ledger.dependents_confirmed (transaction_a, *block_a))
{
auto account = block_a->account ().is_zero () ? block_a->sideband ().account : block_a->account ();
node.scheduler.activate (account, transaction_a);
}
else
{
node.active.trigger_inactive_votes_cache_election (block_a);
}
// Announce block contents to the network
if (origin_a == nano::block_origin::local)
{
node.network.flood_block_initial (block_a);
}
if (node.websocket_server && node.websocket_server->any_subscriber (nano::websocket::topic::new_unconfirmed_block))
{
node.websocket_server->broadcast (nano::websocket::message_builder ().new_block_arrived (*block_a));
}
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, nano::unchecked_info info_a, const bool forced_a, nano::block_origin const origin_a)
{
nano::process_return result;
auto block (info_a.block);
auto hash (block->hash ());
result = node.ledger.process (transaction_a, *block, info_a.verified);
switch (result.code)
{
case nano::process_result::progress:
{
release_assert (info_a.account.is_zero () || info_a.account == node.store.block.account_calculated (*block));
if (node.config.logging.ledger_logging ())
{
std::string block_string;
block->serialize_json (block_string, node.config.logging.single_line_record ());
node.logger.try_log (boost::str (boost::format ("Processing block %1%: %2%") % hash.to_string () % block_string));
}
if ((info_a.modified > nano::seconds_since_epoch () - 300 && node.block_arrival.recent (hash)) || forced_a)
{
events_a.events.emplace_back ([this, hash, block = info_a.block, result, origin_a] (nano::transaction const & post_event_transaction_a) { process_live (post_event_transaction_a, hash, block, result, origin_a); });
}
queue_unchecked (transaction_a, hash);
/* For send blocks check epoch open unchecked (gap pending).
For state blocks check only send subtype and only if block epoch is not last epoch.
If epoch is last, then pending entry shouldn't trigger same epoch open block for destination account. */
if (block->type () == nano::block_type::send || (block->type () == nano::block_type::state && block->sideband ().details.is_send && std::underlying_type_t<nano::epoch> (block->sideband ().details.epoch) < std::underlying_type_t<nano::epoch> (nano::epoch::max)))
{
/* block->destination () for legacy send blocks
block->link () for state blocks (send subtype) */
queue_unchecked (transaction_a, block->destination ().is_zero () ? block->link () : block->destination ());
}
break;
}
case nano::process_result::gap_previous:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Gap previous for: %1%") % hash.to_string ()));
}
info_a.verified = result.verified;
if (info_a.modified == 0)
{
info_a.modified = nano::seconds_since_epoch ();
}
nano::unchecked_key unchecked_key (block->previous (), hash);
node.store.unchecked.put (transaction_a, unchecked_key, info_a);
events_a.events.emplace_back ([this, hash] (nano::transaction const & /* unused */) { this->node.gap_cache.add (hash); });
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_previous);
break;
}
case nano::process_result::gap_source:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Gap source for: %1%") % hash.to_string ()));
}
info_a.verified = result.verified;
if (info_a.modified == 0)
{
info_a.modified = nano::seconds_since_epoch ();
}
nano::unchecked_key unchecked_key (node.ledger.block_source (transaction_a, *(block)), hash);
node.store.unchecked.put (transaction_a, unchecked_key, info_a);
events_a.events.emplace_back ([this, hash] (nano::transaction const & /* unused */) { this->node.gap_cache.add (hash); });
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_source);
break;
}
case nano::process_result::gap_epoch_open_pending:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Gap pending entries for epoch open: %1%") % hash.to_string ()));
}
info_a.verified = result.verified;
if (info_a.modified == 0)
{
info_a.modified = nano::seconds_since_epoch ();
}
nano::unchecked_key unchecked_key (block->account (), hash); // Specific unchecked key starting with epoch open block account public key
node.store.unchecked.put (transaction_a, unchecked_key, info_a);
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::gap_source);
break;
}
case nano::process_result::old:
{
if (node.config.logging.ledger_duplicate_logging ())
{
node.logger.try_log (boost::str (boost::format ("Old for: %1%") % hash.to_string ()));
}
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::old);
break;
}
case nano::process_result::bad_signature:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Bad signature for: %1%") % hash.to_string ()));
}
events_a.events.emplace_back ([this, hash, info_a] (nano::transaction const & /* unused */) { requeue_invalid (hash, info_a); });
break;
}
case nano::process_result::negative_spend:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Negative spend for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::unreceivable:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Unreceivable for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::fork:
{
node.stats.inc (nano::stat::type::ledger, nano::stat::detail::fork);
events_a.events.emplace_back ([this, block] (nano::transaction const &) { this->node.active.publish (block); });
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Fork for: %1% root: %2%") % hash.to_string () % block->root ().to_string ()));
}
break;
}
case nano::process_result::opened_burn_account:
{
node.logger.always_log (boost::str (boost::format ("*** Rejecting open block for burn account ***: %1%") % hash.to_string ()));
break;
}
case nano::process_result::balance_mismatch:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Balance mismatch for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::representative_mismatch:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Representative mismatch for: %1%") % hash.to_string ()));
}
break;
}
case nano::process_result::block_position:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Block %1% cannot follow predecessor %2%") % hash.to_string () % block->previous ().to_string ()));
}
break;
}
case nano::process_result::insufficient_work:
{
if (node.config.logging.ledger_logging ())
{
node.logger.try_log (boost::str (boost::format ("Insufficient work for %1% : %2% (difficulty %3%)") % hash.to_string () % nano::to_string_hex (block->block_work ()) % nano::to_string_hex (node.network_params.work.difficulty (*block))));
}
break;
}
}
return result;
}
nano::process_return nano::block_processor::process_one (nano::write_transaction const & transaction_a, block_post_events & events_a, std::shared_ptr<nano::block> const & block_a)
{
nano::unchecked_info info (block_a, block_a->account (), 0, nano::signature_verification::unknown);
auto result (process_one (transaction_a, events_a, info));
return result;
}
void nano::block_processor::queue_unchecked (nano::write_transaction const & transaction_a, nano::hash_or_account const & hash_or_account_a)
{
auto unchecked_blocks (node.store.unchecked.get (transaction_a, hash_or_account_a.hash));
for (auto & info : unchecked_blocks)
{
if (!node.flags.disable_block_processor_unchecked_deletion)
{
node.store.unchecked.del (transaction_a, nano::unchecked_key (hash_or_account_a, info.block->hash ()));
}
add (info);
}
node.gap_cache.erase (hash_or_account_a.hash);
}
void nano::block_processor::requeue_invalid (nano::block_hash const & hash_a, nano::unchecked_info const & info_a)
{
debug_assert (hash_a == info_a.block->hash ());
node.bootstrap_initiator.lazy_requeue (hash_a, info_a.block->previous (), info_a.confirmed);
}
std::unique_ptr<nano::container_info_component> nano::collect_container_info (block_processor & block_processor, std::string const & name)
{
size_t blocks_count;
size_t forced_count;
{
nano::lock_guard<nano::mutex> guard (block_processor.mutex);
blocks_count = block_processor.blocks.size ();
forced_count = block_processor.forced.size ();
}
auto composite = std::make_unique<container_info_composite> (name);
composite->add_component (collect_container_info (block_processor.state_block_signature_verification, "state_block_signature_verification"));
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "blocks", blocks_count, sizeof (decltype (block_processor.blocks)::value_type) }));
composite->add_component (std::make_unique<container_info_leaf> (container_info{ "forced", forced_count, sizeof (decltype (block_processor.forced)::value_type) }));
return composite;
}
| 1 | 16,911 | Should this be "no limiter drop", since this isn't an absolutely essential activity for the stability of the network? | nanocurrency-nano-node | cpp |
@@ -22,8 +22,8 @@ var iam = new AWS.IAM({apiVersion: '2010-05-08'});
iam.deleteAccountAlias({AccountAlias: process.argv[2]}, function(err, data) {
if (err) {
- console.log("Error", err);
+ throw err;
} else {
- console.log("Success", data);
+ console.log('Account alias ' + process.argv[2] + ' deleted.');
}
}); | 1 | /*
Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
This file is licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License. A copy of
the License is located at
http://aws.amazon.com/apache2.0/
This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
*/
// Load the AWS SDK for Node.js
var AWS = require('aws-sdk');
// Load the credentials and set region from JSON file
AWS.config.loadFromPath('./config.json');
// Create the IAM service object
var iam = new AWS.IAM({apiVersion: '2010-05-08'});
iam.deleteAccountAlias({AccountAlias: process.argv[2]}, function(err, data) {
if (err) {
console.log("Error", err);
} else {
console.log("Success", data);
}
});
| 1 | 15,069 | I updated many of the success messages, because most of these operations don't return data if they succeed. This meant many of the operations would print: `Success null` if the script ran successfully. | awsdocs-aws-doc-sdk-examples | rb |
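A self-contained sketch of the pattern this patch adopts: throw on failure and print an operation-specific line on success, since `deleteAccountAlias` returns no payload and the old `console.log("Success", data)` would print `Success null`. It assumes the AWS SDK for JavaScript v2 and a `./config.json` with credentials, as in the file above.

```javascript
var AWS = require('aws-sdk');

// Load credentials and region, matching the record above.
AWS.config.loadFromPath('./config.json');

var iam = new AWS.IAM({apiVersion: '2010-05-08'});

iam.deleteAccountAlias({AccountAlias: process.argv[2]}, function(err, data) {
  if (err) {
    // Surface the failure instead of logging a generic "Error" line.
    throw err;
  }
  // No data comes back on success, so report something meaningful.
  console.log('Account alias ' + process.argv[2] + ' deleted.');
});
```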
@@ -45,6 +45,8 @@ def _test_pyx():
stdout=devnull, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
return False
+ except FileNotFoundError as fnfe:
+ return False
else:
return r == 0
| 1 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
External link to programs
"""
import os
import subprocess
from scapy.error import log_loading
# Notice: this file must not be called before main.py, if started
# in interactive mode, because it needs to be called after the
# logger has been setup, to be able to print the warning messages
# MATPLOTLIB
try:
from matplotlib import get_backend as matplotlib_get_backend
from matplotlib import pyplot as plt
MATPLOTLIB = 1
if "inline" in matplotlib_get_backend():
MATPLOTLIB_INLINED = 1
else:
MATPLOTLIB_INLINED = 0
MATPLOTLIB_DEFAULT_PLOT_KARGS = {"marker": "+"}
# RuntimeError to catch gtk "Cannot open display" error
except (ImportError, RuntimeError):
plt = None
MATPLOTLIB = 0
MATPLOTLIB_INLINED = 0
MATPLOTLIB_DEFAULT_PLOT_KARGS = dict()
log_loading.info("Can't import matplotlib. Won't be able to plot.")
# PYX
def _test_pyx():
"""Returns if PyX is correctly installed or not"""
try:
with open(os.devnull, 'wb') as devnull:
r = subprocess.check_call(["pdflatex", "--version"],
stdout=devnull, stderr=subprocess.STDOUT)
except (subprocess.CalledProcessError, OSError):
return False
else:
return r == 0
try:
import pyx # noqa: F401
if _test_pyx():
PYX = 1
else:
log_loading.info("PyX dependencies are not installed ! Please install TexLive or MikTeX.") # noqa: E501
PYX = 0
except ImportError:
log_loading.info("Can't import PyX. Won't be able to use psdump() or pdfdump().") # noqa: E501
PYX = 0
 | 1 | 15,938 | Could you simply add it to the previous clause? | secdev-scapy | py
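In Python 3, `FileNotFoundError` is a subclass of `OSError`, so the existing clause already catches it and the patch's extra handler is redundant; folding it into the tuple, as the reviewer suggests, keeps one code path. A sketch of the merged form:

```python
import os
import subprocess


def _test_pyx():
    """Return True if pdflatex is available for PyX output."""
    try:
        with open(os.devnull, 'wb') as devnull:
            # FileNotFoundError already inherits from OSError on Python 3,
            # so listing it here is redundant but self-documenting.
            r = subprocess.check_call(["pdflatex", "--version"],
                                      stdout=devnull, stderr=subprocess.STDOUT)
    except (subprocess.CalledProcessError, OSError, FileNotFoundError):
        return False
    return r == 0
```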
@@ -108,13 +108,10 @@ Blockly.Procedures.sortProcedureMutations_ = function(mutations) {
var procCodeA = a.getAttribute('proccode');
var procCodeB = b.getAttribute('proccode');
- if (procCodeA < procCodeB) {
- return -1;
- } else if (procCodeA > procCodeB) {
- return 1;
- } else {
- return 0;
- }
+ return procCodeA.localeCompare(procCodeB, undefined, {
+ sensitivity: 'base',
+ numeric: true
+ });
});
return newMutations; | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Utility functions for handling procedures.
* @author [email protected] (Neil Fraser)
*/
'use strict';
/**
* @name Blockly.Procedures
* @namespace
**/
goog.provide('Blockly.Procedures');
goog.require('Blockly.Blocks');
goog.require('Blockly.constants');
goog.require('Blockly.Events.BlockChange');
goog.require('Blockly.Field');
goog.require('Blockly.Names');
goog.require('Blockly.Workspace');
/**
* Constant to separate procedure names from variables and generated functions
* when running generators.
* @deprecated Use Blockly.PROCEDURE_CATEGORY_NAME
*/
Blockly.Procedures.NAME_TYPE = Blockly.PROCEDURE_CATEGORY_NAME;
/**
* Find all user-created procedure definitions in a workspace.
* @param {!Blockly.Workspace} root Root workspace.
* @return {!Array.<!Array.<!Array>>} Pair of arrays, the
* first contains procedures without return variables, the second with.
* Each procedure is defined by a three-element list of name, parameter
* list, and return value boolean.
*/
Blockly.Procedures.allProcedures = function(root) {
var blocks = root.getAllBlocks();
var proceduresReturn = [];
var proceduresNoReturn = [];
for (var i = 0; i < blocks.length; i++) {
if (blocks[i].getProcedureDef) {
var tuple = blocks[i].getProcedureDef();
if (tuple) {
if (tuple[2]) {
proceduresReturn.push(tuple);
} else {
proceduresNoReturn.push(tuple);
}
}
}
}
proceduresNoReturn.sort(Blockly.Procedures.procTupleComparator_);
proceduresReturn.sort(Blockly.Procedures.procTupleComparator_);
return [proceduresNoReturn, proceduresReturn];
};
/**
* Find all user-created procedure definition mutations in a workspace.
* @param {!Blockly.Workspace} root Root workspace.
* @return {!Array.<Element>} Array of mutation xml elements.
* @package
*/
Blockly.Procedures.allProcedureMutations = function(root) {
var blocks = root.getAllBlocks();
var mutations = [];
for (var i = 0; i < blocks.length; i++) {
if (blocks[i].type == Blockly.PROCEDURES_PROTOTYPE_BLOCK_TYPE) {
var mutation = blocks[i].mutationToDom(/* opt_generateShadows */ true);
if (mutation) {
mutations.push(mutation);
}
}
}
return mutations;
};
/**
* Sorts an array of procedure definition mutations alphabetically.
* (Does not mutate the given array.)
* @param {!Array.<Element>} mutations Array of mutation xml elements.
* @return {!Array.<Element>} Sorted array of mutation xml elements.
* @private
*/
Blockly.Procedures.sortProcedureMutations_ = function(mutations) {
var newMutations = mutations.slice();
newMutations.sort(function(a, b) {
var procCodeA = a.getAttribute('proccode');
var procCodeB = b.getAttribute('proccode');
if (procCodeA < procCodeB) {
return -1;
} else if (procCodeA > procCodeB) {
return 1;
} else {
return 0;
}
});
return newMutations;
};
/**
* Comparison function for case-insensitive sorting of the first element of
* a tuple.
* @param {!Array} ta First tuple.
* @param {!Array} tb Second tuple.
 * @return {number} -1, 0, or 1 to signify less than, equality, or greater than.
* @private
*/
Blockly.Procedures.procTupleComparator_ = function(ta, tb) {
return ta[0].toLowerCase().localeCompare(tb[0].toLowerCase());
};
/**
* Ensure two identically-named procedures don't exist.
* @param {string} name Proposed procedure name.
* @param {!Blockly.Block} block Block to disambiguate.
* @return {string} Non-colliding name.
*/
Blockly.Procedures.findLegalName = function(name, block) {
if (block.isInFlyout) {
// Flyouts can have multiple procedures called 'do something'.
return name;
}
while (!Blockly.Procedures.isLegalName_(name, block.workspace, block)) {
// Collision with another procedure.
var r = name.match(/^(.*?)(\d+)$/);
if (!r) {
name += '2';
} else {
name = r[1] + (parseInt(r[2], 10) + 1);
}
}
return name;
};
/**
* Does this procedure have a legal name? Illegal names include names of
* procedures already defined.
* @param {string} name The questionable name.
* @param {!Blockly.Workspace} workspace The workspace to scan for collisions.
* @param {Blockly.Block=} opt_exclude Optional block to exclude from
* comparisons (one doesn't want to collide with oneself).
* @return {boolean} True if the name is legal.
* @private
*/
Blockly.Procedures.isLegalName_ = function(name, workspace, opt_exclude) {
return !Blockly.Procedures.isNameUsed(name, workspace, opt_exclude);
};
/**
* Return if the given name is already a procedure name.
* @param {string} name The questionable name.
* @param {!Blockly.Workspace} workspace The workspace to scan for collisions.
* @param {Blockly.Block=} opt_exclude Optional block to exclude from
* comparisons (one doesn't want to collide with oneself).
* @return {boolean} True if the name is used, otherwise return false.
*/
Blockly.Procedures.isNameUsed = function(name, workspace, opt_exclude) {
var blocks = workspace.getAllBlocks();
// Iterate through every block and check the name.
for (var i = 0; i < blocks.length; i++) {
if (blocks[i] == opt_exclude) {
continue;
}
if (blocks[i].getProcedureDef) {
var procName = blocks[i].getProcedureDef();
if (Blockly.Names.equals(procName[0], name)) {
return false;
}
}
}
return true;
};
/**
* Rename a procedure. Called by the editable field.
* @param {string} name The proposed new name.
* @return {string} The accepted name.
* @this {Blockly.Field}
*/
Blockly.Procedures.rename = function(name) {
// Strip leading and trailing whitespace. Beyond this, all names are legal.
name = name.replace(/^[\s\xa0]+|[\s\xa0]+$/g, '');
// Ensure two identically-named procedures don't exist.
var legalName = Blockly.Procedures.findLegalName(name, this.sourceBlock_);
var oldName = this.text_;
if (oldName != name && oldName != legalName) {
// Rename any callers.
var blocks = this.sourceBlock_.workspace.getAllBlocks();
for (var i = 0; i < blocks.length; i++) {
if (blocks[i].renameProcedure) {
blocks[i].renameProcedure(oldName, legalName);
}
}
}
return legalName;
};
/**
* Construct the blocks required by the flyout for the procedure category.
 * @param {!Blockly.Workspace} workspace The workspace containing procedures.
* @return {!Array.<!Element>} Array of XML block elements.
*/
Blockly.Procedures.flyoutCategory = function(workspace) {
var xmlList = [];
Blockly.Procedures.addCreateButton_(workspace, xmlList);
// Create call blocks for each procedure defined in the workspace
var mutations = Blockly.Procedures.allProcedureMutations(workspace);
mutations = Blockly.Procedures.sortProcedureMutations_(mutations);
for (var i = 0; i < mutations.length; i++) {
var mutation = mutations[i];
// <block type="procedures_call">
// <mutation ...></mutation>
// </block>
var block = goog.dom.createDom('block');
block.setAttribute('type', 'procedures_call');
block.setAttribute('gap', 16);
block.appendChild(mutation);
xmlList.push(block);
}
return xmlList;
};
/**
* Create the "Make a Block..." button.
 * @param {!Blockly.Workspace} workspace The workspace containing procedures.
* @param {!Array.<!Element>} xmlList Array of XML block elements to add to.
* @private
*/
Blockly.Procedures.addCreateButton_ = function(workspace, xmlList) {
var button = goog.dom.createDom('button');
var msg = Blockly.Msg.NEW_PROCEDURE;
var callbackKey = 'CREATE_PROCEDURE';
var callback = function() {
Blockly.Procedures.createProcedureDefCallback_(workspace);
};
button.setAttribute('text', msg);
button.setAttribute('callbackKey', callbackKey);
workspace.registerButtonCallback(callbackKey, callback);
xmlList.push(button);
};
/**
* Find all callers of a named procedure.
* @param {string} name Name of procedure (procCode in scratch-blocks).
* @param {!Blockly.Workspace} ws The workspace to find callers in.
* @param {!Blockly.Block} definitionRoot The root of the stack where the
* procedure is defined.
* @param {boolean} allowRecursive True if the search should include recursive
* procedure calls. False if the search should ignore the stack starting
* with definitionRoot.
* @return {!Array.<!Blockly.Block>} Array of caller blocks.
* @package
*/
Blockly.Procedures.getCallers = function(name, ws, definitionRoot,
allowRecursive) {
var allBlocks = [];
var topBlocks = ws.getTopBlocks();
// Start by deciding which stacks to investigate.
for (var i = 0; i < topBlocks.length; i++) {
var block = topBlocks[i];
if (block.id == definitionRoot.id && !allowRecursive) {
continue;
}
allBlocks.push.apply(allBlocks, block.getDescendants());
}
var callers = [];
for (var i = 0; i < allBlocks.length; i++) {
var block = allBlocks[i];
if (block.type == Blockly.PROCEDURES_CALL_BLOCK_TYPE ) {
var procCode = block.getProcCode();
if (procCode && procCode == name) {
callers.push(block);
}
}
}
return callers;
};
/**
* Find and edit all callers with a procCode using a new mutation.
* @param {string} name Name of procedure (procCode in scratch-blocks).
* @param {!Blockly.Workspace} ws The workspace to find callers in.
* @param {!Element} mutation New mutation for the callers.
* @package
*/
Blockly.Procedures.mutateCallersAndPrototype = function(name, ws, mutation) {
var defineBlock = Blockly.Procedures.getDefineBlock(name, ws);
var prototypeBlock = Blockly.Procedures.getPrototypeBlock(name, ws);
if (defineBlock && prototypeBlock) {
var callers = Blockly.Procedures.getCallers(name,
defineBlock.workspace, defineBlock, true /* allowRecursive */);
callers.push(prototypeBlock);
Blockly.Events.setGroup(true);
for (var i = 0, caller; caller = callers[i]; i++) {
var oldMutationDom = caller.mutationToDom();
var oldMutation = oldMutationDom && Blockly.Xml.domToText(oldMutationDom);
caller.domToMutation(mutation);
var newMutationDom = caller.mutationToDom();
var newMutation = newMutationDom && Blockly.Xml.domToText(newMutationDom);
if (oldMutation != newMutation) {
Blockly.Events.fire(new Blockly.Events.BlockChange(
caller, 'mutation', null, oldMutation, newMutation));
}
}
Blockly.Events.setGroup(false);
} else {
alert('No define block on workspace'); // TODO decide what to do about this.
}
};
/**
* Find the definition block for the named procedure.
* @param {string} procCode The identifier of the procedure.
* @param {!Blockly.Workspace} workspace The workspace to search.
* @return {Blockly.Block} The procedure definition block, or null not found.
* @package
*/
Blockly.Procedures.getDefineBlock = function(procCode, workspace) {
// Assume that a procedure definition is a top block.
var blocks = workspace.getTopBlocks(false);
for (var i = 0; i < blocks.length; i++) {
if (blocks[i].type == Blockly.PROCEDURES_DEFINITION_BLOCK_TYPE) {
var prototypeBlock = blocks[i].getInput('custom_block').connection.targetBlock();
if (prototypeBlock.getProcCode && prototypeBlock.getProcCode() == procCode) {
return blocks[i];
}
}
}
return null;
};
/**
* Find the prototype block for the named procedure.
* @param {string} procCode The identifier of the procedure.
* @param {!Blockly.Workspace} workspace The workspace to search.
* @return {Blockly.Block} The procedure prototype block, or null not found.
* @package
*/
Blockly.Procedures.getPrototypeBlock = function(procCode, workspace) {
var defineBlock = Blockly.Procedures.getDefineBlock(procCode, workspace);
if (defineBlock) {
return defineBlock.getInput('custom_block').connection.targetBlock();
}
return null;
};
/**
* Create a mutation for a brand new custom procedure.
* @return {Element} The mutation for a new custom procedure
* @package
*/
Blockly.Procedures.newProcedureMutation = function() {
var mutationText = '<xml>' +
'<mutation' +
' proccode="block name"' +
' argumentids="[]"' +
' argumentnames="[]"' +
' argumentdefaults="[]"' +
' warp="false">' +
'</mutation>' +
'</xml>';
return Blockly.Xml.textToDom(mutationText).firstChild;
};
/**
* Callback to create a new procedure custom command block.
* @param {!Blockly.Workspace} workspace The workspace to create the new procedure on.
* @private
*/
Blockly.Procedures.createProcedureDefCallback_ = function(workspace) {
Blockly.Procedures.externalProcedureDefCallback(
Blockly.Procedures.newProcedureMutation(),
Blockly.Procedures.createProcedureCallbackFactory_(workspace)
);
};
/**
* Callback factory for adding a new custom procedure from a mutation.
* @param {!Blockly.Workspace} workspace The workspace to create the new procedure on.
* @return {function(?Element)} callback for creating the new custom procedure.
* @private
*/
Blockly.Procedures.createProcedureCallbackFactory_ = function(workspace) {
return function(mutation) {
if (mutation) {
var blockText = '<xml>' +
'<block type="procedures_definition">' +
'<statement name="custom_block">' +
'<shadow type="procedures_prototype">' +
Blockly.Xml.domToText(mutation) +
'</shadow>' +
'</statement>' +
'</block>' +
'</xml>';
var blockDom = Blockly.Xml.textToDom(blockText).firstChild;
Blockly.Events.setGroup(true);
var block = Blockly.Xml.domToBlock(blockDom, workspace);
block.moveBy(30, 30);
block.scheduleSnapAndBump();
Blockly.Events.setGroup(false);
}
};
};
/**
* Callback to open the modal for editing custom procedures.
* @param {!Blockly.Block} block The block that was right-clicked.
* @private
*/
Blockly.Procedures.editProcedureCallback_ = function(block) {
// Edit can come from one of three block types (call, define, prototype)
// Normalize by setting the block to the prototype block for the procedure.
if (block.type == Blockly.PROCEDURES_DEFINITION_BLOCK_TYPE) {
var input = block.getInput('custom_block');
if (!input) {
alert('Bad input'); // TODO: Decide what to do about this.
return;
}
var conn = input.connection;
if (!conn) {
alert('Bad connection'); // TODO: Decide what to do about this.
return;
}
var innerBlock = conn.targetBlock();
if (!innerBlock ||
!innerBlock.type == Blockly.PROCEDURES_PROTOTYPE_BLOCK_TYPE) {
alert('Bad inner block'); // TODO: Decide what to do about this.
return;
}
block = innerBlock;
} else if (block.type == Blockly.PROCEDURES_CALL_BLOCK_TYPE) {
// This is a call block, find the prototype corresponding to the procCode.
// Make sure to search the correct workspace, call block can be in flyout.
var workspaceToSearch = block.workspace.isFlyout ?
block.workspace.targetWorkspace : block.workspace;
block = Blockly.Procedures.getPrototypeBlock(
block.getProcCode(), workspaceToSearch);
}
// Block now refers to the procedure prototype block, it is safe to proceed.
Blockly.Procedures.externalProcedureDefCallback(
block.mutationToDom(),
Blockly.Procedures.editProcedureCallbackFactory_(block)
);
};
/**
* Callback factory for editing an existing custom procedure.
* @param {!Blockly.Block} block The procedure prototype block being edited.
* @return {function(?Element)} Callback for editing the custom procedure.
* @private
*/
Blockly.Procedures.editProcedureCallbackFactory_ = function(block) {
return function(mutation) {
if (mutation) {
Blockly.Procedures.mutateCallersAndPrototype(block.getProcCode(),
block.workspace, mutation);
}
};
};
/**
* Callback to create a new procedure custom command block.
* @public
*/
Blockly.Procedures.externalProcedureDefCallback = function(/** mutator, callback */) {
alert('External procedure editor must be override Blockly.Procedures.externalProcedureDefCallback');
};
/**
* Make a context menu option for editing a custom procedure.
* This appears in the context menu for procedure definitions and procedure
* calls.
* @param {!Blockly.BlockSvg} block The block where the right-click originated.
* @return {!Object} A menu option, containing text, enabled, and a callback.
* @package
*/
Blockly.Procedures.makeEditOption = function(block) {
var editOption = {
enabled: true,
text: Blockly.Msg.EDIT_PROCEDURE,
callback: function() {
Blockly.Procedures.editProcedureCallback_(block);
}
};
return editOption;
};
/**
* Callback to show the procedure definition corresponding to a custom command
* block.
* TODO(#1136): Implement.
* @param {!Blockly.Block} block The block that was right-clicked.
* @private
*/
Blockly.Procedures.showProcedureDefCallback_ = function(block) {
alert('TODO(#1136): implement showing procedure definition (procCode was "' +
block.procCode_ + '")');
};
/**
* Make a context menu option for showing the definition for a custom procedure,
* based on a right-click on a custom command block.
* @param {!Blockly.BlockSvg} block The block where the right-click originated.
* @return {!Object} A menu option, containing text, enabled, and a callback.
* @package
*/
Blockly.Procedures.makeShowDefinitionOption = function(block) {
var option = {
enabled: true,
text: Blockly.Msg.SHOW_PROCEDURE_DEFINITION,
callback: function() {
Blockly.Procedures.showProcedureDefCallback_(block);
}
};
return option;
};
/**
 * Callback to try to delete a custom block definition.
* @param {string} procCode The identifier of the procedure to delete.
* @param {!Blockly.Block} definitionRoot The root block of the stack that
* defines the custom procedure.
* @return {boolean} True if the custom procedure was deleted, false otherwise.
* @package
*/
Blockly.Procedures.deleteProcedureDefCallback = function(procCode,
definitionRoot) {
var callers = Blockly.Procedures.getCallers(procCode,
definitionRoot.workspace, definitionRoot, false /* allowRecursive */);
if (callers.length > 0) {
return false;
}
var workspace = definitionRoot.workspace;
// Delete the whole stack.
Blockly.Events.setGroup(true);
definitionRoot.dispose();
Blockly.Events.setGroup(false);
// TODO (#1354) Update this function when '_' is removed
// Refresh toolbox, so caller doesn't appear there anymore
workspace.refreshToolboxSelection_();
return true;
};
| 1 | 9,410 | @joshyrobot, I think you can just use localeCompare with only the first argument and leave the other arguments out (here and all the other lines changed). It seems to do the right thing, and then we don't have to worry about these extra options. | LLK-scratch-blocks | js |
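A standalone sketch of the one-argument form the reviewer suggests; `localeCompare` then uses the default locale and options, so the `sensitivity` and `numeric` tweaks from the patch are simply dropped:

```javascript
// Locale-aware sort with default localeCompare options.
var procCodes = ['block10', 'Block2', 'block1'];
var sorted = procCodes.slice().sort(function(a, b) {
  return a.localeCompare(b);
});
console.log(sorted);
```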
@@ -239,6 +239,10 @@ public class ExecuteFlowAction implements TriggerAction {
}
exflow.setExecutionOptions(executionOptions);
+ if (slaOptions != null && slaOptions.size() > 0) {
+ exflow.setSlaOptions(slaOptions);
+ }
+
try {
logger.info("Invoking flow " + project.getName() + "." + flowName);
executorManager.submitExecutableFlow(exflow, submitUser); | 1 | /*
* Copyright 2012 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package azkaban.trigger.builtin;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.log4j.Logger;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutionOptions;
import azkaban.executor.ExecutorManagerAdapter;
import azkaban.executor.ExecutorManagerException;
import azkaban.flow.Flow;
import azkaban.project.Project;
import azkaban.project.ProjectManager;
import azkaban.sla.SlaOption;
import azkaban.trigger.Condition;
import azkaban.trigger.ConditionChecker;
import azkaban.trigger.Trigger;
import azkaban.trigger.TriggerAction;
import azkaban.trigger.TriggerManager;
public class ExecuteFlowAction implements TriggerAction {
public static final String type = "ExecuteFlowAction";
public static final String EXEC_ID = "ExecuteFlowAction.execid";
private static ExecutorManagerAdapter executorManager;
private static TriggerManager triggerManager;
private String actionId;
private int projectId;
private String projectName;
private String flowName;
private String submitUser;
private static ProjectManager projectManager;
private ExecutionOptions executionOptions = new ExecutionOptions();
private List<SlaOption> slaOptions;
private static Logger logger = Logger.getLogger(ExecuteFlowAction.class);
public ExecuteFlowAction(String actionId, int projectId, String projectName,
String flowName, String submitUser, ExecutionOptions executionOptions,
List<SlaOption> slaOptions) {
this.actionId = actionId;
this.projectId = projectId;
this.projectName = projectName;
this.flowName = flowName;
this.submitUser = submitUser;
this.executionOptions = executionOptions;
this.slaOptions = slaOptions;
}
public static void setLogger(Logger logger) {
ExecuteFlowAction.logger = logger;
}
public String getProjectName() {
return projectName;
}
public int getProjectId() {
return projectId;
}
protected void setProjectId(int projectId) {
this.projectId = projectId;
}
public String getFlowName() {
return flowName;
}
protected void setFlowName(String flowName) {
this.flowName = flowName;
}
public String getSubmitUser() {
return submitUser;
}
protected void setSubmitUser(String submitUser) {
this.submitUser = submitUser;
}
public ExecutionOptions getExecutionOptions() {
return executionOptions;
}
protected void setExecutionOptions(ExecutionOptions executionOptions) {
this.executionOptions = executionOptions;
}
public List<SlaOption> getSlaOptions() {
return slaOptions;
}
protected void setSlaOptions(List<SlaOption> slaOptions) {
this.slaOptions = slaOptions;
}
public static ExecutorManagerAdapter getExecutorManager() {
return executorManager;
}
public static void setExecutorManager(ExecutorManagerAdapter executorManager) {
ExecuteFlowAction.executorManager = executorManager;
}
public static TriggerManager getTriggerManager() {
return triggerManager;
}
public static void setTriggerManager(TriggerManager triggerManager) {
ExecuteFlowAction.triggerManager = triggerManager;
}
public static ProjectManager getProjectManager() {
return projectManager;
}
public static void setProjectManager(ProjectManager projectManager) {
ExecuteFlowAction.projectManager = projectManager;
}
@Override
public String getType() {
return type;
}
@SuppressWarnings("unchecked")
@Override
public TriggerAction fromJson(Object obj) {
return createFromJson((HashMap<String, Object>) obj);
}
@SuppressWarnings("unchecked")
public static TriggerAction createFromJson(HashMap<String, Object> obj) {
Map<String, Object> jsonObj = (HashMap<String, Object>) obj;
String objType = (String) jsonObj.get("type");
if (!objType.equals(type)) {
throw new RuntimeException("Cannot create action of " + type + " from "
+ objType);
}
String actionId = (String) jsonObj.get("actionId");
int projectId = Integer.valueOf((String) jsonObj.get("projectId"));
String projectName = (String) jsonObj.get("projectName");
String flowName = (String) jsonObj.get("flowName");
String submitUser = (String) jsonObj.get("submitUser");
ExecutionOptions executionOptions = null;
if (jsonObj.containsKey("executionOptions")) {
executionOptions =
ExecutionOptions.createFromObject(jsonObj.get("executionOptions"));
}
List<SlaOption> slaOptions = null;
if (jsonObj.containsKey("slaOptions")) {
slaOptions = new ArrayList<SlaOption>();
List<Object> slaOptionsObj = (List<Object>) jsonObj.get("slaOptions");
for (Object slaObj : slaOptionsObj) {
slaOptions.add(SlaOption.fromObject(slaObj));
}
}
return new ExecuteFlowAction(actionId, projectId, projectName, flowName,
submitUser, executionOptions, slaOptions);
}
@Override
public Object toJson() {
Map<String, Object> jsonObj = new HashMap<String, Object>();
jsonObj.put("actionId", actionId);
jsonObj.put("type", type);
jsonObj.put("projectId", String.valueOf(projectId));
jsonObj.put("projectName", projectName);
jsonObj.put("flowName", flowName);
jsonObj.put("submitUser", submitUser);
if (executionOptions != null) {
jsonObj.put("executionOptions", executionOptions.toObject());
}
if (slaOptions != null) {
List<Object> slaOptionsObj = new ArrayList<Object>();
for (SlaOption sla : slaOptions) {
slaOptionsObj.add(sla.toObject());
}
jsonObj.put("slaOptions", slaOptionsObj);
}
return jsonObj;
}
@Override
public void doAction() throws Exception {
if (projectManager == null || executorManager == null) {
throw new Exception("ExecuteFlowAction not properly initialized!");
}
Project project = projectManager.getProject(projectId);
if (project == null) {
logger.error("Project to execute " + projectId + " does not exist!");
throw new RuntimeException("Error finding the project to execute "
+ projectId);
}
Flow flow = project.getFlow(flowName);
if (flow == null) {
logger.error("Flow " + flowName + " cannot be found in project "
+ project.getName());
throw new RuntimeException("Error finding the flow to execute "
+ flowName);
}
ExecutableFlow exflow = new ExecutableFlow(project, flow);
exflow.setSubmitUser(submitUser);
exflow.addAllProxyUsers(project.getProxyUsers());
if (executionOptions == null) {
executionOptions = new ExecutionOptions();
}
if (!executionOptions.isFailureEmailsOverridden()) {
executionOptions.setFailureEmails(flow.getFailureEmails());
}
if (!executionOptions.isSuccessEmailsOverridden()) {
executionOptions.setSuccessEmails(flow.getSuccessEmails());
}
exflow.setExecutionOptions(executionOptions);
try {
logger.info("Invoking flow " + project.getName() + "." + flowName);
executorManager.submitExecutableFlow(exflow, submitUser);
logger.info("Invoked flow " + project.getName() + "." + flowName);
} catch (ExecutorManagerException e) {
throw new RuntimeException(e);
}
// deal with sla
if (slaOptions != null && slaOptions.size() > 0) {
int execId = exflow.getExecutionId();
for (SlaOption sla : slaOptions) {
logger.info("Adding sla trigger " + sla.toString() + " to execution "
+ execId);
SlaChecker slaFailChecker =
new SlaChecker("slaFailChecker", sla, execId);
Map<String, ConditionChecker> slaCheckers =
new HashMap<String, ConditionChecker>();
slaCheckers.put(slaFailChecker.getId(), slaFailChecker);
Condition triggerCond =
new Condition(slaCheckers, slaFailChecker.getId()
+ ".isSlaFailed()");
// if whole flow finish before violate sla, just expire
SlaChecker slaPassChecker =
new SlaChecker("slaPassChecker", sla, execId);
Map<String, ConditionChecker> expireCheckers =
new HashMap<String, ConditionChecker>();
expireCheckers.put(slaPassChecker.getId(), slaPassChecker);
Condition expireCond =
new Condition(expireCheckers, slaPassChecker.getId()
+ ".isSlaPassed()");
List<TriggerAction> actions = new ArrayList<TriggerAction>();
List<String> slaActions = sla.getActions();
for (String act : slaActions) {
if (act.equals(SlaOption.ACTION_ALERT)) {
SlaAlertAction slaAlert =
new SlaAlertAction("slaAlert", sla, execId);
actions.add(slaAlert);
} else if (act.equals(SlaOption.ACTION_CANCEL_FLOW)) {
KillExecutionAction killAct =
new KillExecutionAction("killExecution", execId);
actions.add(killAct);
}
}
Trigger slaTrigger =
new Trigger("azkaban_sla", "azkaban", triggerCond, expireCond,
actions);
slaTrigger.getInfo().put("monitored.finished.execution",
String.valueOf(execId));
slaTrigger.setResetOnTrigger(false);
slaTrigger.setResetOnExpire(false);
logger.info("Ready to put in the sla trigger");
triggerManager.insertTrigger(slaTrigger);
logger.info("Sla inserted.");
}
}
}
@Override
public String getDescription() {
return "Execute flow " + getFlowName() + " from project "
+ getProjectName();
}
@Override
public void setContext(Map<String, Object> context) {
}
@Override
public String getId() {
return actionId;
}
}
 | 1 | 12,776 | @chengren311: where does this logic move to? | azkaban-azkaban | java
@@ -204,6 +204,14 @@ func (s *NodegroupService) reconcileNodegroupIAMRole() error {
}
policies := NodegroupRolePolicies()
+ if len(s.scope.ManagedMachinePool.Spec.RoleAdditionalPolicies) > 0 {
+ if !s.scope.AllowAdditionalRoles() {
+ return ErrCannotUseAdditionalRoles
+ }
+
+ policies = append(policies, s.scope.ManagedMachinePool.Spec.RoleAdditionalPolicies...)
+ }
+
_, err = s.EnsurePoliciesAttached(role, aws.StringSlice(policies))
if err != nil {
return errors.Wrapf(err, "error ensuring policies are attached: %v", policies) | 1 | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package eks
import (
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/pkg/errors"
ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/controlplane/eks/api/v1alpha4"
infrav1exp "sigs.k8s.io/cluster-api-provider-aws/exp/api/v1alpha4"
eksiam "sigs.k8s.io/cluster-api-provider-aws/pkg/cloud/services/eks/iam"
"sigs.k8s.io/cluster-api-provider-aws/pkg/eks"
"sigs.k8s.io/cluster-api-provider-aws/pkg/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
)
const (
maxIAMRoleNameLength = 64
)
// NodegroupRolePolicies gives the policies required for a nodegroup role.
func NodegroupRolePolicies() []string {
return []string{
"arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
"arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", //TODO: Can remove when CAPA supports provisioning of OIDC web identity federation with service account token volume projection
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
}
}
// FargateRolePolicies gives the policies required for a fargate role.
func FargateRolePolicies() []string {
return []string{
"arn:aws:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy",
}
}
func (s *Service) reconcileControlPlaneIAMRole() error {
s.scope.V(2).Info("Reconciling EKS Control Plane IAM Role")
if s.scope.ControlPlane.Spec.RoleName == nil {
if !s.scope.EnableIAM() {
s.scope.Info("no eks control plane role specified, using default eks control plane role")
s.scope.ControlPlane.Spec.RoleName = &ekscontrolplanev1.DefaultEKSControlPlaneRole
} else {
s.scope.Info("no eks control plane role specified, using role based on cluster name")
s.scope.ControlPlane.Spec.RoleName = aws.String(fmt.Sprintf("%s-iam-service-role", s.scope.Name()))
}
}
s.scope.Info("using eks control plane role", "role-name", *s.scope.ControlPlane.Spec.RoleName)
role, err := s.GetIAMRole(*s.scope.ControlPlane.Spec.RoleName)
if err != nil {
if !isNotFound(err) {
return err
}
// If the disable IAM flag is used then the role must exist
if !s.scope.EnableIAM() {
return fmt.Errorf("getting role %s: %w", *s.scope.ControlPlane.Spec.RoleName, ErrClusterRoleNotFound)
}
role, err = s.CreateRole(*s.scope.ControlPlane.Spec.RoleName, s.scope.Name(), eksiam.ControlPlaneTrustRelationship(false), s.scope.AdditionalTags())
if err != nil {
record.Warnf(s.scope.ControlPlane, "FailedIAMRoleCreation", "Failed to create control plane IAM role %q: %v", *s.scope.ControlPlane.Spec.RoleName, err)
return fmt.Errorf("creating role %s: %w", *s.scope.ControlPlane.Spec.RoleName, err)
}
record.Eventf(s.scope.ControlPlane, "SuccessfulIAMRoleCreation", "Created control plane IAM role %q", *s.scope.ControlPlane.Spec.RoleName)
}
if s.IsUnmanaged(role, s.scope.Name()) {
s.scope.V(2).Info("Skipping, EKS control plane role policy assignment as role is unmanaged")
return nil
}
//TODO: check tags and trust relationship to see if they need updating
policies := []*string{
aws.String("arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"),
}
if s.scope.ControlPlane.Spec.RoleAdditionalPolicies != nil {
if !s.scope.AllowAdditionalRoles() && len(*s.scope.ControlPlane.Spec.RoleAdditionalPolicies) > 0 {
return ErrCannotUseAdditionalRoles
}
for _, policy := range *s.scope.ControlPlane.Spec.RoleAdditionalPolicies {
additionalPolicy := policy
policies = append(policies, &additionalPolicy)
}
}
_, err = s.EnsurePoliciesAttached(role, policies)
if err != nil {
return errors.Wrapf(err, "error ensuring policies are attached: %v", policies)
}
return nil
}
func (s *Service) deleteControlPlaneIAMRole() error {
if s.scope.ControlPlane.Spec.RoleName == nil {
return nil
}
roleName := *s.scope.ControlPlane.Spec.RoleName
if !s.scope.EnableIAM() {
s.scope.V(2).Info("EKS IAM disabled, skipping deleting EKS Control Plane IAM Role")
return nil
}
s.scope.V(2).Info("Deleting EKS Control Plane IAM Role")
role, err := s.GetIAMRole(roleName)
if err != nil {
if isNotFound(err) {
s.V(2).Info("EKS Control Plane IAM Role already deleted")
return nil
}
return errors.Wrap(err, "getting eks control plane iam role")
}
if s.IsUnmanaged(role, s.scope.Name()) {
s.V(2).Info("Skipping, EKS control plane iam role deletion as role is unmanaged")
return nil
}
err = s.DeleteRole(*s.scope.ControlPlane.Spec.RoleName)
if err != nil {
record.Eventf(s.scope.ControlPlane, "FailedIAMRoleDeletion", "Failed to delete control Plane IAM role %q: %v", *s.scope.ControlPlane.Spec.RoleName, err)
return err
}
record.Eventf(s.scope.ControlPlane, "SuccessfulIAMRoleDeletion", "Deleted Control Plane IAM role %q", *s.scope.ControlPlane.Spec.RoleName)
return nil
}
func (s *NodegroupService) reconcileNodegroupIAMRole() error {
s.scope.V(2).Info("Reconciling EKS Nodegroup IAM Role")
if s.scope.RoleName() == "" {
var roleName string
var err error
if !s.scope.EnableIAM() {
s.scope.Info("no EKS nodegroup role specified, using default EKS nodegroup role")
roleName = infrav1exp.DefaultEKSNodegroupRole
} else {
s.scope.Info("no EKS nodegroup role specified, using role based on nodegroup name")
roleName, err = eks.GenerateEKSName(
fmt.Sprintf("%s-%s", s.scope.KubernetesClusterName(), s.scope.NodegroupName()),
"-nodegroup-iam-service-role",
maxIAMRoleNameLength,
)
if err != nil {
return errors.Wrap(err, "failed to generate IAM role name")
}
}
s.scope.ManagedMachinePool.Spec.RoleName = roleName
}
role, err := s.GetIAMRole(s.scope.RoleName())
if err != nil {
if !isNotFound(err) {
return err
}
// If the disable IAM flag is used then the role must exist
if !s.scope.EnableIAM() {
return ErrNodegroupRoleNotFound
}
role, err = s.CreateRole(s.scope.ManagedMachinePool.Spec.RoleName, s.scope.ClusterName(), eksiam.NodegroupTrustRelationship(), s.scope.AdditionalTags())
if err != nil {
record.Warnf(s.scope.ManagedMachinePool, "FailedIAMRoleCreation", "Failed to create nodegroup IAM role %q: %v", s.scope.RoleName(), err)
return err
}
record.Eventf(s.scope.ManagedMachinePool, "SuccessfulIAMRoleCreation", "Created nodegroup IAM role %q", s.scope.RoleName())
}
if s.IsUnmanaged(role, s.scope.ClusterName()) {
s.scope.V(2).Info("Skipping, EKS nodegroup role policy assignment as role is unmanaged")
return nil
}
_, err = s.EnsureTagsAndPolicy(role, s.scope.ClusterName(), eksiam.NodegroupTrustRelationship(), s.scope.AdditionalTags())
if err != nil {
return errors.Wrapf(err, "error ensuring tags and policy document are set on node role")
}
policies := NodegroupRolePolicies()
_, err = s.EnsurePoliciesAttached(role, aws.StringSlice(policies))
if err != nil {
return errors.Wrapf(err, "error ensuring policies are attached: %v", policies)
}
return nil
}
func (s *NodegroupService) deleteNodegroupIAMRole() (reterr error) {
if err := s.scope.IAMReadyFalse(clusterv1.DeletingReason, ""); err != nil {
return err
}
defer func() {
if reterr != nil {
record.Warnf(
s.scope.ManagedMachinePool, "FailedDeleteIAMNodegroupRole", "Failed to delete EKS nodegroup role %s: %v", s.scope.ManagedMachinePool.Spec.RoleName, reterr,
)
if err := s.scope.IAMReadyFalse("DeletingFailed", reterr.Error()); err != nil {
reterr = err
}
} else if err := s.scope.IAMReadyFalse(clusterv1.DeletedReason, ""); err != nil {
reterr = err
}
}()
roleName := s.scope.RoleName()
if !s.scope.EnableIAM() {
s.scope.V(2).Info("EKS IAM disabled, skipping deleting EKS Nodegroup IAM Role")
return nil
}
s.scope.V(2).Info("Deleting EKS Nodegroup IAM Role")
role, err := s.GetIAMRole(roleName)
if err != nil {
if isNotFound(err) {
s.V(2).Info("EKS Nodegroup IAM Role already deleted")
return nil
}
return errors.Wrap(err, "getting EKS nodegroup iam role")
}
if s.IsUnmanaged(role, s.scope.ClusterName()) {
s.V(2).Info("Skipping, EKS Nodegroup iam role deletion as role is unmanaged")
return nil
}
err = s.DeleteRole(s.scope.RoleName())
if err != nil {
record.Eventf(s.scope.ManagedMachinePool, "FailedIAMRoleDeletion", "Failed to delete Nodegroup IAM role %q: %v", s.scope.ManagedMachinePool.Spec.RoleName, err)
return err
}
record.Eventf(s.scope.ManagedMachinePool, "SuccessfulIAMRoleDeletion", "Deleted Nodegroup IAM role %q", s.scope.ManagedMachinePool.Spec.RoleName)
return nil
}
func (s *FargateService) reconcileFargateIAMRole() (requeue bool, err error) {
s.scope.V(2).Info("Reconciling EKS Fargate IAM Role")
if s.scope.RoleName() == "" {
var roleName string
if !s.scope.EnableIAM() {
s.scope.Info("no EKS fargate role specified, using default EKS fargate role")
roleName = infrav1exp.DefaultEKSFargateRole
} else {
s.scope.Info("no EKS fargate role specified, using role based on fargate profile name")
roleName, err = eks.GenerateEKSName(
"fargate",
fmt.Sprintf("%s-%s", s.scope.KubernetesClusterName(), s.scope.FargateProfile.Spec.ProfileName),
maxIAMRoleNameLength,
)
if err != nil {
return false, errors.Wrap(err, "couldn't generate IAM role name")
}
}
s.scope.FargateProfile.Spec.RoleName = roleName
return true, nil
}
var createdRole bool
role, err := s.GetIAMRole(s.scope.RoleName())
if err != nil {
if !isNotFound(err) {
return false, err
}
// If the disable IAM flag is used then the role must exist
if !s.scope.EnableIAM() {
return false, ErrFargateRoleNotFound
}
createdRole = true
role, err = s.CreateRole(s.scope.RoleName(), s.scope.ClusterName(), eksiam.FargateTrustRelationship(), s.scope.AdditionalTags())
if err != nil {
record.Warnf(s.scope.FargateProfile, "FailedIAMRoleCreation", "Failed to create fargate IAM role %q: %v", s.scope.RoleName(), err)
return false, errors.Wrap(err, "failed to create role")
}
record.Eventf(s.scope.FargateProfile, "SuccessfulIAMRoleCreation", "Created fargate IAM role %q", s.scope.RoleName())
}
updatedRole, err := s.EnsureTagsAndPolicy(role, s.scope.ClusterName(), eksiam.FargateTrustRelationship(), s.scope.AdditionalTags())
if err != nil {
return updatedRole, errors.Wrapf(err, "error ensuring tags and policy document are set on fargate role")
}
policies := FargateRolePolicies()
updatedPolicies, err := s.EnsurePoliciesAttached(role, aws.StringSlice(policies))
if err != nil {
return updatedRole, errors.Wrapf(err, "error ensuring policies are attached: %v", policies)
}
return createdRole || updatedRole || updatedPolicies, nil
}
func (s *FargateService) deleteFargateIAMRole() (reterr error) {
if err := s.scope.IAMReadyFalse(clusterv1.DeletingReason, ""); err != nil {
return err
}
defer func() {
if reterr != nil {
record.Warnf(
s.scope.FargateProfile, "FailedIAMRoleDeletion", "Failed to delete EKS fargate role %s: %v", s.scope.FargateProfile.Spec.RoleName, reterr,
)
if err := s.scope.IAMReadyFalse("DeletingFailed", reterr.Error()); err != nil {
reterr = err
}
} else if err := s.scope.IAMReadyFalse(clusterv1.DeletedReason, ""); err != nil {
reterr = err
}
}()
roleName := s.scope.RoleName()
if !s.scope.EnableIAM() {
s.scope.V(2).Info("EKS IAM disabled, skipping deleting EKS fargate IAM Role")
return nil
}
s.scope.V(2).Info("Deleting EKS fargate IAM Role")
_, err := s.GetIAMRole(roleName)
if err != nil {
if isNotFound(err) {
s.V(2).Info("EKS fargate IAM Role already deleted")
return nil
}
return errors.Wrap(err, "getting EKS fargate iam role")
}
err = s.DeleteRole(s.scope.RoleName())
if err != nil {
record.Eventf(s.scope.FargateProfile, "FailedIAMRoleDeletion", "Failed to delete fargate IAM role %q: %v", s.scope.RoleName(), err)
return err
}
record.Eventf(s.scope.FargateProfile, "SuccessfulIAMRoleDeletion", "Deleted fargate IAM role %q", s.scope.RoleName())
return nil
}
func isNotFound(err error) bool {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case iam.ErrCodeNoSuchEntityException:
return true
default:
return false
}
}
return false
}
 | 1 | 20,700 | Can we have a `nil` check at `s.scope.ManagedMachinePool`? | kubernetes-sigs-cluster-api-provider-aws | go
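A pared-down sketch of the guard the reviewer asks for. The types are hypothetical stand-ins for the CAPA scope, and the error value is invented for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// Hypothetical stand-ins for the scope types used in the record above.
type PoolSpec struct{ RoleAdditionalPolicies []string }
type Pool struct{ Spec PoolSpec }
type Scope struct{ ManagedMachinePool *Pool }

// appendAdditionalPolicies nil-guards the pool before dereferencing it.
func appendAdditionalPolicies(s *Scope, policies []string) ([]string, error) {
	if s.ManagedMachinePool == nil {
		return nil, errors.New("managed machine pool is nil")
	}
	return append(policies, s.ManagedMachinePool.Spec.RoleAdditionalPolicies...), nil
}

func main() {
	s := &Scope{ManagedMachinePool: &Pool{Spec: PoolSpec{
		RoleAdditionalPolicies: []string{"arn:aws:iam::aws:policy/Extra"},
	}}}
	fmt.Println(appendAdditionalPolicies(s, []string{"base"}))
}
```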
@@ -40,16 +40,6 @@ class Options extends \VuFind\Search\Base\Options
{
use \VuFind\Search\Options\ViewOptionsTrait;
- /**
- * Available sort options for facets
- *
- * @var array
- */
- protected $facetSortOptions = [
- 'count' => 'sort_count',
- 'index' => 'sort_alphabetic'
- ];
-
/**
* Hierarchical facets
* | 1 | <?php
/**
* Solr aspect of the Search Multi-class (Options)
*
* PHP version 7
*
* Copyright (C) Villanova University 2011.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* @category VuFind
* @package Search_Solr
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Page
*/
namespace VuFind\Search\Solr;
/**
* Solr Search Options
*
* @category VuFind
* @package Search_Solr
* @author Demian Katz <[email protected]>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link https://vufind.org Main Page
*/
class Options extends \VuFind\Search\Base\Options
{
use \VuFind\Search\Options\ViewOptionsTrait;
/**
* Available sort options for facets
*
* @var array
*/
protected $facetSortOptions = [
'count' => 'sort_count',
'index' => 'sort_alphabetic'
];
/**
* Hierarchical facets
*
* @var array
*/
protected $hierarchicalFacets = [];
/**
* Hierarchical facet separators
*
* @var array
*/
protected $hierarchicalFacetSeparators = [];
/**
* Relevance sort override for empty searches
*
* @var string
*/
protected $emptySearchRelevanceOverride = null;
/**
* Constructor
*
* @param \VuFind\Config\PluginManager $configLoader Config loader
*/
public function __construct(\VuFind\Config\PluginManager $configLoader)
{
parent::__construct($configLoader);
$searchSettings = $configLoader->get($this->searchIni);
if (isset($searchSettings->General->default_limit)) {
$this->defaultLimit = $searchSettings->General->default_limit;
}
if (isset($searchSettings->General->limit_options)) {
$this->limitOptions
= explode(",", $searchSettings->General->limit_options);
}
if (isset($searchSettings->General->default_sort)) {
$this->defaultSort = $searchSettings->General->default_sort;
}
if (isset($searchSettings->General->empty_search_relevance_override)) {
$this->emptySearchRelevanceOverride
= $searchSettings->General->empty_search_relevance_override;
}
if (isset($searchSettings->DefaultSortingByType)
&& count($searchSettings->DefaultSortingByType) > 0
) {
foreach ($searchSettings->DefaultSortingByType as $key => $val) {
$this->defaultSortByHandler[$key] = $val;
}
}
if (isset($searchSettings->RSS->sort)) {
$this->rssSort = $searchSettings->RSS->sort;
}
if (isset($searchSettings->General->default_handler)) {
$this->defaultHandler = $searchSettings->General->default_handler;
}
if (isset($searchSettings->General->retain_filters_by_default)) {
$this->retainFiltersByDefault
= $searchSettings->General->retain_filters_by_default;
}
if (isset($searchSettings->General->default_filters)) {
$this->defaultFilters = $searchSettings->General->default_filters
->toArray();
}
// Result limit:
if (isset($searchSettings->General->result_limit)) {
$this->resultLimit = $searchSettings->General->result_limit;
}
if (isset($searchSettings->Basic_Searches)) {
foreach ($searchSettings->Basic_Searches as $key => $value) {
$this->basicHandlers[$key] = $value;
}
}
if (isset($searchSettings->Advanced_Searches)) {
foreach ($searchSettings->Advanced_Searches as $key => $value) {
$this->advancedHandlers[$key] = $value;
}
}
// Load sort preferences (or defaults if none in .ini file):
if (isset($searchSettings->Sorting)) {
foreach ($searchSettings->Sorting as $key => $value) {
$this->sortOptions[$key] = $value;
}
} else {
$this->sortOptions = ['relevance' => 'sort_relevance',
'year' => 'sort_year', 'year asc' => 'sort_year asc',
'callnumber-sort' => 'sort_callnumber', 'author' => 'sort_author',
'title' => 'sort_title'];
}
// Set up views
$this->initViewOptions($searchSettings);
// Load list view for result (controls AJAX embedding vs. linking)
if (isset($searchSettings->List->view)) {
$this->listviewOption = $searchSettings->List->view;
}
// Load facet preferences
$facetSettings = $configLoader->get($this->facetsIni);
if (isset($facetSettings->Advanced_Settings->translated_facets)
&& count($facetSettings->Advanced_Settings->translated_facets) > 0
) {
$this->setTranslatedFacets(
$facetSettings->Advanced_Settings->translated_facets->toArray()
);
}
if (isset($facetSettings->Advanced_Settings->delimiter)) {
$this->setDefaultFacetDelimiter(
$facetSettings->Advanced_Settings->delimiter
);
}
if (isset($facetSettings->Advanced_Settings->delimited_facets)
&& count($facetSettings->Advanced_Settings->delimited_facets) > 0
) {
$this->setDelimitedFacets(
$facetSettings->Advanced_Settings->delimited_facets->toArray()
);
}
if (isset($facetSettings->Advanced_Settings->special_facets)) {
$this->specialAdvancedFacets
= $facetSettings->Advanced_Settings->special_facets;
}
if (isset($facetSettings->SpecialFacets->hierarchical)) {
$this->hierarchicalFacets
= $facetSettings->SpecialFacets->hierarchical->toArray();
}
if (isset($facetSettings->SpecialFacets->hierarchicalFacetSeparators)) {
$this->hierarchicalFacetSeparators = $facetSettings->SpecialFacets
->hierarchicalFacetSeparators->toArray();
}
// Load Spelling preferences
$config = $configLoader->get($this->mainIni);
if (isset($config->Spelling->enabled)) {
$this->spellcheck = $config->Spelling->enabled;
}
// Turn on first/last navigation if configured:
if (isset($config->Record->first_last_navigation)
&& $config->Record->first_last_navigation
) {
$this->firstlastNavigation = true;
}
// Turn on highlighting if the user has requested highlighting or snippet
// functionality:
$highlight = !isset($searchSettings->General->highlighting)
? false : $searchSettings->General->highlighting;
$snippet = !isset($searchSettings->General->snippets)
? false : $searchSettings->General->snippets;
if ($highlight || $snippet) {
$this->highlight = true;
}
// Load autocomplete preferences:
$this->configureAutocomplete($searchSettings);
// Load shard settings
if (isset($searchSettings->IndexShards)
&& !empty($searchSettings->IndexShards)
) {
foreach ($searchSettings->IndexShards as $k => $v) {
$this->shards[$k] = $v;
}
// If we have a default from the configuration, use that...
if (isset($searchSettings->ShardPreferences->defaultChecked)
&& !empty($searchSettings->ShardPreferences->defaultChecked)
) {
$defaultChecked
= is_object($searchSettings->ShardPreferences->defaultChecked)
? $searchSettings->ShardPreferences->defaultChecked->toArray()
: [$searchSettings->ShardPreferences->defaultChecked];
foreach ($defaultChecked as $current) {
$this->defaultSelectedShards[] = $current;
}
} else {
// If no default is configured, use all shards...
$this->defaultSelectedShards = array_keys($this->shards);
}
// Apply checkbox visibility setting if applicable:
if (isset($searchSettings->ShardPreferences->showCheckboxes)) {
$this->visibleShardCheckboxes
= $searchSettings->ShardPreferences->showCheckboxes;
}
}
}
/**
* Return the route name for the search results action.
*
* @return string
*/
public function getSearchAction()
{
return 'search-results';
}
/**
* Return the route name of the action used for performing advanced searches.
* Returns false if the feature is not supported.
*
* @return string|bool
*/
public function getAdvancedSearchAction()
{
return 'search-advanced';
}
/**
* Return the route name for the facet list action. Returns false to cover
* unimplemented support.
*
* @return string|bool
*/
public function getFacetListAction()
{
return 'search-facetlist';
}
/**
* Get the relevance sort override for empty searches.
*
* @return string Sort field or null if not set
*/
public function getEmptySearchRelevanceOverride()
{
return $this->emptySearchRelevanceOverride;
}
/**
* Get an array of hierarchical facets.
*
* @return array
*/
public function getHierarchicalFacets()
{
return $this->hierarchicalFacets;
}
/**
* Get hierarchical facet separators
*
* @return array
*/
public function getHierarchicalFacetSeparators()
{
return $this->hierarchicalFacetSeparators;
}
}
| 1 | 28,233 | Rather than deleting this, should you just reformat it so it's the `*` settings, so if nothing is configured in facets.ini, the existing default behavior continues to work? | vufind-org-vufind | php |
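A sketch of the wildcard fallback this comment seems to suggest, for illustration only — the '*' key and the '/' default below are assumptions, not actual VuFind configuration:

// Keep per-facet separators configured in facets.ini, but seed a '*'
// wildcard entry so an unconfigured installation keeps the previous
// default behavior instead of losing it.
$configured = isset($facetSettings->SpecialFacets->hierarchicalFacetSeparators)
    ? $facetSettings->SpecialFacets->hierarchicalFacetSeparators->toArray()
    : [];
// The array union operator keeps keys already present on the left, so
// explicitly configured separators always win over the '*' default.
$this->hierarchicalFacetSeparators = $configured + ['*' => '/'];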
@@ -31,11 +31,14 @@ class ManagerConfiguration {
'#collapsible' => FALSE,
];
foreach ($this->datastoreManager->getConfigurableProperties() as $property => $default_value) {
+      $property_label = str_replace("_", " ", $property);
+      $property_label = ucfirst($property_label);
+
if ($property == "delimiter") {
$form['import_options']["datastore_manager_config_{$property}"] = array(
'#type' => 'select',
// @codingStandardsIgnoreStart
- '#title' => ucfirst(t("{$property}")),
+        '#title' => t($property_label),
// @codingStandardsIgnoreEnd
'#options' => array(
"," => ",", | 1 | <?php
namespace Dkan\Datastore\Page\Component;
use Dkan\Datastore\Manager\ManagerInterface;
/**
* Class ManagerConfiguration.
*
* Form component to configure a datastore manager.
*/
class ManagerConfiguration {
private $datastoreManager;
/**
* Constructor.
*/
public function __construct(ManagerInterface $manager) {
$this->datastoreManager = $manager;
}
/**
* Get form.
*/
public function getForm() {
$form = [];
$form['import_options'] = [
'#type' => 'fieldset',
'#title' => t('Import options'),
'#collapsible' => FALSE,
];
foreach ($this->datastoreManager->getConfigurableProperties() as $property => $default_value) {
if ($property == "delimiter") {
$form['import_options']["datastore_manager_config_{$property}"] = array(
'#type' => 'select',
// @codingStandardsIgnoreStart
'#title' => ucfirst(t("{$property}")),
// @codingStandardsIgnoreEnd
'#options' => array(
"," => ",",
";" => ";",
"|" => "|",
"\t" => "TAB",
),
'#default_value' => $default_value,
);
}
else {
$form['import_options']["datastore_manager_config_{$property}"] = [
'#type' => 'textfield',
// @codingStandardsIgnoreStart
'#title' => ucfirst(t("{$property}")),
// @codingStandardsIgnoreEnd
'#default_value' => $default_value,
];
}
}
return $form;
}
/**
* Submit.
*/
public function submit($value) {
$configurable_properties = [];
foreach ($value as $property_name => $v) {
if (!empty($v)) {
$pname = str_replace("datastore_manager_config_", "", $property_name);
$configurable_properties[$pname] = $v;
}
}
$this->datastoreManager->setConfigurableProperties($configurable_properties);
$this->datastoreManager->saveState();
}
}
| 1 | 20,660 | wrap, single line | GetDKAN-dkan | php |
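Presumably "wrap, single line" asks for the two label assignments in the patch to be collapsed into one statement; a sketch of that reading:

// Single-statement equivalent of the two assignments above.
$property_label = ucfirst(str_replace("_", " ", $property));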
@@ -202,6 +202,12 @@ public class JavaContextCommon {
public abstract String getReturnType();
+ public String getGenericAwareReturnType() {
+ String returnType = getReturnType();
+ if (returnType == null || returnType.isEmpty()) return "Void";
+ else return returnType;
+ }
+
public abstract ImmutableList<Variable> getParams();
public abstract ImmutableList<Variable> getRequiredParams(); | 1 | /* Copyright 2016 Google Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.java;
import com.google.api.codegen.CollectionConfig;
import com.google.api.codegen.LanguageUtil;
import com.google.api.tools.framework.model.Field;
import com.google.api.tools.framework.model.Interface;
import com.google.api.tools.framework.model.TypeRef;
import com.google.auto.value.AutoValue;
import com.google.common.base.Splitter;
import com.google.common.collect.BiMap;
import com.google.common.collect.HashBiMap;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.escape.Escaper;
import com.google.common.escape.Escapers;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
/**
* A class that provides helper methods for snippet files generating Java code to get data and
* perform data transformations that are difficult or messy to do in the snippets themselves.
*/
public class JavaContextCommon {
/**
   * A string prefix for matching types from java.lang. Assumes well-formed qualified type names.
*/
private static final String JAVA_LANG_TYPE_PREFIX = "java.lang.";
/**
* Escaper for formatting javadoc strings.
*/
private static final Escaper JAVADOC_ESCAPER =
Escapers.builder()
.addEscape('&', "&")
.addEscape('<', "<")
.addEscape('>', ">")
.addEscape('*', "*")
.build();
/**
* A map from unboxed Java primitive type name to boxed counterpart.
*/
private static final ImmutableMap<String, String> BOXED_TYPE_MAP =
ImmutableMap.<String, String>builder()
.put("boolean", "Boolean")
.put("int", "Integer")
.put("long", "Long")
.put("float", "Float")
.put("double", "Double")
.build();
/**
* A bi-map from full names to short names indicating the import map.
*/
private final BiMap<String, String> imports = HashBiMap.create();
/**
   * A map from simple type name to a boolean, indicating whether it's in java.lang or not. If a
* simple type name is not in the map, this information is unknown.
*/
private final Map<String, Boolean> implicitImports = Maps.newHashMap();
private final String defaultPackagePrefix;
public JavaContextCommon(String defaultPackagePrefix) {
this.defaultPackagePrefix = defaultPackagePrefix;
}
/**
* Returns the Java representation of a basic type in boxed form.
*/
public String boxedTypeName(String typeName) {
return LanguageUtil.getRename(typeName, BOXED_TYPE_MAP);
}
public String getMinimallyQualifiedName(String fullName, String shortName) {
// Derive a short name if possible
if (imports.containsKey(fullName)) {
// Short name already there.
return imports.get(fullName);
}
if (imports.containsValue(shortName)
|| !fullName.startsWith(JAVA_LANG_TYPE_PREFIX) && isImplicitImport(shortName)) {
// Short name clashes, use long name.
return fullName;
}
imports.put(fullName, shortName);
return shortName;
}
/**
* Checks whether the simple type name is implicitly imported from java.lang.
*/
private boolean isImplicitImport(String name) {
Boolean yes = implicitImports.get(name);
if (yes != null) {
return yes;
}
// Use reflection to determine whether the name exists in java.lang.
try {
Class.forName("java.lang." + name);
yes = true;
} catch (Exception e) {
yes = false;
}
implicitImports.put(name, yes);
return yes;
}
/**
* Splits given text into lines and returns an iterable of strings each one representing a line
* decorated for a javadoc documentation comment. Markdown will be translated to javadoc.
*/
public Iterable<String> getJavaDocLines(String text) {
return getJavaDocLinesWithPrefix(text, "");
}
/**
* Splits given text into lines and returns an iterable of strings each one representing a line
* decorated for a javadoc documentation comment, with the first line prefixed with
* firstLinePrefix. Markdown will be translated to javadoc.
*/
public Iterable<String> getJavaDocLinesWithPrefix(String text, String firstLinePrefix) {
// TODO(wgg): convert markdown to javadoc
List<String> result = new ArrayList<>();
String linePrefix = firstLinePrefix;
text = JAVADOC_ESCAPER.escape(text);
for (String line : Splitter.on(String.format("%n")).split(text)) {
result.add(" * " + linePrefix + line);
linePrefix = "";
}
return result;
}
@AutoValue
abstract static class Variable {
public abstract TypeRef getType();
public abstract String getUnformattedName();
public abstract String getDescription();
public abstract String getName();
@Nullable
public abstract CollectionConfig getFormattingConfig();
// This function is necessary for use in snippets
public boolean hasFormattingConfig() {
return getFormattingConfig() != null;
}
}
  // This member function is necessary to give snippets access to this
  // functionality, since snippets can't call static functions.
public Variable newVariable(TypeRef type, String name, String description) {
return s_newVariable(type, name, description);
}
// This function is necessary to provide a static entry point for the same-named
// member function.
public static Variable s_newVariable(TypeRef type, String name, String description) {
return s_newVariable(type, name, description, name, null);
}
public static Variable s_newVariable(
TypeRef type,
String unformattedName,
String description,
String name,
CollectionConfig formattingConfig) {
return new AutoValue_JavaContextCommon_Variable(
type, unformattedName, description, name, formattingConfig);
}
@AutoValue
abstract static class JavaDocConfig {
public abstract String getApiName();
public abstract String getMethodName();
public abstract String getReturnType();
public abstract ImmutableList<Variable> getParams();
public abstract ImmutableList<Variable> getRequiredParams();
public abstract boolean isPagedVariant();
public abstract boolean isCallableVariant();
@AutoValue.Builder
abstract static class Builder {
public abstract Builder setApiName(String serviceName);
public abstract Builder setMethodName(String methodName);
public abstract Builder setReturnType(String returnType);
public abstract Builder setParams(ImmutableList<Variable> params);
public Builder setParams(JavaGapicContext context, Iterable<Field> fields) {
return setParams(fieldsToParams(context, fields));
}
public Builder setParamsWithFormatting(
JavaGapicContext context,
Interface service,
Iterable<Field> fields,
ImmutableMap<String, String> fieldNamePatterns) {
return setParams(fieldsToParamsWithFormatting(context, service, fields, fieldNamePatterns));
}
public Builder setSingleParam(
JavaGapicContext context, TypeRef requestType, String name, String doc) {
return setParams(ImmutableList.of(s_newVariable(requestType, name, doc)));
}
public abstract Builder setRequiredParams(ImmutableList<Variable> params);
public Builder setRequiredParams(JavaGapicContext context, Iterable<Field> fields) {
return setRequiredParams(fieldsToParams(context, fields));
}
public Builder setRequiredParamsWithFormatting(
JavaGapicContext context,
Interface service,
Iterable<Field> fields,
ImmutableMap<String, String> fieldNamePatterns) {
return setRequiredParams(
fieldsToParamsWithFormatting(context, service, fields, fieldNamePatterns));
}
public Builder setRequiredParamsEmpty() {
return setRequiredParams(ImmutableList.<Variable>of());
}
public abstract Builder setPagedVariant(boolean paged);
public abstract Builder setCallableVariant(boolean callable);
public abstract JavaDocConfig build();
private static ImmutableList<Variable> fieldsToParams(
JavaGapicContext context, Iterable<Field> fields) {
ImmutableList.Builder<Variable> params = ImmutableList.<Variable>builder();
for (Field field : fields) {
params.add(
s_newVariable(
field.getType(),
LanguageUtil.lowerUnderscoreToLowerCamel(field.getSimpleName()),
context.getDescription(field)));
}
return params.build();
}
private static ImmutableList<Variable> fieldsToParamsWithFormatting(
JavaGapicContext context,
Interface service,
Iterable<Field> fields,
ImmutableMap<String, String> fieldNamePatterns) {
ImmutableList.Builder<Variable> params = ImmutableList.<Variable>builder();
for (Field field : fields) {
if (fieldNamePatterns.containsKey(field.getSimpleName())) {
params.add(
s_newVariable(
field.getType(),
LanguageUtil.lowerUnderscoreToLowerCamel(field.getSimpleName()),
context.getDescription(field),
"formatted" + LanguageUtil.lowerUnderscoreToUpperCamel(field.getSimpleName()),
context.getCollectionConfig(
service, fieldNamePatterns.get(field.getSimpleName()))));
} else {
params.add(
s_newVariable(
field.getType(),
LanguageUtil.lowerUnderscoreToLowerCamel(field.getSimpleName()),
context.getDescription(field)));
}
}
return params.build();
}
}
}
public JavaDocConfig.Builder newJavaDocConfigBuilder() {
return new AutoValue_JavaContextCommon_JavaDocConfig.Builder();
}
public boolean getTrue() {
return true;
}
public boolean getFalse() {
return false;
}
public String requestParamDoc() {
return "The request object containing all of the parameters for the API call.";
}
public String requestParam() {
return "request";
}
public List<String> getImports() {
// Clean up the imports.
List<String> cleanedImports = new ArrayList<>();
for (String imported : imports.keySet()) {
if (imported.startsWith(JAVA_LANG_TYPE_PREFIX)
|| defaultPackagePrefix != null && imported.startsWith(defaultPackagePrefix)) {
// Imported type is in java.lang or in package, can be ignored.
continue;
}
cleanedImports.add(imported);
}
Collections.sort(cleanedImports);
return cleanedImports;
}
}
| 1 | 14,902 | always use brackets for if statements | googleapis-gapic-generator | java |
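A sketch of the same method with the braces the reviewer asks for; behavior is unchanged:

  public String getGenericAwareReturnType() {
    String returnType = getReturnType();
    if (returnType == null || returnType.isEmpty()) {
      return "Void";
    } else {
      return returnType;
    }
  }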
@@ -34,14 +34,13 @@ namespace Datadog.Trace.ClrProfiler.Integrations.Testing
private const string NUnitTestExecutionContextType = "NUnit.Framework.Internal.TestExecutionContext";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(NUnitIntegration));
- private static readonly FrameworkDescription _runtimeDescription;
+ private static readonly FrameworkDescription RuntimeDescription;
static NUnitIntegration()
{
// Preload environment variables.
CIEnvironmentValues.DecorateSpan(null);
-
- _runtimeDescription = FrameworkDescription.Create();
+ RuntimeDescription = FrameworkDescription.Instance;
}
/// <summary> | 1 | using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using System.Reflection;
using System.Threading;
using Datadog.Trace.Ci;
using Datadog.Trace.ClrProfiler.Emit;
using Datadog.Trace.ExtensionMethods;
using Datadog.Trace.Logging;
namespace Datadog.Trace.ClrProfiler.Integrations.Testing
{
/// <summary>
    /// Tracing integration for the NUnit testing framework
/// </summary>
public static class NUnitIntegration
{
private const string IntegrationName = "NUnit";
private const string Major3 = "3";
private const string Major3Minor0 = "3.0";
private const string NUnitAssembly = "nunit.framework";
private const string NUnitTestCommandType = "NUnit.Framework.Internal.Commands.TestCommand";
private const string NUnitTestMethodCommandType = "NUnit.Framework.Internal.Commands.TestMethodCommand";
private const string NUnitSkipCommandType = "NUnit.Framework.Internal.Commands.SkipCommand";
private const string NUnitExecuteMethod = "Execute";
private const string NUnitWorkShiftType = "NUnit.Framework.Internal.Execution.WorkShift";
private const string NUnitShutdownMethod = "ShutDown";
private const string NUnitTestResultType = "NUnit.Framework.Internal.TestResult";
private const string NUnitTestExecutionContextType = "NUnit.Framework.Internal.TestExecutionContext";
private static readonly Vendors.Serilog.ILogger Log = DatadogLogging.GetLogger(typeof(NUnitIntegration));
private static readonly FrameworkDescription _runtimeDescription;
static NUnitIntegration()
{
// Preload environment variables.
CIEnvironmentValues.DecorateSpan(null);
_runtimeDescription = FrameworkDescription.Create();
}
/// <summary>
/// Wrap the original NUnit.Framework.Internal.Commands.TestMethodCommand.Execute method by adding instrumentation code around it
/// </summary>
/// <param name="testMethodCommand">The test method command instance</param>
/// <param name="testExecutionContext">Test execution context</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
/// <returns>The original method's return value.</returns>
[InterceptMethod(
TargetAssembly = NUnitAssembly,
TargetType = NUnitTestCommandType,
TargetMethod = NUnitExecuteMethod,
TargetMinimumVersion = Major3Minor0,
TargetMaximumVersion = Major3,
TargetSignatureTypes = new[] { NUnitTestResultType, NUnitTestExecutionContextType })]
public static object TestCommand_Execute(
object testMethodCommand,
object testExecutionContext,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (testMethodCommand == null) { throw new ArgumentNullException(nameof(testMethodCommand)); }
Type testMethodCommandType = testMethodCommand.GetType();
Func<object, object, object> execute;
try
{
execute = MethodBuilder<Func<object, object, object>>
.Start(moduleVersionPtr, mdToken, opCode, NUnitExecuteMethod)
.WithConcreteType(testMethodCommandType)
.WithParameters(testExecutionContext)
.WithNamespaceAndNameFilters(NUnitTestResultType, NUnitTestExecutionContextType)
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: NUnitTestCommandType,
methodName: NUnitExecuteMethod,
instanceType: testMethodCommandType.AssemblyQualifiedName);
throw;
}
if (testMethodCommandType.FullName != NUnitTestMethodCommandType &&
testMethodCommandType.FullName != NUnitSkipCommandType)
{
return execute(testMethodCommand, testExecutionContext);
}
Scope scope = CreateScope(testExecutionContext, testMethodCommandType);
if (scope is null)
{
return execute(testMethodCommand, testExecutionContext);
}
using (scope)
{
object result = null;
Exception exception = null;
try
{
scope.Span.ResetStartTime();
result = execute(testMethodCommand, testExecutionContext);
}
catch (Exception ex)
{
exception = ex;
throw;
}
finally
{
FinishScope(scope, testExecutionContext, exception);
}
return result;
}
}
private static Scope CreateScope(object testExecutionContext, Type testMethodCommandType)
{
Scope scope = null;
try
{
if (testExecutionContext.TryGetPropertyValue<object>("CurrentTest", out object currentTest))
{
MethodInfo testMethod = null;
object[] testMethodArguments = null;
object properties = null;
if (currentTest != null)
{
if (currentTest.TryGetPropertyValue<object>("Method", out object method))
{
method?.TryGetPropertyValue<MethodInfo>("MethodInfo", out testMethod);
}
currentTest.TryGetPropertyValue<object[]>("Arguments", out testMethodArguments);
currentTest.TryGetPropertyValue<object>("Properties", out properties);
}
if (testMethod != null)
{
string testFramework = "NUnit " + testMethodCommandType.Assembly.GetName().Version;
string testSuite = testMethod.DeclaringType?.FullName;
string testName = testMethod.Name;
string skipReason = null;
List<KeyValuePair<string, string>> testArguments = null;
List<KeyValuePair<string, string>> testTraits = null;
// Get test parameters
ParameterInfo[] methodParameters = testMethod.GetParameters();
if (methodParameters?.Length > 0)
{
testArguments = new List<KeyValuePair<string, string>>();
for (int i = 0; i < methodParameters.Length; i++)
{
if (testMethodArguments != null && i < testMethodArguments.Length)
{
testArguments.Add(new KeyValuePair<string, string>($"{TestTags.Arguments}.{methodParameters[i].Name}", testMethodArguments[i]?.ToString() ?? "(null)"));
}
else
{
testArguments.Add(new KeyValuePair<string, string>($"{TestTags.Arguments}.{methodParameters[i].Name}", "(default)"));
}
}
}
// Get traits
if (properties != null)
{
properties.TryCallMethod<string, string>("Get", "_SKIPREASON", out skipReason);
if (properties.TryGetFieldValue<Dictionary<string, IList>>("inner", out Dictionary<string, IList> traits) && traits.Count > 0)
{
testTraits = new List<KeyValuePair<string, string>>();
foreach (KeyValuePair<string, IList> traitValue in traits)
{
if (traitValue.Key == "_SKIPREASON")
{
continue;
}
IEnumerable<string> values = Enumerable.Empty<string>();
if (traitValue.Value != null)
{
List<string> lstValues = new List<string>();
foreach (object valObj in traitValue.Value)
{
if (valObj is null)
{
continue;
}
lstValues.Add(valObj.ToString());
}
values = lstValues;
}
testTraits.Add(new KeyValuePair<string, string>($"{TestTags.Traits}.{traitValue.Key}", string.Join(", ", values) ?? "(null)"));
}
}
}
Tracer tracer = Tracer.Instance;
scope = tracer.StartActive("nunit.test");
Span span = scope.Span;
span.Type = SpanTypes.Test;
span.SetMetric(Tags.Analytics, 1.0d);
span.SetTraceSamplingPriority(SamplingPriority.AutoKeep);
span.ResourceName = $"{testSuite}.{testName}";
span.SetTag(TestTags.Suite, testSuite);
span.SetTag(TestTags.Name, testName);
span.SetTag(TestTags.Framework, testFramework);
span.SetTag(TestTags.Type, TestTags.TypeTest);
CIEnvironmentValues.DecorateSpan(span);
span.SetTag(CommonTags.RuntimeName, _runtimeDescription.Name);
span.SetTag(CommonTags.RuntimeOSArchitecture, _runtimeDescription.OSArchitecture);
span.SetTag(CommonTags.RuntimeOSPlatform, _runtimeDescription.OSPlatform);
span.SetTag(CommonTags.RuntimeProcessArchitecture, _runtimeDescription.ProcessArchitecture);
span.SetTag(CommonTags.RuntimeVersion, _runtimeDescription.ProductVersion);
if (testArguments != null)
{
foreach (KeyValuePair<string, string> argument in testArguments)
{
span.SetTag(argument.Key, argument.Value);
}
}
if (testTraits != null)
{
foreach (KeyValuePair<string, string> trait in testTraits)
{
span.SetTag(trait.Key, trait.Value);
}
}
if (skipReason != null)
{
span.SetTag(TestTags.Status, TestTags.StatusSkip);
span.SetTag(TestTags.SkipReason, skipReason);
span.Finish(TimeSpan.Zero);
scope.Dispose();
scope = null;
}
}
}
}
catch (Exception ex)
{
Log.Error(ex, "Error creating or populating scope.");
}
return scope;
}
private static void FinishScope(Scope scope, object testExecutionContext, Exception ex)
{
// unwrap the generic NUnitException
if (ex != null && ex.GetType().FullName == "NUnit.Framework.Internal.NUnitException")
{
ex = ex.InnerException;
}
if (ex != null && ex.GetType().FullName != "NUnit.Framework.SuccessException")
{
scope.Span.SetException(ex);
scope.Span.SetTag(TestTags.Status, TestTags.StatusFail);
}
else
{
scope.Span.SetTag(TestTags.Status, TestTags.StatusPass);
}
}
/// <summary>
/// Wrap the original NUnit.Framework.Internal.Execution.WorkShift.ShutDown method by adding instrumentation code around it
/// </summary>
/// <param name="workShift">The workshift instance</param>
/// <param name="opCode">The OpCode used in the original method call.</param>
/// <param name="mdToken">The mdToken of the original method call.</param>
/// <param name="moduleVersionPtr">A pointer to the module version GUID.</param>
[InterceptMethod(
TargetAssembly = NUnitAssembly,
TargetType = NUnitWorkShiftType,
TargetMethod = NUnitShutdownMethod,
TargetMinimumVersion = Major3Minor0,
TargetMaximumVersion = Major3,
TargetSignatureTypes = new[] { ClrNames.Void })]
public static void WorkShift_ShutDown(
object workShift,
int opCode,
int mdToken,
long moduleVersionPtr)
{
if (workShift == null) { throw new ArgumentNullException(nameof(workShift)); }
Type workShiftType = workShift.GetType();
Action<object> execute;
try
{
execute = MethodBuilder<Action<object>>
.Start(moduleVersionPtr, mdToken, opCode, NUnitShutdownMethod)
.WithConcreteType(workShiftType)
.WithNamespaceAndNameFilters(ClrNames.Void)
.Build();
}
catch (Exception ex)
{
Log.ErrorRetrievingMethod(
exception: ex,
moduleVersionPointer: moduleVersionPtr,
mdToken: mdToken,
opCode: opCode,
instrumentedType: NUnitWorkShiftType,
methodName: NUnitShutdownMethod,
instanceType: workShiftType.AssemblyQualifiedName);
throw;
}
execute(workShift);
SynchronizationContext context = SynchronizationContext.Current;
try
{
SynchronizationContext.SetSynchronizationContext(null);
                // We have to ensure the buffer is flushed after we finish the tests of an assembly.
                // For some reason, sometimes when all tests are finished, none of the callbacks handling the tracer disposal is triggered,
                // so the last spans in the buffer aren't sent to the agent.
                // Other times we hit the 500-item buffer limit within a second and the tracer starts to drop spans.
                // In a test scenario we must keep all spans.
Tracer.Instance.FlushAsync().GetAwaiter().GetResult();
}
finally
{
SynchronizationContext.SetSynchronizationContext(context);
}
}
}
}
| 1 | 18,665 | `NUnitIntegration` doesn't need to cache this anymore. | DataDog-dd-trace-dotnet | .cs |
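A sketch of what dropping the cached field could look like, assuming FrameworkDescription.Instance is the lazily created singleton the patch switches to — the static field and the constructor assignment go away, and call sites read the singleton directly:

// Instead of copying FrameworkDescription into a static field,
// read the singleton where the tags are set:
span.SetTag(CommonTags.RuntimeName, FrameworkDescription.Instance.Name);
span.SetTag(CommonTags.RuntimeVersion, FrameworkDescription.Instance.ProductVersion);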
@@ -210,8 +210,9 @@ EXTS_ACCESS_COUNTS = textwrap.dedent("""\
eventname,
bucket,
lower(CASE
- WHEN cardinality(parts) > 2 THEN concat(element_at(parts, -2), '.', element_at(parts, -1))
- WHEN cardinality(parts) = 2 THEN element_at(parts, -1)
+ WHEN cardinality(parts) > 2 AND lower(element_at(parts, -1)) = 'gz'
+ THEN concat(element_at(parts, -2), '.', element_at(parts, -1))
+ WHEN cardinality(parts) >= 2 THEN element_at(parts, -1)
ELSE ''
END
) AS ext, | 1 | """
Lambda function that runs Athena queries over CloudTrail logs and .quilt/named_packages/
and creates summaries of object and package access events.
"""
from datetime import datetime, timedelta, timezone
import os
import textwrap
import time
import boto3
ATHENA_DATABASE = os.environ['ATHENA_DATABASE']
# Bucket where CloudTrail logs are located.
CLOUDTRAIL_BUCKET = os.environ['CLOUDTRAIL_BUCKET']
# Bucket where query results will be stored.
QUERY_RESULT_BUCKET = os.environ['QUERY_RESULT_BUCKET']
# Directory where the summary files will be stored.
ACCESS_COUNTS_OUTPUT_DIR = os.environ['ACCESS_COUNTS_OUTPUT_DIR']
# A temporary directory where Athena query results will be written.
QUERY_TEMP_DIR = 'AthenaQueryResults'
# Pre-processed CloudTrail logs, persistent across different runs of the lambda.
OBJECT_ACCESS_LOG_DIR = 'ObjectAccessLog'
# Timestamp for the dir above.
LAST_UPDATE_KEY = f'{OBJECT_ACCESS_LOG_DIR}.last_updated_ts.txt'
# Athena does not allow us to write more than 100 partitions at once.
MAX_OPEN_PARTITIONS = 100
def sql_escape(s):
return s.replace("'", "''")
DROP_CLOUDTRAIL = """DROP TABLE IF EXISTS cloudtrail"""
DROP_OBJECT_ACCESS_LOG = """DROP TABLE IF EXISTS object_access_log"""
DROP_PACKAGE_HASHES = """DROP TABLE IF EXISTS package_hashes"""
CREATE_CLOUDTRAIL = textwrap.dedent(f"""\
CREATE EXTERNAL TABLE cloudtrail (
eventVersion STRING,
userIdentity STRUCT<
type: STRING,
principalId: STRING,
arn: STRING,
accountId: STRING,
invokedBy: STRING,
accessKeyId: STRING,
userName: STRING,
sessionContext: STRUCT<
attributes: STRUCT<
mfaAuthenticated: STRING,
creationDate: STRING>,
sessionIssuer: STRUCT<
type: STRING,
principalId: STRING,
arn: STRING,
accountId: STRING,
userName: STRING>>>,
eventTime STRING,
eventSource STRING,
eventName STRING,
awsRegion STRING,
sourceIpAddress STRING,
userAgent STRING,
errorCode STRING,
errorMessage STRING,
requestParameters STRING,
responseElements STRING,
additionalEventData STRING,
requestId STRING,
eventId STRING,
resources ARRAY<STRUCT<
arn: STRING,
accountId: STRING,
type: STRING>>,
eventType STRING,
apiVersion STRING,
readOnly STRING,
recipientAccountId STRING,
serviceEventDetails STRING,
sharedEventID STRING,
vpcEndpointId STRING
)
PARTITIONED BY (account STRING, region STRING, year STRING, month STRING, day STRING)
ROW FORMAT SERDE 'com.amazon.emr.hive.serde.CloudTrailSerde'
STORED AS INPUTFORMAT 'com.amazon.emr.cloudtrail.CloudTrailInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'
LOCATION 's3://{sql_escape(CLOUDTRAIL_BUCKET)}/AWSLogs/'
TBLPROPERTIES ('classification'='cloudtrail')
""")
ADD_CLOUDTRAIL_PARTITION = textwrap.dedent(f"""\
ALTER TABLE cloudtrail
ADD PARTITION (account = '{{account}}', region = '{{region}}', year = '{{year:04d}}', month = '{{month:02d}}', day = '{{day:02d}}')
LOCATION 's3://{sql_escape(CLOUDTRAIL_BUCKET)}/AWSLogs/{{account}}/CloudTrail/{{region}}/{{year:04d}}/{{month:02d}}/{{day:02d}}/'
""") # noqa: E501
CREATE_OBJECT_ACCESS_LOG = textwrap.dedent(f"""\
CREATE EXTERNAL TABLE object_access_log (
eventname STRING,
bucket STRING,
key STRING
)
PARTITIONED BY (date STRING)
ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'
STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'
OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
LOCATION 's3://{sql_escape(QUERY_RESULT_BUCKET)}/{sql_escape(OBJECT_ACCESS_LOG_DIR)}/'
TBLPROPERTIES ('parquet.compression'='SNAPPY')
""")
REPAIR_OBJECT_ACCESS_LOG = textwrap.dedent("""
MSCK REPAIR TABLE object_access_log
""")
INSERT_INTO_OBJECT_ACCESS_LOG = textwrap.dedent("""\
INSERT INTO object_access_log
SELECT eventname, bucket, key, date_format(eventtime, '%Y-%m-%d') AS date
FROM (
SELECT
eventname,
from_iso8601_timestamp(eventtime) AS eventtime,
json_extract_scalar(requestparameters, '$.bucketName') AS bucket,
json_extract_scalar(requestparameters, '$.key') AS key
FROM cloudtrail
WHERE useragent != 'athena.amazonaws.com' AND useragent NOT LIKE '%quilt3-lambdas-es-indexer%'
)
-- Filter out non-S3 events, or S3 events like ListBucket that have no object
-- Select the correct time range
WHERE bucket IS NOT NULL AND key IS NOT NULL AND
eventtime >= from_unixtime({{start_ts:f}}) AND eventtime < from_unixtime({{end_ts:f}})
""")
CREATE_PACKAGE_HASHES = textwrap.dedent(f"""\
CREATE TABLE package_hashes
WITH (
format = 'Parquet',
parquet_compression = 'SNAPPY',
external_location = 's3://{sql_escape(QUERY_RESULT_BUCKET)}/{sql_escape(QUERY_TEMP_DIR)}/package_hashes/'
)
AS
SELECT DISTINCT
-- Parse a file path like `s3://BUCKET/.quilt/named_packages/USER_NAME/PACKAGE_NAME/VERSION`.
-- Only take package names and hashes, without versions, to avoid duplicates.
split_part("$path", '/', 3) AS bucket,
concat(split_part("$path", '/', 6), '/', split_part("$path", '/', 7)) AS name,
hash
FROM named_packages
""")
# All GROUP BY statements are supposed to be:
# - in the order from most unique values to least unique
# - integers rather than strings
OBJECT_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
bucket,
key,
CAST(histogram(date) AS JSON) AS counts
FROM object_access_log
GROUP BY 3, 2, 1
""")
PACKAGE_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
package_hashes.bucket AS bucket,
name,
CAST(histogram(date) AS JSON) AS counts
FROM object_access_log JOIN package_hashes
ON object_access_log.bucket = package_hashes.bucket AND key = concat('.quilt/packages/', hash)
GROUP BY 3, 2, 1
""")
PACKAGE_VERSION_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
package_hashes.bucket AS bucket,
name,
hash,
CAST(histogram(date) AS JSON) AS counts
FROM object_access_log JOIN package_hashes
ON object_access_log.bucket = package_hashes.bucket AND key = concat('.quilt/packages/', hash)
GROUP BY 4, 3, 2, 1
""")
BUCKET_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
bucket,
CAST(histogram(date) AS JSON) AS counts
FROM object_access_log
GROUP BY 2, 1
""")
EXTS_ACCESS_COUNTS = textwrap.dedent("""\
SELECT
eventname,
bucket,
ext,
CAST(histogram(date) AS JSON) AS counts
FROM (
SELECT
eventname,
bucket,
lower(CASE
WHEN cardinality(parts) > 2 THEN concat(element_at(parts, -2), '.', element_at(parts, -1))
WHEN cardinality(parts) = 2 THEN element_at(parts, -1)
ELSE ''
END
) AS ext,
date
FROM (
SELECT
eventname,
bucket,
split(substr(element_at(split(key, '/'), -1), 2), '.') AS parts,
date
FROM object_access_log
)
)
GROUP BY 3, 2, 1
""")
athena = boto3.client('athena')
s3 = boto3.client('s3')
def start_query(query_string):
output = 's3://%s/%s/' % (QUERY_RESULT_BUCKET, QUERY_TEMP_DIR)
response = athena.start_query_execution(
QueryString=query_string,
QueryExecutionContext=dict(Database=ATHENA_DATABASE),
ResultConfiguration=dict(OutputLocation=output)
)
print("Started query:", response)
execution_id = response['QueryExecutionId']
return execution_id
def query_finished(execution_id):
response = athena.get_query_execution(QueryExecutionId=execution_id)
print("Query status:", response)
state = response['QueryExecution']['Status']['State']
if state == 'RUNNING' or state == 'QUEUED':
return False
elif state == 'SUCCEEDED':
return True
elif state == 'FAILED':
raise Exception("Query failed! QueryExecutionId=%r" % execution_id)
elif state == 'CANCELLED':
raise Exception("Query cancelled! QueryExecutionId=%r" % execution_id)
else:
assert False, "Unexpected state: %s" % state
# Athena limitation for DDL queries.
MAX_CONCURRENT_QUERIES = 20
def run_multiple_queries(query_list):
results = [None] * len(query_list)
remaining_queries = list(enumerate(query_list))
remaining_queries.reverse() # Just to make unit tests more sane: we use pop() later, so keep the order the same.
pending_execution_ids = set()
while remaining_queries or pending_execution_ids:
# Remove completed queries. Make a copy of the set before iterating over it.
for execution_id in list(pending_execution_ids):
if query_finished(execution_id):
pending_execution_ids.remove(execution_id)
# Start new queries.
while remaining_queries and len(pending_execution_ids) < MAX_CONCURRENT_QUERIES:
idx, query = remaining_queries.pop()
execution_id = start_query(query)
results[idx] = execution_id
pending_execution_ids.add(execution_id)
time.sleep(5)
assert all(results)
return results
def delete_dir(bucket, prefix):
params = dict(
Bucket=bucket,
Prefix=prefix,
MaxKeys=1000, # The max we're allowed to delete at once.
)
paginator = s3.get_paginator('list_objects_v2')
for list_response in paginator.paginate(**params):
contents = list_response.get('Contents')
if not contents:
break
delete_response = s3.delete_objects(
Bucket=QUERY_RESULT_BUCKET,
Delete=dict(
Objects=[dict(
Key=obj['Key']
) for obj in contents]
)
)
errors = delete_response.get('Errors')
if errors:
print(errors)
raise Exception(f"Failed to delete dir: bucket={bucket!r}, prefix={prefix!r}")
def now():
"""Only exists for unit testing, cause patching datetime.utcnow() is pretty much impossible."""
return datetime.now(timezone.utc)
def handler(event, context):
# End of the CloudTrail time range we're going to look at. Subtract 15min
# because events can be delayed by that much.
end_ts = now() - timedelta(minutes=15)
# Start of the CloudTrail time range: the end timestamp from the previous run, or a year ago if it's the first run.
try:
timestamp_str = s3.get_object(Bucket=QUERY_RESULT_BUCKET, Key=LAST_UPDATE_KEY)['Body'].read()
start_ts = datetime.fromtimestamp(float(timestamp_str), timezone.utc)
except s3.exceptions.NoSuchKey as ex:
start_ts = end_ts - timedelta(days=365)
# We start from scratch, so make sure we don't have any old data.
delete_dir(QUERY_RESULT_BUCKET, OBJECT_ACCESS_LOG_DIR)
# We can't write more than 100 days worth of data at a time due to Athena's partitioning limitations.
# Moreover, we don't want the lambda to time out, so just process 100 days
# and let the next invocation handle the rest.
end_ts = min(end_ts, start_ts + timedelta(days=MAX_OPEN_PARTITIONS-1))
# Delete the temporary directory where Athena query results are written to.
delete_dir(QUERY_RESULT_BUCKET, QUERY_TEMP_DIR)
# Create a CloudTrail table, but only with partitions for the last N days, to avoid scanning all of the data.
# A bucket can have data for multiple accounts and multiple regions, so those need to be handled first.
partition_queries = []
for account_response in s3.list_objects_v2(
Bucket=CLOUDTRAIL_BUCKET, Prefix='AWSLogs/', Delimiter='/').get('CommonPrefixes') or []:
account = account_response['Prefix'].split('/')[1]
for region_response in s3.list_objects_v2(
Bucket=CLOUDTRAIL_BUCKET,
Prefix=f'AWSLogs/{account}/CloudTrail/', Delimiter='/').get('CommonPrefixes') or []:
region = region_response['Prefix'].split('/')[3]
date = start_ts.date()
while date <= end_ts.date():
query = ADD_CLOUDTRAIL_PARTITION.format(
account=sql_escape(account),
region=sql_escape(region),
year=date.year,
month=date.month,
day=date.day
)
partition_queries.append(query)
date += timedelta(days=1)
# Drop old Athena tables from previous runs.
# (They're in the DB owned by the stack, so safe to do.)
run_multiple_queries([DROP_CLOUDTRAIL, DROP_OBJECT_ACCESS_LOG, DROP_PACKAGE_HASHES])
# Create new Athena tables.
run_multiple_queries([CREATE_CLOUDTRAIL, CREATE_OBJECT_ACCESS_LOG, CREATE_PACKAGE_HASHES])
# Load object access log partitions, after the object access log table is created.
# Create CloudTrail partitions, after the CloudTrail table is created.
run_multiple_queries([REPAIR_OBJECT_ACCESS_LOG] + partition_queries)
# Delete the old timestamp: if the INSERT query or put_object fail, make sure we regenerate everything next time,
# instead of ending up with duplicate logs.
s3.delete_object(Bucket=QUERY_RESULT_BUCKET, Key=LAST_UPDATE_KEY)
# Scan CloudTrail and insert new data into "object_access_log".
insert_query = INSERT_INTO_OBJECT_ACCESS_LOG.format(start_ts=start_ts.timestamp(), end_ts=end_ts.timestamp())
run_multiple_queries([insert_query])
# Save the end timestamp.
s3.put_object(
Bucket=QUERY_RESULT_BUCKET, Key=LAST_UPDATE_KEY, Body=str(end_ts.timestamp()), ContentType='text/plain')
queries = [
('Objects', OBJECT_ACCESS_COUNTS),
('Packages', PACKAGE_ACCESS_COUNTS),
('PackageVersions', PACKAGE_VERSION_ACCESS_COUNTS),
('Bucket', BUCKET_ACCESS_COUNTS),
('Exts', EXTS_ACCESS_COUNTS)
]
execution_ids = run_multiple_queries([query for _, query in queries])
for (filename, _), execution_id in zip(queries, execution_ids):
src_key = f'{QUERY_TEMP_DIR}/{execution_id}.csv'
dest_key = f'{ACCESS_COUNTS_OUTPUT_DIR}/{filename}.csv'
s3.copy(
CopySource=dict(
Bucket=QUERY_RESULT_BUCKET,
Key=src_key
),
Bucket=QUERY_RESULT_BUCKET,
Key=dest_key
)
| 1 | 18,547 | Why did you change it to `>=` here? | quiltdata-quilt | py |
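One plausible answer (the dataset records no reply): with the new 'gz' guard, a key whose name has more than two dot-separated parts but does not end in .gz no longer matches the first branch, so the second branch must accept it — hence '>= 2' instead of '= 2'. A Python mirror of the CASE logic, for illustration:

def ext(parts):
    # 'a.tar.gz' -> 'tar.gz' via the first branch; 'a.b.txt' also has
    # three parts but no 'gz' suffix, so it must fall through to the
    # second branch and yield 'txt' -- which requires '>=' not '='.
    if len(parts) > 2 and parts[-1].lower() == "gz":
        return f"{parts[-2]}.{parts[-1]}".lower()
    if len(parts) >= 2:
        return parts[-1].lower()
    return ""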