name: string (lengths 12–178)
code_snippet: string (lengths 8–36.5k)
score: float64 (range 3.26–3.68)
querydsl_GuavaGroupBy_groupBy
/** * Create a new GroupByBuilder for the given key expression * * @param key key for aggregation * @return builder for further specification */ public static <K> GuavaGroupByBuilder<K> groupBy(Expression<K> key) { return new GuavaGroupByBuilder<K>(key); }
3.68
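A hedged usage sketch of the returned builder, assuming GuavaGroupBy mirrors the core QueryDSL GroupBy API; the query object and the post/comment metamodel names are illustrative assumptions, not part of the snippet:
// Hypothetical metamodel (QPost post, QComment comment) and an existing QueryDSL query.
// The builder from groupBy(key) is finished with .as(...) and passed to transform(...).
Map<Integer, List<Comment>> commentsByPost = query
    .from(post, comment)
    .where(comment.post.id.eq(post.id))
    .transform(GuavaGroupBy.groupBy(post.id).as(GuavaGroupBy.list(comment)));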
morf_AnalyseTable_isApplied
/** * {@inheritDoc} * * @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources) */ @Override public boolean isApplied(Schema schema, ConnectionResources database) { return true; }
3.68
hmily_FileUtils_writeFile
/** * Write file. * * @param fullFileName the full file name * @param contents the contents */ public static void writeFile(final String fullFileName, final byte[] contents) { try { RandomAccessFile raf = new RandomAccessFile(fullFileName, "rw"); try (FileChannel channel = raf.getChannel()) { ByteBuffer buffer = ByteBuffer.allocate(contents.length); buffer.put(contents); buffer.flip(); while (buffer.hasRemaining()) { channel.write(buffer); } channel.force(true); } } catch (IOException e) { e.printStackTrace(); } }
3.68
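For context, a minimal self-contained sketch of the same NIO pattern the method relies on: wrap the bytes in a ByteBuffer, drain it into a FileChannel in a loop (writes may be partial), then force the data to disk. The file name is illustrative.
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;

public class WriteDemo {
    public static void main(String[] args) throws Exception {
        byte[] contents = "hello".getBytes(StandardCharsets.UTF_8);
        try (RandomAccessFile raf = new RandomAccessFile("/tmp/demo.bin", "rw");
             FileChannel channel = raf.getChannel()) {
            ByteBuffer buffer = ByteBuffer.wrap(contents); // wrap() avoids the allocate+put copy
            while (buffer.hasRemaining()) {
                channel.write(buffer); // a single write may be partial, hence the loop
            }
            channel.force(true); // flush file data and metadata to the storage device
        }
    }
}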
hadoop_AccessTokenTimer_shouldRefresh
/** * Return true if the current token has expired or will expire within the * EXPIRE_BUFFER_MS (to give ample wiggle room for the call to be made to * the server). */ public boolean shouldRefresh() { long lowerLimit = nextRefreshMSSinceEpoch - EXPIRE_BUFFER_MS; long currTime = timer.now(); return currTime > lowerLimit; }
3.68
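A worked example of the buffer arithmetic with illustrative numbers: if the token expires at 100,000 ms since epoch and the buffer is 30,000 ms, refresh is signalled once the clock passes 70,000 ms.
long nextRefreshMsSinceEpoch = 100_000L; // illustrative token expiry time
long expireBufferMs = 30_000L;           // illustrative value of EXPIRE_BUFFER_MS
long lowerLimit = nextRefreshMsSinceEpoch - expireBufferMs; // 70_000
System.out.println(95_000L > lowerLimit); // true  -> inside the buffer window, refresh now
System.out.println(50_000L > lowerLimit); // false -> plenty of time left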
flink_DataStream_print
/** * Writes a DataStream to the standard output stream (stdout). * * <p>For each element of the DataStream the result of {@link Object#toString()} is written. * * <p>NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink * worker. * * @param sinkIdentifier The string to prefix the output with. * @return The closed DataStream. */ @PublicEvolving public DataStreamSink<T> print(String sinkIdentifier) { PrintSinkFunction<T> printFunction = new PrintSinkFunction<>(sinkIdentifier, false); return addSink(printFunction).name("Print to Std. Out"); }
3.68
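A hedged usage sketch for a standard Flink streaming job; the element values and job name are illustrative, and the exact stdout prefix format can vary by Flink version:
StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.fromElements(1, 2, 3)
    .print("debug"); // each worker's stdout lines are prefixed with the identifier
env.execute("print-demo");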
hbase_MasterProcedureScheduler_waitRegion
// ============================================================================ // Region Locking Helpers // ============================================================================ /** * Suspend the procedure if the specified region is already locked. * @param procedure the procedure trying to acquire the lock on the region * @param regionInfo the region we are trying to lock * @return true if the procedure has to wait for the regions to be available */ public boolean waitRegion(final Procedure<?> procedure, final RegionInfo regionInfo) { return waitRegions(procedure, regionInfo.getTable(), regionInfo); }
3.68
hadoop_LightWeightLinkedSet_pollAll
/** * Remove all elements from the set and return them in order. Traverses the * linked list without touching the hashtable - a faster version of the parent * method. */ @Override public List<T> pollAll() { List<T> retList = new ArrayList<T>(size); while (head != null) { retList.add(head.element); head = head.after; } this.clear(); return retList; }
3.68
framework_AbstractComponent_setComponentError
/** * Sets the component's error message. The message may contain certain XML * tags; for more information see * {@link Component.ErrorMessage#ErrorMessage(String, int)}. * * @param componentError * the new <code>ErrorMessage</code> of the component. */ public void setComponentError(ErrorMessage componentError) { this.componentError = componentError; fireComponentErrorEvent(); markAsDirty(); }
3.68
hbase_OrderedBytes_blobVarEncodedLength
/** * Calculate the expected BlobVar encoded length based on unencoded length. */ public static int blobVarEncodedLength(int len) { if (0 == len) return 2; // 1-byte header + 1-byte terminator else return (int) Math.ceil((len * 8) // 8-bits per input byte / 7.0) // 7-bits of input data per encoded byte, rounded up + 1; // + 1-byte header }
3.68
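A worked example of the formula with illustrative input lengths:
// OrderedBytes.blobVarEncodedLength(0) == 2 -> 1-byte header + 1-byte terminator
// OrderedBytes.blobVarEncodedLength(3) == 5 -> ceil(3 * 8 / 7.0) = 4 payload bytes, + 1 header
// OrderedBytes.blobVarEncodedLength(7) == 9 -> ceil(56 / 7.0) = 8 payload bytes, + 1 header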
framework_GridSingleSelect_getSelectedItems
/** * Returns a singleton set of the currently selected item or an empty set if * no item is selected. * * @return a singleton set of the selected item if any, an empty set * otherwise * * @see #getSelectedItem() */ public Set<T> getSelectedItems() { return model.getSelectedItems(); }
3.68
hadoop_FederationProtocolPBTranslator_readInstance
/** * Read instance from base64 data. * * @param base64String String containing Base64 data. * @throws IOException If the protobuf message build fails. */ @SuppressWarnings("unchecked") public void readInstance(String base64String) throws IOException { byte[] bytes = Base64.decodeBase64(base64String); Message msg = getBuilder().mergeFrom(bytes).build(); this.proto = (P) msg; }
3.68
pulsar_KeyValueSchemaImpl_encode
// encode as bytes: [key.length][key.bytes][value.length][value.bytes] or [value.bytes] public byte[] encode(KeyValue<K, V> message) { if (keyValueEncodingType != null && keyValueEncodingType == KeyValueEncodingType.INLINE) { return KeyValue.encode( message.getKey(), keySchema, message.getValue(), valueSchema ); } else { if (message.getValue() == null) { return null; } return valueSchema.encode(message.getValue()); } }
3.68
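A minimal self-contained sketch of the INLINE layout named in the comment, [key.length][key.bytes][value.length][value.bytes]; this illustrates the framing only and is not Pulsar's actual implementation:
import java.nio.ByteBuffer;

public class FramingDemo {
    // Length-prefix the key and the value, matching the documented layout.
    static byte[] frame(byte[] key, byte[] value) {
        ByteBuffer buf = ByteBuffer.allocate(4 + key.length + 4 + value.length);
        buf.putInt(key.length).put(key);     // [key.length][key.bytes]
        buf.putInt(value.length).put(value); // [value.length][value.bytes]
        return buf.array();
    }
}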
flink_RocksDBResourceContainer_getWriteBufferManagerCapacity
/** * Gets write buffer manager capacity. * * @return the capacity of the write buffer manager, or null if write buffer manager is not * enabled. */ public Long getWriteBufferManagerCapacity() { if (sharedResources == null) { return null; } return sharedResources.getResourceHandle().getWriteBufferManagerCapacity(); }
3.68
morf_DatabaseType_parseJdbcUrl
/** * Extracts the database connection details from a JDBC URL. * * <p>Finds the first available {@link DatabaseType} with a matching protocol, * then uses that to parse out the connection details.</p> * * <p>If there are multiple matches for the protocol, {@link IllegalArgumentException} * will be thrown.</p> * * <p>No performance guarantees are made, but it will be <em>at best</em> * <code>O(n)</code>, where <code>n</code> is the number of registered * database types.</p> * * @param url The JDBC URL. * @return The connection details. * @throws IllegalArgumentException If no database type matching the URL * protocol is found or the matching database type fails to parse * the URL. */ public static JdbcUrlElements parseJdbcUrl(String url) { JdbcUrlElements result = null; for (DatabaseType databaseType : registeredTypes.values()) { Optional<JdbcUrlElements> connectionDetails = databaseType.extractJdbcUrl(url); if (connectionDetails.isPresent()) { if (result != null) throw new IllegalArgumentException("[" + url + "] matches more than one registered database type"); result = connectionDetails.get(); } } if (result == null) throw new IllegalArgumentException("[" + url + "] is not a valid JDBC URL"); return result; }
3.68
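A hedged usage sketch; the URL is illustrative:
// Resolves the matching registered DatabaseType and parses the URL into its elements.
// Throws IllegalArgumentException for an unknown protocol or an ambiguous match.
JdbcUrlElements elements = DatabaseType.parseJdbcUrl("jdbc:mysql://localhost:3306/mydb");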
hudi_BaseHoodieWriteClient_scheduleLogCompaction
/** * Schedules a new log compaction instant. * @param extraMetadata Extra Metadata to be stored */ public Option<String> scheduleLogCompaction(Option<Map<String, String>> extraMetadata) throws HoodieIOException { String instantTime = createNewInstantTime(); return scheduleLogCompactionAtInstant(instantTime, extraMetadata) ? Option.of(instantTime) : Option.empty(); }
3.68
hadoop_AppCatalog_addRestResourceClasses
/** * Add your own resources here. */ private void addRestResourceClasses(final Set<Class<?>> resources) { resources.add(AppDetailsController.class); }
3.68
hbase_ZKSplitLog_getEncodedNodeName
/** * Gets the full path node name for the log file being split. This method will url encode the * filename. * @param zkw zk reference * @param filename log file name (only the basename) */ public static String getEncodedNodeName(ZKWatcher zkw, String filename) { return ZNodePaths.joinZNode(zkw.getZNodePaths().splitLogZNode, encode(filename)); }
3.68
querydsl_AntMetaDataExporter_addNumericMapping
/** * Adds NumericMapping instance, called by Ant */ public void addNumericMapping(NumericMapping mapping) { numericMappings.add(mapping); }
3.68
hbase_CompositeImmutableSegment_last
// *** Methods for SegmentsScanner @Override public Cell last() { throw new IllegalStateException("Not supported by CompositeImmutableSegment"); }
3.68
druid_DruidPooledConnection_getGloablVariables
/** * @since 1.0.28 */ public Map<String, Object> getGloablVariables() { return this.holder.globalVariables; }
3.68
hbase_HBaseConfiguration_subset
/** * Returns a subset of the configuration properties, matching the given key prefix. The prefix is * stripped from the return keys, ie. when calling with a prefix of "myprefix", the entry * "myprefix.key1 = value1" would be returned as "key1 = value1". If an entry's key matches the * prefix exactly ("myprefix = value2"), it will <strong>not</strong> be included in the results, * since it would show up as an entry with an empty key. */ public static Configuration subset(Configuration srcConf, String prefix) { Configuration newConf = new Configuration(false); for (Map.Entry<String, String> entry : srcConf) { if (entry.getKey().startsWith(prefix)) { String newKey = entry.getKey().substring(prefix.length()); // avoid entries that would produce an empty key if (!newKey.isEmpty()) { newConf.set(newKey, entry.getValue()); } } } return newConf; }
3.68
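A usage sketch of the prefix stripping; keys and values are illustrative. Note the method strips the literal prefix string, so include the trailing dot to get clean keys:
Configuration src = new Configuration(false);
src.set("myprefix.key1", "value1");
Configuration sub = HBaseConfiguration.subset(src, "myprefix.");
System.out.println(sub.get("key1")); // "value1"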
morf_CompositeSchema_viewNames
/** * @see org.alfasoftware.morf.metadata.Schema#viewNames() */ @Override public Collection<String> viewNames() { Set<String> result = Sets.newHashSet(); Set<String> seenViews = Sets.newHashSet(); for (Schema schema : delegates) { for (View view : schema.views()) { if (seenViews.add(view.getName().toUpperCase())) { result.add(view.getName()); } } } return result; }
3.68
hadoop_ContainerServiceRecordProcessor_getRecordTypes
/** * Returns the record types associated with a container service record. * @return the record type array */ @Override public int[] getRecordTypes() { return new int[] {Type.A, Type.AAAA, Type.PTR, Type.TXT}; }
3.68
flink_WindowedStateTransformation_aggregate
/** * Applies the given window function to each window. The window function is called for each * evaluation of the window for each key individually. The output of the window function is * interpreted as a regular non-windowed stream. * * <p>Arriving data is incrementally aggregated using the given aggregate function. This means * that the window function typically has only a single value to process when called. * * @param aggregateFunction The aggregation function that is used for incremental aggregation. * @param windowFunction The window function. * @param accumulatorType Type information for the internal accumulator type of the aggregation * function * @return The data stream that is the result of applying the window function to the window. * @param <ACC> The type of the AggregateFunction's accumulator * @param <V> The type of AggregateFunction's result, and the WindowFunction's input * @param <R> The type of the elements in the resulting stream, equal to the WindowFunction's * result type */ @PublicEvolving public <ACC, V, R> StateBootstrapTransformation<T> aggregate( AggregateFunction<T, ACC, V> aggregateFunction, ProcessWindowFunction<V, R, K, W> windowFunction, TypeInformation<ACC> accumulatorType) { checkNotNull(aggregateFunction, "aggregateFunction"); checkNotNull(windowFunction, "windowFunction"); checkNotNull(accumulatorType, "accumulatorType"); if (aggregateFunction instanceof RichFunction) { throw new UnsupportedOperationException( "This aggregate function cannot be a RichFunction."); } // clean the closures windowFunction = input.getExecutionEnvironment().clean(windowFunction); aggregateFunction = input.getExecutionEnvironment().clean(aggregateFunction); WindowOperator<K, T, ?, R, W> operator = builder.aggregate(aggregateFunction, windowFunction, accumulatorType); SavepointWriterOperatorFactory factory = (timestamp, path) -> new StateBootstrapWrapperOperator<>(timestamp, path, operator); return new StateBootstrapTransformation<>( input, operatorMaxParallelism, factory, keySelector, keyType); }
3.68
hadoop_OBSCommonUtils_keyToQualifiedPath
/** * Convert a key to a fully qualified path. * * @param owner the owner OBSFileSystem instance * @param key input key * @return the fully qualified path including URI scheme and bucket name. */ static Path keyToQualifiedPath(final OBSFileSystem owner, final String key) { return qualify(owner, keyToPath(key)); }
3.68
hadoop_BlockData_getBlockSize
/** * Gets the size of each block. * @return the size of each block. */ public int getBlockSize() { return blockSize; }
3.68
hbase_CompactingMemStore_getSegments
// the getSegments() method is used for tests only @Override protected List<Segment> getSegments() { List<? extends Segment> pipelineList = pipeline.getSegments(); List<Segment> list = new ArrayList<>(pipelineList.size() + 2); list.add(getActive()); list.addAll(pipelineList); list.addAll(snapshot.getAllSegments()); return list; }
3.68
framework_TableMoveFocusWithSelection_getTestDescription
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#getTestDescription() */ @Override protected String getTestDescription() { return "Changing selection in single select mode should move focus"; }
3.68
pulsar_PatternMultiTopicsConsumerImpl_run
// TimerTask to recheck topics change, and trigger subscribe/unsubscribe based on the change. @Override public void run(Timeout timeout) throws Exception { if (timeout.isCancelled()) { return; } client.getLookup().getTopicsUnderNamespace(namespaceName, subscriptionMode, topicsPattern.pattern(), topicsHash) .thenCompose(getTopicsResult -> { if (log.isDebugEnabled()) { log.debug("Get topics under namespace {}, topics.size: {}, topicsHash: {}, filtered: {}", namespaceName, getTopicsResult.getTopics().size(), getTopicsResult.getTopicsHash(), getTopicsResult.isFiltered()); getTopicsResult.getTopics().forEach(topicName -> log.debug("Get topics under namespace {}, topic: {}", namespaceName, topicName)); } final List<String> oldTopics = new ArrayList<>(getPartitionedTopics()); for (String partition : getPartitions()) { TopicName topicName = TopicName.get(partition); if (!topicName.isPartitioned() || !oldTopics.contains(topicName.getPartitionedTopicName())) { oldTopics.add(partition); } } return updateSubscriptions(topicsPattern, this::setTopicsHash, getTopicsResult, topicsChangeListener, oldTopics); }).exceptionally(ex -> { log.warn("[{}] Failed to recheck topics change: {}", topic, ex.getMessage()); return null; }).thenAccept(__ -> { // schedule the next re-check task this.recheckPatternTimeout = client.timer() .newTimeout(PatternMultiTopicsConsumerImpl.this, Math.max(1, conf.getPatternAutoDiscoveryPeriod()), TimeUnit.SECONDS); }); }
3.68
flink_HiveParserQueryState_createConf
/** * If there are query-specific settings to overlay, then create a copy of the config. There are two * cases where we need to clone the session config that's being passed to the hive driver: 1. Async query - * if the client changes a config setting, that shouldn't be reflected in the execution already * underway; 2. confOverlay - the query-specific settings should only be applied to the query * config and not the session. * * @return new configuration */ private HiveConf createConf(HiveConf conf, Map<String, String> confOverlay, boolean runAsync) { if (confOverlay != null && !confOverlay.isEmpty()) { conf = (conf == null ? new HiveConf() : new HiveConf(conf)); // apply overlay query specific settings, if any for (Map.Entry<String, String> confEntry : confOverlay.entrySet()) { try { conf.verifyAndSet(confEntry.getKey(), confEntry.getValue()); } catch (IllegalArgumentException e) { throw new RuntimeException("Error applying statement specific settings", e); } } } else if (runAsync) { conf = (conf == null ? new HiveConf() : new HiveConf(conf)); } if (conf == null) { conf = new HiveConf(); } conf.setVar(HiveConf.ConfVars.HIVEQUERYID, QueryPlan.makeQueryId()); return conf; }
3.68
hadoop_DataNodeFaultInjector_logDelaySendingPacketDownstream
/** * Used as a hook to intercept the latency of sending packet. */ public void logDelaySendingPacketDownstream( final String mirrAddr, final long delayMs) throws IOException { }
3.68
flink_RpcEndpoint_schedule
/** * The mainScheduledExecutor manages the given callable and sends it to the gateway after * the given delay. * * @param callable the callable to execute * @param delay the time from now to delay the execution * @param unit the time unit of the delay parameter * @param <V> result type of the callable * @return a ScheduledFuture which holds the future value of the given callable */ @Override public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) { final long delayMillis = TimeUnit.MILLISECONDS.convert(delay, unit); FutureTask<V> ft = new FutureTask<>(callable); if (mainScheduledExecutor.isShutdown()) { log.warn( "The scheduled executor service is shutdown and ignores the callable {}", callable); } else { mainScheduledExecutor.schedule( () -> gateway.runAsync(ft), delayMillis, TimeUnit.MILLISECONDS); } return new ScheduledFutureAdapter<>(ft, delayMillis, TimeUnit.MILLISECONDS); }
3.68
framework_SelectAllConstantViewport_setup
/* * (non-Javadoc) * * @see com.vaadin.tests.components.AbstractTestUI#setup(com.vaadin.server. * VaadinRequest) */ @Override protected void setup(VaadinRequest request) { final Table table = new Table(); table.addContainerProperty("", Integer.class, null); table.setSizeFull(); table.setMultiSelect(true); table.setNullSelectionAllowed(true); table.setSelectable(true); CheckBox selectAllCheckbox = new CheckBox("Select All"); selectAllCheckbox.addValueChangeListener(event -> { if (event.getValue()) { table.setValue(table.getItemIds()); } else { table.setValue(null); } }); for (int i = 0; i < 200; i++) { table.addItem(new Object[] { new Integer(i) }, new Integer(i)); } table.setCurrentPageFirstItemIndex(185); final CssLayout layout = new CssLayout(); layout.addComponent(selectAllCheckbox); layout.addComponent(table); layout.setSizeFull(); addComponent(layout); }
3.68
framework_LegacyLocatorStrategy_getPathForWidget
/** * Creates a locator String for the given widget. The path can be used to * locate the widget using {@link #getWidgetFromPath(String, Widget)}. * <p/> * Returns null if no path can be determined for the widget or if the widget * is null. * * @param w * The target widget * @return A String locator for the widget */ private String getPathForWidget(Widget w) { if (w == null) { return null; } String elementId = w.getElement().getId(); if (elementId != null && !elementId.isEmpty() && !elementId.startsWith("gwt-uid-")) { // Use PID_S+id if the user has set an id but do not use it for auto- // generated ids as these might not be consistent return "PID_S" + elementId; } else if (w instanceof VUI) { return ""; } else if (w instanceof VWindow) { Connector windowConnector = ConnectorMap.get(client) .getConnector(w); List<WindowConnector> subWindowList = client.getUIConnector() .getSubWindows(); int indexOfSubWindow = subWindowList.indexOf(windowConnector); return PARENTCHILD_SEPARATOR + "VWindow[" + indexOfSubWindow + "]"; } else if (w instanceof RootPanel) { return ROOT_ID; } Widget parent = w.getParent(); String basePath = getPathForWidget(parent); if (basePath == null) { return null; } String simpleName = Util.getSimpleName(w); /* * Check if the parent implements Iterable. At least VPopupView does not * implement HasWidgets so we cannot check for that. */ if (!(parent instanceof Iterable<?>)) { // Parent does not implement Iterable so we cannot find out which // child this is return null; } int pos = 0; for (Object child : (Iterable<?>) parent) { if (child == w) { return basePath + PARENTCHILD_SEPARATOR + simpleName + "[" + pos + "]"; } String simpleName2 = Util.getSimpleName(child); if (simpleName.equals(simpleName2)) { pos++; } } return null; }
3.68
flink_HiveStatsUtil_updateStats
/** * Update original table statistics parameters. * * @param newTableStats new catalog table statistics. * @param parameters original hive table statistics parameters. */ public static void updateStats( CatalogTableStatistics newTableStats, Map<String, String> parameters) { parameters.put(StatsSetupConst.ROW_COUNT, String.valueOf(newTableStats.getRowCount())); parameters.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(newTableStats.getTotalSize())); parameters.put(StatsSetupConst.NUM_FILES, String.valueOf(newTableStats.getFileCount())); parameters.put( StatsSetupConst.RAW_DATA_SIZE, String.valueOf(newTableStats.getRawDataSize())); }
3.68
hadoop_LeveldbIterator_hasPrev
/** * @return true if there is a previous entry in the iteration. */ public boolean hasPrev() throws DBException { try { return iter.hasPrev(); } catch (DBException e) { throw e; } catch (RuntimeException e) { throw new DBException(e.getMessage(), e); } }
3.68
hudi_HoodieTableMetadataUtil_convertFilesToBloomFilterRecords
/** * Convert added and deleted files metadata to bloom filter index records. */ public static HoodieData<HoodieRecord> convertFilesToBloomFilterRecords(HoodieEngineContext engineContext, Map<String, List<String>> partitionToDeletedFiles, Map<String, Map<String, Long>> partitionToAppendedFiles, MetadataRecordsGenerationParams recordsGenerationParams, String instantTime) { // Create the tuple (partition, filename, isDeleted) to handle both deletes and appends final List<Tuple3<String, String, Boolean>> partitionFileFlagTupleList = fetchPartitionFileInfoTriplets(partitionToDeletedFiles, partitionToAppendedFiles); // Create records MDT int parallelism = Math.max(Math.min(partitionFileFlagTupleList.size(), recordsGenerationParams.getBloomIndexParallelism()), 1); return engineContext.parallelize(partitionFileFlagTupleList, parallelism).flatMap(partitionFileFlagTuple -> { final String partitionName = partitionFileFlagTuple.f0; final String filename = partitionFileFlagTuple.f1; final boolean isDeleted = partitionFileFlagTuple.f2; if (!FSUtils.isBaseFile(new Path(filename))) { LOG.warn(String.format("Ignoring file %s as it is not a base file", filename)); return Stream.<HoodieRecord>empty().iterator(); } // Read the bloom filter from the base file if the file is being added ByteBuffer bloomFilterBuffer = ByteBuffer.allocate(0); if (!isDeleted) { final String pathWithPartition = partitionName + "/" + filename; final Path addedFilePath = new Path(recordsGenerationParams.getDataMetaClient().getBasePath(), pathWithPartition); bloomFilterBuffer = readBloomFilter(recordsGenerationParams.getDataMetaClient().getHadoopConf(), addedFilePath); // If reading the bloom filter failed then do not add a record for this file if (bloomFilterBuffer == null) { LOG.error("Failed to read bloom filter from " + addedFilePath); return Stream.<HoodieRecord>empty().iterator(); } } final String partition = getPartitionIdentifier(partitionName); return Stream.<HoodieRecord>of(HoodieMetadataPayload.createBloomFilterMetadataRecord( partition, filename, instantTime, recordsGenerationParams.getBloomFilterType(), bloomFilterBuffer, partitionFileFlagTuple.f2)) .iterator(); }); }
3.68
flink_OperationTreeBuilder_aliasBackwardFields
/** Rename fields in the input {@link QueryOperation}. */ private QueryOperation aliasBackwardFields( QueryOperation inputOperation, List<String> alias, int aliasStartIndex) { if (!alias.isEmpty()) { List<String> namesBeforeAlias = inputOperation.getResolvedSchema().getColumnNames(); List<String> namesAfterAlias = new ArrayList<>(namesBeforeAlias); for (int i = 0; i < alias.size(); i++) { int withOffset = aliasStartIndex + i; namesAfterAlias.remove(withOffset); namesAfterAlias.add(withOffset, alias.get(i)); } return this.alias( namesAfterAlias.stream() .map(ApiExpressionUtils::unresolvedRef) .collect(Collectors.toList()), inputOperation); } else { return inputOperation; } }
3.68
hbase_HRegion_hasSeenWrongRegion
/** Returns If a {@link WrongRegionException} has been observed. */ boolean hasSeenWrongRegion() { return wrongRegion; }
3.68
pulsar_SubscriptionPolicies_checkEmpty
/** * Check if this SubscriptionPolicies is empty. Empty SubscriptionPolicies can be auto removed from TopicPolicies. * @return true if this SubscriptionPolicies is empty. */ public boolean checkEmpty() { return dispatchRate == null; }
3.68
streampipes_PrimitivePropertyBuilder_label
/** * Assigns a human-readable label to the event property. The label is used in the StreamPipes UI to better * explain the meaning of the property to users. * * @param label the human-readable label * @return this */ public PrimitivePropertyBuilder label(String label) { this.eventProperty.setLabel(label); return this; }
3.68
flink_FileMergingSnapshotManager_getManagedDirName
/** * Generate a unique managed directory name for one subtask. * * @return the managed directory name. */ public String getManagedDirName() { return String.format("%s_%d_%d_", operatorIDString, subtaskIndex, parallelism) .replaceAll("[^a-zA-Z0-9\\-]", "_"); }
3.68
flink_OutputFormatBase_postOpen
/** * Initialize the OutputFormat. This method is called at the end of {@link * OutputFormatBase#open(int, int)}. */ protected void postOpen() {}
3.68
flink_DynamicSinkUtils_convertToRowLevelUpdate
/** * Convert a tableModify node to a RelNode representing a row-level update. * * @return a tuple containing the RelNode and the indices of the required physical columns for * row-level update. */ private static Tuple2<RelNode, int[]> convertToRowLevelUpdate( LogicalTableModify tableModify, ContextResolvedTable contextResolvedTable, SupportsRowLevelUpdate.RowLevelUpdateInfo rowLevelUpdateInfo, String tableDebugName, DataTypeFactory dataTypeFactory, FlinkTypeFactory typeFactory) { // get the required columns ResolvedSchema resolvedSchema = contextResolvedTable.getResolvedSchema(); Optional<List<Column>> optionalColumns = rowLevelUpdateInfo.requiredColumns(); List<Column> requiredColumns = optionalColumns.orElse(resolvedSchema.getColumns()); // get the root table scan, which we may need to rewrite LogicalTableScan tableScan = getSourceTableScan(tableModify); Tuple2<List<Integer>, List<MetadataColumn>> colsIndexAndExtraMetaCols = getRequireColumnsIndexAndExtraMetaCols(tableScan, requiredColumns, resolvedSchema); List<Integer> colIndexes = colsIndexAndExtraMetaCols.f0; List<MetadataColumn> metadataColumns = colsIndexAndExtraMetaCols.f1; // if there are extra meta columns, we need to modify the underlying // LogicalTableScan so that it can read the meta columns int originColsCount = resolvedSchema.getColumnCount(); if (metadataColumns.size() > 0) { resolvedSchema = addExtraMetaCols( tableModify, tableScan, tableDebugName, metadataColumns, typeFactory); } return Tuple2.of( projectColumnsForUpdate( tableModify, originColsCount, resolvedSchema, colIndexes, rowLevelUpdateInfo.getRowLevelUpdateMode(), tableDebugName, dataTypeFactory, typeFactory), getPhysicalColumnIndices(colIndexes, resolvedSchema)); }
3.68
framework_VDragEvent_setElementOver
/** * @since 7.2 * @param targetElement * target element over which DnD event has happened * @see #getElementOver() */ public void setElementOver(Element targetElement) { setElementOver(DOM.asOld(targetElement)); }
3.68
framework_Calendar_setDefaultHandlers
/** * Set all the wanted default handlers here. This is always called after * constructing this object. All other events have default handlers except * range and event click. */ protected void setDefaultHandlers() { setHandler(new BasicBackwardHandler()); setHandler(new BasicForwardHandler()); setHandler(new BasicWeekClickHandler()); setHandler(new BasicDateClickHandler()); setHandler(new BasicEventMoveHandler()); setHandler(new BasicEventResizeHandler()); }
3.68
flink_BlobServer_getBlobExpiryTimes
/** * Returns the blob expiry times - for testing purposes only! * * @return blob expiry times (internal state!) */ @VisibleForTesting ConcurrentMap<Tuple2<JobID, TransientBlobKey>, Long> getBlobExpiryTimes() { return blobExpiryTimes; }
3.68
hbase_CellComparatorImpl_compareRows
/** * Compares the row part of the cell with a simple plain byte[] like the stopRow in Scan. This * should be used in a context where, for hbase:meta cells, the * {@link MetaCellComparator#META_COMPARATOR} should be used. * @param left the cell to be compared * @param right the kv serialized byte[] to be compared with * @param roffset the offset in the byte[] * @param rlength the length in the byte[] * @return 0 if both cell and the byte[] are equal, 1 if the cell is bigger than byte[], -1 * otherwise */ @Override public int compareRows(Cell left, byte[] right, int roffset, int rlength) { if (left instanceof ByteBufferExtendedCell) { return ByteBufferUtils.compareTo(((ByteBufferExtendedCell) left).getRowByteBuffer(), ((ByteBufferExtendedCell) left).getRowPosition(), left.getRowLength(), right, roffset, rlength); } return Bytes.compareTo(left.getRowArray(), left.getRowOffset(), left.getRowLength(), right, roffset, rlength); }
3.68
querydsl_JTSSurfaceExpression_pointOnSurface
/** * A Point guaranteed to be on this Surface. * * @return point on surface */ public JTSPointExpression<Point> pointOnSurface() { if (pointOnSurface == null) { pointOnSurface = JTSGeometryExpressions.pointOperation(SpatialOps.POINT_ON_SURFACE, mixin); } return pointOnSurface; }
3.68
framework_SingleSelectionModel_addSelectionListener
/** * {@inheritDoc} * <p> * Use {@link #addSingleSelectionListener(SingleSelectionListener)} for more * specific single selection event. * * @see #addSingleSelectionListener(SingleSelectionListener) */ @Override public default Registration addSelectionListener( SelectionListener<T> listener) { return addSingleSelectionListener( event -> listener.selectionChange(event)); }
3.68
graphhopper_GraphHopper_importOrLoad
/** * Imports the provided data from disk and creates the graph. Depending on the settings, the resulting * graph will be stored to disk, so on a second call this method will only load the graph from * disk, which is usually a lot faster. */ public GraphHopper importOrLoad() { if (!load()) { printInfo(); process(false); } else { printInfo(); } return this; }
3.68
framework_TreeData_getParent
/** * Get the parent item for the given item. * * @param item * the item for which to retrieve the parent item for * @return parent item for the given item or {@code null} if the item is a * root item. * @throws IllegalArgumentException * if the item does not exist in this structure * @since 8.1.1 */ public T getParent(T item) { if (!contains(item)) { throw new IllegalArgumentException( "Item '" + item + "' not in hierarchy"); } return itemToWrapperMap.get(item).getParent(); }
3.68
graphhopper_Country_getAlpha2
/** * @return the ISO 3166-1:alpha2 code of this country */ public String getAlpha2() { return alpha2; }
3.68
hadoop_MutableStat_setExtended
/** * Set whether to display the extended stats (stdev, min/max etc.) or not * @param extended enable/disable displaying extended stats */ public synchronized void setExtended(boolean extended) { this.extended = extended; }
3.68
hadoop_TaskPool_sleepInterval
/** * Set the sleep interval. * @param value new value * @return the builder */ public Builder<I> sleepInterval(final int value) { sleepInterval = value; return this; }
3.68
hbase_RequestConverter_buildModifyTableRequest
/** * Creates a protocol buffer ModifyTableRequest * @return a ModifyTableRequest */ public static ModifyTableRequest buildModifyTableRequest(final TableName tableName, final TableDescriptor tableDesc, final long nonceGroup, final long nonce, final boolean reopenRegions) { ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc)); builder.setNonceGroup(nonceGroup); builder.setNonce(nonce); builder.setReopenRegions(reopenRegions); return builder.build(); }
3.68
hadoop_AzureBlobFileSystem_getDelegationTokenManager
/** * Get any Delegation Token manager created by the filesystem. * @return the DT manager or null. */ @VisibleForTesting AbfsDelegationTokenManager getDelegationTokenManager() { return delegationTokenManager; }
3.68
flink_LinkedOptionalMap_mergeRightIntoLeft
/** Tries to merge the keys and the values of {@code right} into {@code left}. */ public static <K, V> MergeResult<K, V> mergeRightIntoLeft( LinkedOptionalMap<K, V> left, LinkedOptionalMap<K, V> right) { LinkedOptionalMap<K, V> merged = new LinkedOptionalMap<>(left); merged.putAll(right); return new MergeResult<>(merged, isLeftPrefixOfRight(left, right)); }
3.68
framework_AbstractListing_removeDataGenerator
/** * Removes the given data generator from this listing. If this listing does * not have the generator, does nothing. * * @param generator * the data generator to remove, not null */ protected void removeDataGenerator(DataGenerator<T> generator) { getDataCommunicator().removeDataGenerator(generator); }
3.68
morf_DataValueLookup_getDate
/** * Gets the value as a {@link java.sql.Date}. Will attempt conversion where possible * and throw a suitable conversion exception if the conversion fails. * May return {@code null} if the value is not set or is explicitly set * to {@code null}. * * @param name The column name. * @return The value. */ public default java.sql.Date getDate(String name) { String value = getValue(name); return value == null ? null : java.sql.Date.valueOf(value); }
3.68
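A hedged usage sketch; the record variable and column name are illustrative. Since the conversion uses java.sql.Date.valueOf, the stored string must already be in yyyy-[m]m-[d]d form:
// 'record' is an assumed DataValueLookup instance, e.g. a row read elsewhere.
java.sql.Date dateOfBirth = record.getDate("dateOfBirth"); // null if unset or explicitly null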
flink_TwoPhaseCommitSinkFunction_recoverAndAbort
/** Abort a transaction that was rejected by a coordinator after a failure. */ protected void recoverAndAbort(TXN transaction) { abort(transaction); }
3.68
framework_CalendarConnector_registerEventToolTips
/** * Register the description of the events as tooltips. This way, any event * displaying widget can use the event index as a key to display the * tooltip. */ private void registerEventToolTips(List<CalendarState.Event> events) { for (CalendarState.Event e : events) { if (e.description != null && !"".equals(e.description)) { tooltips.put(e.index, e.description); } else { tooltips.remove(e.index); } } }
3.68
morf_InsertStatement_useParallelDml
/** * Request that this statement is executed with a parallel execution plan for data manipulation language (DML). This request will have no effect unless the database implementation supports it and the feature is enabled. * * <p>For statements that will affect a high percentage of rows in the table, a parallel execution plan may reduce the execution time, although the exact effect depends on * the underlying database, the nature of the data and the nature of the query.</p> * * <p>Note that the use of parallel DML comes with restrictions; in particular, a table may not be accessed in the same transaction following a parallel DML execution. Please consult the Oracle manual section <em>Restrictions on Parallel DML</em> to check whether this hint is suitable.</p> * * @param degreeOfParallelism - the degree of parallelism * @return this, for method chaining. */ public InsertStatement useParallelDml(int degreeOfParallelism) { return copyOnWriteOrMutate( insertStatementBuilder -> insertStatementBuilder.useParallelDml(degreeOfParallelism), () -> this.hints.add(new UseParallelDml(degreeOfParallelism)) ); }
3.68
framework_LayoutManager_setConnection
/** * Sets the application connection this instance is connected to. Called * internally by the framework. * * @param connection * the application connection this instance is connected to */ public void setConnection(ApplicationConnection connection) { if (this.connection != null) { throw new RuntimeException( "LayoutManager connection can never be changed"); } this.connection = connection; }
3.68
hbase_BloomFilterMetrics_incrementEligible
/** * Increment for cases where bloom filter could have been used but wasn't defined or loaded. */ public void incrementEligible() { eligibleRequests.increment(); }
3.68
flink_GlobalProperties_setRangePartitioned
/** * Set the parameters for range partition. * * @param ordering Order of the partitioned fields * @param distribution The data distribution for range partition. User can supply a customized * data distribution, also the data distribution can be null. */ public void setRangePartitioned(Ordering ordering, DataDistribution distribution) { if (ordering == null) { throw new NullPointerException(); } this.partitioning = PartitioningProperty.RANGE_PARTITIONED; this.ordering = ordering; this.partitioningFields = ordering.getInvolvedIndexes(); this.distribution = distribution; }
3.68
hbase_VisibilityLabelsCache_getLabelsCount
/** Returns The total number of visibility labels. */ public int getLabelsCount() { this.lock.readLock().lock(); try { return this.labels.size(); } finally { this.lock.readLock().unlock(); } }
3.68
dubbo_LoggerAdapter_isConfigured
/** * Returns whether the current logger has been configured. * Used to check if the logger is available to use. * * @return true if the current logger has been configured */ default boolean isConfigured() { return true; }
3.68
hbase_OrderedBytes_decodeInt64
/** * Decode an {@code int64} value. * @see #encodeInt64(PositionedByteRange, long, Order) */ public static long decodeInt64(PositionedByteRange src) { final byte header = src.get(); assert header == FIXED_INT64 || header == DESCENDING.apply(FIXED_INT64); Order ord = header == FIXED_INT64 ? ASCENDING : DESCENDING; long val = (ord.apply(src.get()) ^ 0x80) & 0xff; for (int i = 1; i < 8; i++) { val = (val << 8) + (ord.apply(src.get()) & 0xff); } return val; }
3.68
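A hedged round-trip sketch pairing the decoder with encodeInt64; the 9-byte buffer covers the 1-byte header plus 8 value bytes (PositionedByteRange and SimplePositionedMutableByteRange are from HBase's util package, as I understand the API):
PositionedByteRange buf = new SimplePositionedMutableByteRange(9);
OrderedBytes.encodeInt64(buf, 42L, Order.ASCENDING);
buf.setPosition(0); // rewind before decoding
long decoded = OrderedBytes.decodeInt64(buf); // 42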
morf_AddIndex_isApplied
/** * {@inheritDoc} * * @see org.alfasoftware.morf.upgrade.SchemaChange#isApplied(Schema, ConnectionResources) */ @Override public boolean isApplied(Schema schema, ConnectionResources database) { if (!schema.tableExists(tableName)) { return false; } Table table = schema.getTable(tableName); SchemaHomology homology = new SchemaHomology(); for (Index index : table.indexes()) { if (homology.indexesMatch(index, newIndex)) { return true; } } return false; }
3.68
hbase_RegionStates_getRegionsOfTableForDeleting
/** * Get the regions for deleting a table. * <p/> * Here we need to return all the regions irrespective of the states in order to archive them all. * This is because if we don't archive OFFLINE/SPLIT regions and a snapshot or a cloned table * references the regions, we will lose the data of the regions. */ public List<RegionInfo> getRegionsOfTableForDeleting(TableName table) { return getTableRegionStateNodes(table).stream().map(RegionStateNode::getRegionInfo) .collect(Collectors.toList()); }
3.68
hadoop_ResourceMappings_addAssignedResources
/** * Adds the resources for a given resource type. * * @param resourceType Resource Type * @param assigned Assigned resources to add */ public void addAssignedResources(String resourceType, AssignedResources assigned) { assignedResourcesMap.put(resourceType, assigned); }
3.68
dubbo_ParamType_addSupportTypes
/** * exclude null types * * @param classes * @return */ private static List<Class> addSupportTypes(Class... classes) { ArrayList<Class> types = new ArrayList<>(); for (Class clazz : classes) { if (clazz == null) { continue; } types.add(clazz); } return types; }
3.68
flink_SubpartitionDiskCacheManager_removeAllBuffers
/** Note that allBuffers can be touched by multiple threads. */ List<Tuple2<Buffer, Integer>> removeAllBuffers() { synchronized (allBuffers) { List<Tuple2<Buffer, Integer>> targetBuffers = new ArrayList<>(allBuffers); allBuffers.clear(); return targetBuffers; } }
3.68
hbase_AccessController_permissionGranted
/** * Check the current user for authorization to perform a specific action against the given set of * row data. * @param opType the operation type * @param user the user * @param e the coprocessor environment * @param families the map of column families to qualifiers present in the request * @param actions the desired actions * @return an authorization result */ private AuthResult permissionGranted(OpType opType, User user, RegionCoprocessorEnvironment e, Map<byte[], ? extends Collection<?>> families, Action... actions) { AuthResult result = null; for (Action action : actions) { result = accessChecker.permissionGranted(opType.toString(), user, action, e.getRegion().getRegionInfo().getTable(), families); if (!result.isAllowed()) { return result; } } return result; }
3.68
streampipes_InfluxDbClient_query
// Returns a list with the entries of the query. If there are no entries, it returns an empty list List<List<Object>> query(String query) { if (!connected) { throw new RuntimeException("InfluxDbClient not connected"); } QueryResult queryResult = influxDb.query(new Query(query, connectionSettings.getDatabaseName())); if (queryResult.getResults().get(0).getSeries() != null) { return queryResult.getResults().get(0).getSeries().get(0).getValues(); } else { return new ArrayList<>(); } }
3.68
flink_ListView_addAll
/** * Adds all of the elements of the specified list to this list view. * * @throws Exception Thrown if the system cannot add all data. * @param list The list with the elements that will be stored in this list view. */ public void addAll(List<T> list) throws Exception { this.list.addAll(list); }
3.68
flink_StreamTaskSourceInput_checkpointStarted
/** * This method is used with unaligned checkpoints to mark the arrival of the first {@link * CheckpointBarrier}. For chained sources, there is no {@link CheckpointBarrier} per se flowing * through the job graph. We can assume that an imaginary {@link CheckpointBarrier} was produced * by the source at any point of time of our choosing. * * <p>We choose to interpret it so that the {@link CheckpointBarrier} for sources is received * immediately as soon as we receive either the checkpoint start RPC or a {@link CheckpointBarrier} * from a network input, so that we can checkpoint the state of the source and all of the other * operators at the same time. * * <p>We also choose to block the source as a best-effort optimisation: either there * is no backpressure and the checkpoint "alignment" will happen very quickly anyway, or there * is backpressure, and it's better to prioritize processing data from the network to speed up * checkpointing. From the cluster resource utilisation perspective, blocking a chained source * doesn't block any resources from being used, as the task running the source has a backlog of * buffered input data waiting to be processed. * * <p>However, from the correctness point of view, the {@link #checkpointStarted(CheckpointBarrier)} * and {@link #checkpointStopped(long)} methods could be empty no-ops. */ @Override public void checkpointStarted(CheckpointBarrier barrier) { blockConsumption(null); }
3.68
hadoop_PathFinder_getAbsolutePath
/** * Returns the full path name of this file if it is listed in the path */ public File getAbsolutePath(String filename) { if (pathenv == null || pathSep == null || fileSep == null) { return null; } int val = -1; String classvalue = pathenv + pathSep; while (((val = classvalue.indexOf(pathSep)) >= 0) && classvalue.length() > 0) { // Extract each entry from the pathenv String entry = classvalue.substring(0, val).trim(); File f = new File(entry); if (f.isDirectory()) { // this entry in the pathenv is a directory. // see if the required file is in this directory f = new File(entry + fileSep + filename); } // see if the filename matches and we can read it if (f.isFile() && FileUtil.canRead(f)) { return f; } classvalue = classvalue.substring(val + 1).trim(); } return null; }
3.68
hudi_HoodieInputFormatUtils_refreshFileStatus
/** * Checks the file status for a race condition which can set the file size to 0. 1. HiveInputFormat does * super.listStatus() and gets back a FileStatus[] 2. Then it creates the HoodieTableMetaClient for the paths listed. * 3. Generation of splits looks at FileStatus size to create splits, which skips this file * * @param conf * @param dataFile * @return */ private static HoodieBaseFile refreshFileStatus(Configuration conf, HoodieBaseFile dataFile) { Path dataPath = dataFile.getFileStatus().getPath(); try { if (dataFile.getFileSize() == 0) { FileSystem fs = dataPath.getFileSystem(conf); LOG.info("Refreshing file status " + dataFile.getPath()); return new HoodieBaseFile(fs.getFileStatus(dataPath), dataFile.getBootstrapBaseFile().orElse(null)); } return dataFile; } catch (IOException e) { throw new HoodieIOException("Could not get FileStatus on path " + dataPath); } }
3.68
shardingsphere-elasticjob_FailoverService_failoverIfNecessary
/** * Failover if necessary. */ public void failoverIfNecessary() { if (needFailover()) { jobNodeStorage.executeInLeader(FailoverNode.LATCH, new FailoverLeaderExecutionCallback()); } }
3.68
querydsl_NumberExpression_stringValue
/** * Create a cast to String expression * * @see java.lang.Object#toString() * @return string representation */ public StringExpression stringValue() { if (stringCast == null) { stringCast = Expressions.stringOperation(Ops.STRING_CAST, mixin); } return stringCast; }
3.68
framework_VFilterSelect_reset
/** * Resets the Select to its initial state */ private void reset() { debug("VFS: reset()"); if (currentSuggestion != null) { String text = currentSuggestion.getReplacementString(); setPromptingOff(text); setSelectedItemIcon(currentSuggestion.getIconUri()); selectedOptionKey = currentSuggestion.key; } else { if (focused || readonly || !enabled) { setPromptingOff(""); } else { setPromptingOn(); } setSelectedItemIcon(null); selectedOptionKey = null; } lastFilter = ""; suggestionPopup.hide(); }
3.68
framework_HierarchicalDataCommunicator_isExpanded
/** * Returns whether given item is expanded. * * @param item * the item to test * @return {@code true} if item is expanded; {@code false} if not */ public boolean isExpanded(T item) { return mapper.isExpanded(item); }
3.68
shardingsphere-elasticjob_JobNodeStorage_fillJobNode
/** * Fill job node. * * @param node node * @param value data of job node */ public void fillJobNode(final String node, final Object value) { regCenter.persist(jobNodePath.getFullPath(node), value.toString()); }
3.68
hudi_OptionsResolver_emitChangelog
/** * Returns whether the source should emit changelog. * * @return true if the source is read as streaming with changelog mode enabled */ public static boolean emitChangelog(Configuration conf) { return conf.getBoolean(FlinkOptions.READ_AS_STREAMING) && conf.getBoolean(FlinkOptions.CHANGELOG_ENABLED) || conf.getBoolean(FlinkOptions.READ_AS_STREAMING) && conf.getBoolean(FlinkOptions.CDC_ENABLED) || isIncrementalQuery(conf) && conf.getBoolean(FlinkOptions.CDC_ENABLED); }
3.68
querydsl_HibernateUpdateClause_setLockMode
/** * Set the lock mode for the given path. * @return the current object */ @SuppressWarnings("unchecked") public HibernateUpdateClause setLockMode(Path<?> path, LockMode lockMode) { lockModes.put(path, lockMode); return this; }
3.68
hadoop_DiskBalancerWorkItem_parseJson
/** * Reads a DiskBalancerWorkItem Object from a Json String. * * @param json - Json String. * @return DiskBalancerWorkItem Object * @throws IOException */ public static DiskBalancerWorkItem parseJson(String json) throws IOException { Preconditions.checkNotNull(json); return READER.readValue(json); }
3.68
framework_VaadinService_removeFromHttpSession
/** * Performs the actual removal of the VaadinSession from the underlying HTTP * session after sanity checks have been performed. * * @since 7.6 * @param wrappedSession * the underlying HTTP session */ protected void removeFromHttpSession(WrappedSession wrappedSession) { wrappedSession.removeAttribute(getSessionAttributeName()); }
3.68
flink_FixedLengthRecordSorter_getIterator
/** * Gets an iterator over all records in this buffer in their logical order. * * @return An iterator returning the records in their logical order. */ @Override public final MutableObjectIterator<T> getIterator() { final SingleSegmentInputView startIn = new SingleSegmentInputView(this.recordsPerSegment * this.recordSize); startIn.set(this.sortBuffer.get(0), 0); return new MutableObjectIterator<T>() { private final SingleSegmentInputView in = startIn; private final TypeComparator<T> comp = comparator; private final int numTotal = size(); private final int numPerSegment = recordsPerSegment; private int currentTotal = 0; private int currentInSegment = 0; private int currentSegmentIndex = 0; @Override public T next(T reuse) { if (this.currentTotal < this.numTotal) { if (this.currentInSegment >= this.numPerSegment) { this.currentInSegment = 0; this.currentSegmentIndex++; this.in.set(sortBuffer.get(this.currentSegmentIndex), 0); } this.currentTotal++; this.currentInSegment++; try { return this.comp.readWithKeyDenormalization(reuse, this.in); } catch (IOException ioe) { throw new RuntimeException(ioe); } } else { return null; } } @Override public T next() { if (this.currentTotal < this.numTotal) { if (this.currentInSegment >= this.numPerSegment) { this.currentInSegment = 0; this.currentSegmentIndex++; this.in.set(sortBuffer.get(this.currentSegmentIndex), 0); } this.currentTotal++; this.currentInSegment++; try { return this.comp.readWithKeyDenormalization( serializer.createInstance(), this.in); } catch (IOException ioe) { throw new RuntimeException(ioe); } } else { return null; } } }; }
3.68
framework_AbstractInlineDateFieldConnector_updateListeners
/** * Updates listeners registered (or register them) for the widget based on * the current resolution. * <p> * Subclasses may override this method to keep the common logic inside the * {@link #updateFromUIDL(UIDL, ApplicationConnection)} method as is and * customizing only listeners logic. */ @SuppressWarnings("deprecation") protected void updateListeners() { VAbstractDateFieldCalendar<PANEL, R> widget = getWidget(); if (isResolutionMonthOrHigher()) { widget.calendarPanel.setFocusChangeListener(date -> { Date date2 = new Date(); if (widget.calendarPanel.getDate() != null) { date2.setTime(widget.calendarPanel.getDate().getTime()); } /* * Update the value of calendarPanel */ date2.setYear(date.getYear()); date2.setMonth(date.getMonth()); widget.calendarPanel.setDate(date2); /* * Then update the value from panel to server */ widget.updateValueFromPanel(); }); } else { widget.calendarPanel.setFocusChangeListener(null); } }
3.68
hadoop_Cluster_getClusterStatus
/** * Get current cluster status. * * @return object of {@link ClusterMetrics} * @throws IOException * @throws InterruptedException */ public ClusterMetrics getClusterStatus() throws IOException, InterruptedException { return client.getClusterMetrics(); }
3.68
hudi_DataPruner_test
/** * Filters the index row with specific data filters and query fields. * * @param indexRow The index row * @param queryFields The query fields referenced by the filters * @return true if the index row should be considered as a candidate */ public boolean test(RowData indexRow, RowType.RowField[] queryFields) { Map<String, ColumnStats> columnStatsMap = convertColumnStats(indexRow, queryFields); for (ExpressionEvaluators.Evaluator evaluator : evaluators) { if (!evaluator.eval(columnStatsMap)) { return false; } } return true; }
3.68
hbase_RandomRowFilter_areSerializedFieldsEqual
/** * Returns true if and only if the fields of the filter that are serialized are equal to the * corresponding fields in other. Used for testing. */ @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) { return true; } if (!(o instanceof RandomRowFilter)) { return false; } RandomRowFilter other = (RandomRowFilter) o; return this.getChance() == other.getChance(); }
3.68
flink_SerializedCompositeKeyBuilder_buildCompositeKeyNamesSpaceUserKey
/** * Returns a serialized composite key, from the key and key-group provided in a previous call to * {@link #setKeyAndKeyGroup(Object, int)} and the given namespace, followed by the given * user-key. * * @param namespace the namespace to concatenate for the serialized composite key bytes. * @param namespaceSerializer the serializer to obtain the serialized form of the namespace. * @param userKey the user-key to concatenate for the serialized composite key, after the * namespace. * @param userKeySerializer the serializer to obtain the serialized form of the user-key. * @param <N> the type of the namespace. * @param <UK> the type of the user-key. * @return the bytes for the serialized composite key of key-group, key, namespace. */ @Nonnull public <N, UK> byte[] buildCompositeKeyNamesSpaceUserKey( @Nonnull N namespace, @Nonnull TypeSerializer<N> namespaceSerializer, @Nonnull UK userKey, @Nonnull TypeSerializer<UK> userKeySerializer) throws IOException { serializeNamespace(namespace, namespaceSerializer); userKeySerializer.serialize(userKey, keyOutView); return keyOutView.getCopyOfBuffer(); }
3.68
hbase_WindowMovingAverage_getMostRecentPosition
/** Returns the index of the most recent value. */ protected int getMostRecentPosition() { return mostRecent; }
3.68
hadoop_ApplicationMaster_run
/** * Connects to CM, sets up container launch context for shell command and * eventually dispatches the container start request to the CM. */ @Override public void run() { LOG.info("Setting up container launch context for containerid=" + container.getId() + ", isNameNode=" + isNameNodeLauncher); ContainerLaunchContext ctx = Records .newRecord(ContainerLaunchContext.class); // Set the environment ctx.setEnvironment(amOptions.getShellEnv()); ctx.setApplicationACLs(applicationAcls); try { ctx.setLocalResources(getLocalResources()); ctx.setCommands(getContainerStartCommand()); } catch (IOException e) { LOG.error("Error while configuring container!", e); return; } // Set up tokens for the container ctx.setTokens(allTokens.duplicate()); nmClientAsync.startContainerAsync(container, ctx); LOG.info("Starting {}; track at: http://{}/node/containerlogs/{}/{}/", isNameNodeLauncher ? "NAMENODE" : "DATANODE", container.getNodeHttpAddress(), container.getId(), launchingUser); }
3.68
framework_GridLayout_getComponent
/** * Gets the Component at given index. * * @param x * The column index, starting from 0 for the leftmost column. * @param y * The row index, starting from 0 for the topmost row. * @return Component in given cell or null if empty */ public Component getComponent(int x, int y) { for (Entry<Connector, ChildComponentData> entry : getState( false).childData.entrySet()) { ChildComponentData childData = entry.getValue(); if (childData.column1 <= x && x <= childData.column2 && childData.row1 <= y && y <= childData.row2) { return (Component) entry.getKey(); } } return null; }
3.68
hbase_StochasticLoadBalancer_balanceTable
/** * Given the cluster state this will try and approach an optimal balance. This should always * approach the optimal state given enough steps. */ @Override protected List<RegionPlan> balanceTable(TableName tableName, Map<ServerName, List<RegionInfo>> loadOfOneTable) { // On clusters with lots of HFileLinks or lots of reference files, // instantiating the storefile infos can be quite expensive. // Allow turning this feature off if the locality cost is not going to // be used in any computations. RegionHDFSBlockLocationFinder finder = null; if ((this.localityCost != null) || (this.rackLocalityCost != null)) { finder = this.regionFinder; } // The clusterState that is given to this method contains the state // of all the regions in the table(s) (that's true today) // Keep track of servers to iterate through them. BalancerClusterState cluster = new BalancerClusterState(loadOfOneTable, loads, finder, rackManager, regionCacheRatioOnOldServerMap); long startTime = EnvironmentEdgeManager.currentTime(); initCosts(cluster); sumMultiplier = 0; for (CostFunction c : costFunctions) { if (c.isNeeded()) { sumMultiplier += c.getMultiplier(); } } if (sumMultiplier <= 0) { LOG.error("At least one cost function needs a multiplier > 0. For example, set " + "hbase.master.balancer.stochastic.regionCountCost to a positive value or default"); return null; } double currentCost = computeCost(cluster, Double.MAX_VALUE); curOverallCost = currentCost; System.arraycopy(tempFunctionCosts, 0, curFunctionCosts, 0, curFunctionCosts.length); updateStochasticCosts(tableName, curOverallCost, curFunctionCosts); double initCost = currentCost; double newCost; if (!needsBalance(tableName, cluster)) { return null; } long computedMaxSteps; if (runMaxSteps) { computedMaxSteps = Math.max(this.maxSteps, calculateMaxSteps(cluster)); } else { long calculatedMaxSteps = calculateMaxSteps(cluster); computedMaxSteps = Math.min(this.maxSteps, calculatedMaxSteps); if (calculatedMaxSteps > maxSteps) { LOG.warn( "calculatedMaxSteps:{} for loadbalancer's stochastic walk is larger than " + "maxSteps:{}. Hence load balancing may not work well. Setting parameter " + "\"hbase.master.balancer.stochastic.runMaxSteps\" to true can overcome this issue." + "(This config change does not require service restart)", calculatedMaxSteps, maxSteps); } } LOG.info( "Start StochasticLoadBalancer.balancer, initial weighted average imbalance={}, " + "functionCost={} computedMaxSteps={}", currentCost / sumMultiplier, functionCost(), computedMaxSteps); final String initFunctionTotalCosts = totalCostsPerFunc(); // Perform a stochastic walk to see if we can get a good fit. long step; for (step = 0; step < computedMaxSteps; step++) { BalanceAction action = nextAction(cluster); if (action.getType() == BalanceAction.Type.NULL) { continue; } cluster.doAction(action); updateCostsAndWeightsWithAction(cluster, action); newCost = computeCost(cluster, currentCost); // Should this be kept? if (newCost < currentCost) { currentCost = newCost; // save for JMX curOverallCost = currentCost; System.arraycopy(tempFunctionCosts, 0, curFunctionCosts, 0, curFunctionCosts.length); } else { // Put things back the way they were before. // TODO: undo by remembering old values BalanceAction undoAction = action.undoAction(); cluster.doAction(undoAction); updateCostsAndWeightsWithAction(cluster, undoAction); } if (EnvironmentEdgeManager.currentTime() - startTime > maxRunningTime) { break; } } long endTime = EnvironmentEdgeManager.currentTime(); metricsBalancer.balanceCluster(endTime - startTime); if (initCost > currentCost) { updateStochasticCosts(tableName, curOverallCost, curFunctionCosts); List<RegionPlan> plans = createRegionPlans(cluster); LOG.info( "Finished computing new moving plan. Computation took {} ms" + " to try {} different iterations. Found a solution that moves " + "{} regions; Going from a computed imbalance of {}" + " to a new imbalance of {}. functionCost={}", endTime - startTime, step, plans.size(), initCost / sumMultiplier, currentCost / sumMultiplier, functionCost()); sendRegionPlansToRingBuffer(plans, currentCost, initCost, initFunctionTotalCosts, step); return plans; } LOG.info( "Could not find a better moving plan. Tried {} different configurations in " + "{} ms, and did not find anything with an imbalance score less than {}", step, endTime - startTime, initCost / sumMultiplier); return null; }
3.68
hadoop_Server_checkServiceDependencies
/** * Checks if all service dependencies of a service are available. * * @param service service to check if all its dependencies are available. * * @throws ServerException thrown if a service dependency is missing. */ protected void checkServiceDependencies(Service service) throws ServerException { if (service.getServiceDependencies() != null) { for (Class dependency : service.getServiceDependencies()) { if (services.get(dependency) == null) { throw new ServerException(ServerException.ERROR.S10, service.getClass(), dependency); } } } }
3.68